prompt (string, lengths 19 – 1.03M) | completion (string, lengths 4 – 2.12k) | api (string, lengths 8 – 90) |
---|---|---|
import argparse
import os
import pandas as pd
from af_dataset_builder import AFDatasetBuilder
import plots
import utils
CLASS_NAMES = ['normal', 'af', 'other', 'noise']
SEED = 3
def summarize_metrics(models_path, test_set_path, target_record_len, batch_size, model_name=None, output_path=None):
all_metrics = | pd.DataFrame() | pandas.DataFrame |
import bs4 as bs
import urllib.request
import pandas as pd
import random
import pathlib
import progressbar
import numpy as np
def schedule(names):
schedule = [['YourScore', "OppScore","FGA","FGP","3PA","3PP","FTA","FTP","TRB","STL","BLK","TOV","OppFGA","OppFGP","Opp3PA","Opp3PP","OppFTA","OppFTP","OppTRB","OppSTL","OppBLK","OppTOV"]]
count = 1
bar = progressbar.ProgressBar(max_value=progressbar.UnknownLength)
for name in names:
bar.update(count)
try:
url = 'https://www.sports-reference.com/cbb/schools/'+name+'/2020-gamelogs.html'
source = urllib.request.urlopen(url).read()
soup = bs.BeautifulSoup(source,'lxml')
content = soup.find("div", {"id":"content"})
all_sgl = content.find("div", {"id":"all_sgl-basic"})
table_container_outer = all_sgl.find("div", {"class":"table_outer_container"})
table_container = table_container_outer.find("div", {"id":"div_sgl-basic"})
table = table_container.find("table", {"id":"sgl-basic"})
tbody = table.find("tbody")
trs = tbody.find_all("tr")
count = count + 1
for tr in trs:
tds = tr.find_all("td")
                k = random.randint(0, 1)  # decide on k once
                otherName = tds[2].text
                otherName = otherName.replace("&", "")
                otherName = otherName.replace(" ", "-", 5)
                otherName = otherName.replace(".", "")
if k == 1:
if tds[1].text == "":
schedule.append([name, tds[4].text,otherName,tds[5].text,tds[7].text,tds[8].text,tds[10].text,tds[11].text,tds[13].text,tds[14].text,tds[16].text,tds[18].text,tds[19].text,tds[20].text, 1, tds[24].text,tds[25].text,tds[27].text,tds[28].text,tds[30].text,tds[31].text,tds[33].text,tds[35].text,tds[36].text,tds[37].text, 0])
else:
schedule.append([name,tds[4].text,otherName,tds[5].text,tds[7].text,tds[8].text,tds[10].text,tds[11].text,tds[13].text,tds[14].text,tds[16].text,tds[18].text,tds[19].text,tds[20].text, 0 ,tds[24].text,tds[25].text,tds[27].text,tds[28].text,tds[30].text,tds[31].text,tds[33].text,tds[35].text,tds[36].text,tds[37].text, 1])
else:
if tds[1].text == "":
schedule.append([otherName,tds[5].text,name,tds[4].text,tds[24].text,tds[25].text,tds[27].text,tds[28].text,tds[30].text,tds[31].text,tds[33].text,tds[35].text,tds[36].text,tds[37].text, 0 ,tds[7].text,tds[8].text,tds[10].text,tds[11].text,tds[13].text,tds[14].text,tds[16].text,tds[18].text,tds[19].text,tds[20].text, 1])
else:
schedule.append([otherName,tds[5].text,name,tds[4].text,tds[24].text,tds[25].text,tds[27].text,tds[28].text,tds[30].text,tds[31].text,tds[33].text,tds[35].text,tds[36].text,tds[37].text, 1,tds[7].text,tds[8].text,tds[10].text,tds[11].text,tds[13].text,tds[14].text,tds[16].text,tds[18].text,tds[19].text,tds[20].text, 0])
except AttributeError as e:
#print(e)
count = count + 1
except IndexError as i:
#print(i)
count = count + 1
return schedule
def scheduleStart(path):
url = str(path) + '/csv/test.csv'
names = ["teamName","GP","FGA","FGP","3PA","3PP","FTA","FTP","TRB","STL","BLK","TOV","Home",'Away']
data = | pd.read_csv(url, names=names,encoding='utf-8') | pandas.read_csv |
import argparse
import pandas as pd
import os
from random import shuffle
def parse_args():
parser = argparse.ArgumentParser(description="Takes the meta_data, l4, and l4_no_pca files for the train, val and test sets "
"of students and returns them in libsvc format.")
parser.add_argument("--in_path", dest="in_path", help="Path to folder with meta_data, l4 and l4_no_pca", type=str,
required=True)
parser.add_argument("--test", dest="test", help="Id of VP for test", type=int, required=True)
parser.add_argument("--train", dest="train", help="String of train vp ids, comma separated: 1,12,15,9,2", type=str,
required=True)
parser.add_argument("--val", dest="val", help="String of val vp ids, comma separated: 1,12,15,9,2", type=str,
required=True)
parser.add_argument("--out_path", dest="out_path", help="Path for outputting the formatted files.", type=str,
required=True)
return parser.parse_args()
def to_libsvm_format(df: pd.DataFrame, labels: pd.DataFrame):
out = ""
for idx, row in df.iterrows():
out += "{} ".format(int(labels.iloc[idx].values[0]))
row_list = row.values.tolist()
for i in range(len(row_list)):
out += "{}:{} ".format(i+1, row_list[i])
out += "\n"
return out
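# Each emitted line follows the sparse libsvm/svmlight layout produced above,
# i.e. "<label> 1:<feat_1> 2:<feat_2> ... n:<feat_n>", for example "3 1:0.12 2:0.85 3:1.0".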
# args = parse_args()
# test = "{}.csv".format(args.test)
# vals = args.val.split(",")
# val = ["{}.csv".format(x) for x in vals]
# trains = args.train.split(",")
# train = ["{}.csv".format(x) for x in trains]
in_path = "../../source/train_val_test_sets/" # args.in_path
out_path = "../../source/libsvm_train_test_val/" # args.out_path
out_df = pd.DataFrame([], columns=["test", "val", "train"])
files = os.listdir(in_path + "meta_data/")
count = 1
for file in files:
print("{} {}/{}".format(file, count, len(files)))
count += 1
test = file
copy = files.copy()
copy.remove(test)
shuffle(copy)
train = copy[:9]
val = copy[9:]
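    # Hold the current file out as the test subject; 9 shuffled subjects form the train set, the remainder the val set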
out_df = out_df.append(pd.DataFrame([[test[:-4], str([x[:-4] for x in val])[1:-1], str([x[:-4] for x in train])[1:-1]]],
columns=out_df.columns))
test_label_df = pd.read_csv("{}labels/{}".format(in_path, test))
test_hp_df = pd.read_csv("{}hp/{}".format(in_path, test))
test_l4_df = pd.read_csv("{}l4/{}".format(in_path, test))
test_l4_no_pca_df = pd.read_csv("{}l4_no_pca/{}".format(in_path, test))
val_label_df = pd.DataFrame([])
val_hp_df = pd.DataFrame([])
val_l4_df = pd.DataFrame([])
val_l4_no_pca_df = pd.DataFrame([])
for vvp in val:
val_label_df = val_label_df.append(pd.read_csv("{}labels/{}".format(in_path, vvp)))
val_hp_df = val_hp_df.append(pd.read_csv("{}hp/{}".format(in_path, vvp)))
val_l4_df = val_l4_df.append(pd.read_csv("{}l4/{}".format(in_path, vvp)))
val_l4_no_pca_df = val_l4_no_pca_df.append(pd.read_csv("{}l4_no_pca/{}".format(in_path, vvp)))
train_label_df = | pd.DataFrame([]) | pandas.DataFrame |
import climetlab as cml
from . import DATA_VERSION, PATTERN_GRIB, PATTERN_NCDF
class Info:
def __init__(self, dataset):
import os
import yaml
self.dataset = dataset
filename = self.dataset.replace("-", "_") + ".yaml"
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
with open(path) as f:
self.config = yaml.unsafe_load(f.read())
def _get_cf_name(self, param):
return cml.utils.conventions.normalise_string(param, convention="cf")
# TODO add _
def get_category_param(self, param):
if param in "2t/sst/sm20/sm100/st20/st100/ci/rsn/tcc/tcw".split("/"):
return "daily_average"
if param in "sp/msl/ttr/tp".split("/"):
return "instantaneous"
if param in "lsm".split("/"):
return "instantaneous_only_control"
if param in "u/v/gh/t".split("/"):
return "3d"
if param in "q".split("/"):
return "3dbis"
raise NotImplementedError(param)
def _get_config_keys(self):
return self.config.keys()
def _get_s3path_grib(
self, origin, fctype, parameter, date, url="s3://", version=DATA_VERSION
):
return PATTERN_GRIB.format(
url=url,
data="s2s-ai-challenge/data",
dataset=self.dataset,
fctype=fctype,
origin=origin,
version=version,
parameter=parameter,
date=date,
)
def _get_s3path_netcdf(
self, origin, fctype, parameter, date, url="s3://", version=DATA_VERSION
):
return PATTERN_NCDF.format(
url=url,
data="s2s-ai-challenge/data",
dataset=self.dataset,
fctype=fctype,
origin=origin,
version=version,
parameter=parameter,
date=date,
)
def _get_config(self, key, origin, fctype, date=None, param=None):
origin_fctype = f"{origin}-{fctype}"
import pandas as pd
if key == "hdate":
if origin == "ncep" and fctype == "hindcast":
return pd.date_range(end=date, periods=12, freq=pd.DateOffset(years=1))
if key == "marsdate":
if origin == "ncep" and fctype == "hindcast":
only_one_date = "2011-03-01"
return | pd.to_datetime(only_one_date) | pandas.to_datetime |
import pandas as pd
import numpy as np
import math
from openpyxl import load_workbook
# Dictionary of expiry dates, hard-coded because historical expiry dates are not readily available (Unix timestamps in ms)
expdct = {'10APR20': 1586505600000,
'17APR20': 1587110400000,
'24APR20': 1587715200000
}
# Arbitrary start time
rando_start = 1586433719209
path = ("path to formatted data using groupbyexp")
data_destination = ("data destination")
columnsToDrop = ['underlying_price', 'timestamp', 'state', 'settlement_price', 'open_interest', 'min_price',
'max_price', 'mark_price', 'mark_iv', 'last_price', 'interest_rate', 'instrument_name',
'index_price', 'change_id', 'bids', 'bid_iv', 'best_bid_amount', 'best_ask_amount',
'asks', 'ask_iv', '24h_high', '24h_low', '24h_vol', 'theta', 'delta', 'rho',
'gamma', 'vega']
def getSigma(df, timeStamp, optionExpDate, columnsToDrop):
# get the time to expiration in minutes
expTime = expdct[optionExpDate]
N = (expTime - timeStamp)/(1000 * 60)
T = N/525600
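    # T expresses N as a fraction of a year (525600 = minutes per year)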
# formatting
CP = df['instrument_name'].str
df['CP'] = CP[-1:]
df['strike'] = CP.extract('([0-9][0-9][0-9]+)').astype(int)
df = df.drop(columns=columnsToDrop)
df = df.sort_values(['CP', 'strike']).reset_index()
df['mid'] = (df['best_bid_price'] + df['best_ask_price']) / 2
dfTemp = df.copy()
# calculating F and K
dfTemp.set_index(['CP', 'strike'], inplace=True)
dfTemp = dfTemp[dfTemp['best_bid_price'] > 0]['mid'].unstack('CP')
dfTemp['diff'] = np.absolute(np.array(dfTemp['C']) - np.array((dfTemp['P'])))
    # Might potentially get an IndexError on the next line. I think it's when there is no minimum. TODO
strike = dfTemp.index[np.where(dfTemp['diff'] == np.amin(dfTemp['diff']))[0][0]]
# Have to check if this multiplier is needed
eRT = math.exp(N * 0.001)
F = strike + (eRT * np.amin(dfTemp['diff']))
dfTemp = dfTemp[dfTemp.index < F]
K = dfTemp.index[dfTemp.shape[0] - 1]
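    # F approximates the forward price via put-call parity (the strike with the smallest |C - P| gap),
    # and K is the largest listed strike below F; this appears to mirror the CBOE VIX methodology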
# selecting out of money option
P = df[df['CP'] == 'P']
strike_index = int(np.where((P['strike'] == K) == True)[0])
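    # Note: x != x is True only for NaN, so the comparison below flags strikes with a missing best bid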
oomPut = (P['best_bid_price'] != P['best_bid_price']).tolist()
putCutoff = 0
for i in range(strike_index):
if(oomPut[i] == oomPut[i+1] and oomPut[i] == True):
putCutoff = i+1
continue
P = P.iloc[putCutoff+1:]
keep = np.array(P['strike'] > K-1) + np.array(P['best_bid_price'] != 0)
P = P[keep].reset_index()
C = df[df['CP'] == 'C']
oomCall = (C['best_bid_price'] != C['best_bid_price']).tolist()
callCutOff = C.shape[0]
for i in range((len(oomCall)-1),strike_index,-1):
        if(oomCall[i] == oomCall[i-1] and oomCall[i] == True):
callCutOff = i-1
continue
C = C.iloc[:callCutOff]
keep = np.array(C['strike'] < K) + np.array(C['best_bid_price'] != 0)
C = C[keep].reset_index()
P_put = int(np.where((P['strike'] == K) == True)[0])
    # TypeError: only size-1 arrays can be converted to Python scalars. Not sure why. TODO
C_call = int(np.where((C['strike'] == K) == True)[0])
mid = P['mid'][:P_put].tolist() + [(P['mid'][P_put] + C['mid'][C_call])/2] + C['mid'][C_call+1:].tolist()
df_mid = pd.merge(P, C, on='strike', how='inner')
# step 2 formula part
strike = df_mid['strike'].tolist()
sum = 0
for i in range(len(strike)):
if i == 0:
delta_strike = strike[i+1] - strike[i]
elif i == len(strike)-1:
delta_strike = strike[i] - strike[i-1]
else:
delta_strike = (strike[i-1] + strike[i+1])/2
sum += (delta_strike) * eRT * mid[i]
sigma = (2 * sum - ((F/K) - 1)**2) / (T * (F**2))
return N, sigma
def calculateVix(N1, sum1, N2, sum2):
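    # Weight the near-term (N1, sum1) and next-term (N2, sum2) values toward a constant
    # 7-day horizon (10080 = minutes in a week) before taking 100 * sqrt, VIX-style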
try:
intermediate = ((N1 * sum1 * ((N2 - 10080)/(N2 - N1))) + (N2 * sum2 * ((10080 - N1)/(N2 - N1)))) * (1/10080)
return 100 * math.sqrt(intermediate)
except ZeroDivisionError:
return 0
def closest(timestamp, path, nextDate, i):
    # Find the timestamp closest to the 15-minute interval we use
smallestDiff = math.inf
while True:
t = int(mean(list(pd.read_csv(path + "//" + str(nextDate) + "_" + str(i) + ".csv")['timestamp'])))
diff = abs(timestamp - t)
if diff > smallestDiff:
return i - 1
else:
smallestDiff = diff
i += 1
def mean(lst):
    # Mean of a list. A custom implementation is needed because one of the option files
    # contains a stray string that has to be skipped.
acc = 0
lgth = len(lst)
for num in lst:
try:
acc += int(num)
except ValueError:
lgth -= 1
    return acc/lgth
dates = list(expdct.keys())
dateIndex = 0
counter1 = closest(rando_start, path, dates[0],0)
counter2 = closest(rando_start, path, dates[1],0)
time = rando_start
lst = []
timelst = []
while True:
try:
option1 = path + "//" + str(dates[dateIndex]) + "_" + str(counter1) + ".csv"
option2 = path + "//" + str(dates[dateIndex+1]) + "_" + str(counter2) + ".csv"
df1 = pd.read_csv(option1).replace(0, np.nan)
N1, sum1 = getSigma(df1, time, dates[dateIndex], columnsToDrop)
df2 = pd.read_csv(option2).replace(0, np.nan)
N2, sum2 = getSigma(df2, time, dates[dateIndex+1], columnsToDrop)
y = calculateVix(N1, sum1, N2, sum2)
# IndexError from line 48, TypeError from line 79
except (IndexError, TypeError):
y = np.nan
lst += [y]
timelst += [time]
time += 900000
try:
counter1 = closest(time, path, dates[dateIndex], counter1+1)
counter2 = closest(time, path, dates[dateIndex+1], counter2+1)
except FileNotFoundError:
# FileNotFoundError is thrown when an option expires and we overshoot the index range.
# Thus we change the expiry dates
counter1 = counter2
option1 = option2
if dateIndex + 2 >= len(dates):
break
else:
counter2 = closest(time, path, dates[dateIndex+2], 0)
dateIndex += 1
# writing as xlsx file
data = pd.DataFrame({'timeStamp':timelst, 'Vix': lst})
book = load_workbook(data_destination)
writer = | pd.ExcelWriter(data_destination, engine="openpyxl", mode="a") | pandas.ExcelWriter |
""" This script aggregates zugdata on a daily basis and uploads it in /live/aggdata """
import os
import re
import pandas as pd
from datetime import datetime, date, timedelta
# compatibility with ipython
#os.chdir(os.path.dirname(__file__))
import json
import boto3
from pathlib import Path
from coords_to_kreis import coords_convert
date = date.today() - timedelta(days = 4)
# connect to aws
date.weekday()
client_s3 = boto3.client("s3")
s3 = boto3.resource('s3')
content_object = s3.Object("sdd-s3-basebucket", "zugdaten/{}/{}/{}/zugdaten.json".format(str(date.year).zfill(4), str(date.month).zfill(2), str(date.day).zfill(2)))
file_content = content_object.get()['Body'].read().decode('utf-8')
json_content = json.loads(file_content)
df = | pd.DataFrame(json_content) | pandas.DataFrame |
from functools import lru_cache
import datetime
from typing import Tuple, List, Callable, NamedTuple
from collections import namedtuple
import sqlalchemy
import pandas as pd
def get_securities():
return pd.read_sql('securities', con=sqlalchemy.create_engine('sqlite:///../data/jq.db'))
@lru_cache(maxsize=1)
def get_profit_forecast(today: str):
    assert today is not None  # this argument exists only for caching: within a single day there is no need to re-fetch from the database
df = pd.read_sql('profit_forecast', con=sqlalchemy.create_engine('sqlite:///../data/em1.db'))\
.set_index('code')\
.drop('index', axis=1)
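    # coerce the EPS forecast columns to float; unparsable entries become NaN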
return df[['eps_2019', 'eps_2020', 'eps_2021']].apply(pd.to_numeric, errors='coerce', downcast='float')
@lru_cache(maxsize=1)
def get_indicator(year: str = '2018'):
assert year == '2018'
indicator: pd.DataFrame = pd.read_sql('indicator2018', con=sqlalchemy.create_engine('sqlite:///../data/ts.db'))
indicator['ts_code'] = indicator['ts_code'].map(lambda x: x[:6])
return indicator.set_index('ts_code')\
.drop('index', axis=1)
def _today() -> str:
return datetime.datetime.now().strftime("%Y-%m-%d")
@lru_cache(maxsize=1)
def get_financial_indicator(today: str = _today()) -> pd.DataFrame:
"""
get the tushare financial indicator from ts.db
That is a table with ts_code, end_date and grossprofit_margin column
Parameters
----------
today : str
        The today argument exists only for caching: within a single day the data does not need to be re-fetched from the database
Returns
-------
table : DataFrame
        Rows with an abnormal gross margin are excluded (grossprofit_margin <= 0 or grossprofit_margin >= 100);
        sorted lexicographically by ts_code and end_date
"""
return pd.read_sql('SELECT ts_code, end_date, grossprofit_margin FROM financial_indicator \
WHERE 0 <= grossprofit_margin and grossprofit_margin <= 100\
ORDER BY ts_code, end_date',
con=sqlalchemy.create_engine('sqlite:///../../data/ts.db'))\
.set_index(['ts_code', 'end_date'])
@lru_cache(maxsize=1)
def get_ts_statement(name: str, today: str = _today()) -> pd.DataFrame:
"""
get the statement from ts.db
Parameters
----------
name: str
statement name, for example, 'balancesheet'
today : str
        The today argument exists only for caching: within a single day the data does not need to be re-fetched from the database
Returns
-------
table : DataFrame
"""
return pd.read_sql(f'SELECT * FROM {name}',
con=sqlalchemy.create_engine('sqlite:///../../data/ts.db'))\
.set_index(['ts_code', 'end_date'])
def get_financial_indicator_by_code(code: str) -> pd.DataFrame:
""" 获取某个公司最近数年的财务指标
输入假设:
code 符合tushare要求的上市公司代码
输出规定:
列名同tushare的财务指标表格,包含了code的所有数据
"""
return get_financial_indicator().loc[code]
def save_profitability_index_to_db(data: List[Tuple[str, float, int, float, int]]) -> None:
"""
把各个公司代码、盈利增长指标、盈利增长百分位、盈利稳定指标、盈利稳定百分位等信息保存到数据库
:param data: list of tuple
元组中第一项为公司代码,以后依次为ms, ms rank,mg,mg rank
:return: None
"""
p = | pd.DataFrame(data, columns=['ts_code', 'mg', 'mg_rank', 'ms', 'ms_rank']) | pandas.DataFrame |
# %%
import os
import pandas as pd
import numpy as np
import threading
import time
base_dir = os.getcwd()
# %%
# initialize the feature table header (column names)
header = ['user', 'n_op', 'n_trans', 'op_type_0', 'op_type_1', 'op_type_2', 'op_type_3', 'op_type_4', 'op_type_5',
'op_type_6', 'op_type_7', 'op_type_8', 'op_type_9', 'op_type_perc', 'op_type_std', 'op_type_n', 'op_mode_0',
'op_mode_1', 'op_mode_2', 'op_mode_3', 'op_mode_4', 'op_mode_5', 'op_mode_6', 'op_mode_7', 'op_mode_8',
'op_mode_9', 'op_mode_perc', 'op_mode_std', 'op_mode_n', 'op_device_perc', 'op_device_std',
'op_device_nan_perc', 'op_device_n', 'op_ip_perc', 'op_ip_std', 'op_ip_nan_perc', 'op_ip_n', 'op_net_type_0',
'op_net_type_1', 'op_net_type_2', 'op_net_type_3', 'op_net_type_perc', 'op_net_type_std',
'op_net_type_nan_perc', 'op_channel_0', 'op_channel_1', 'op_channel_2', 'op_channel_3', 'op_channel_4',
'op_channel_perc', 'op_channel_std', 'op_channel_n', 'op_ip_3_perc', 'op_ip_3_std', 'op_ip_3_nan_perc',
'op_ip_3_n', 'op_ip_3_ch_freq', 'op_ip_48h_n', 'op_device_48h_n',
'op_48h_n', 'trans_platform_0', 'trans_platform_1', 'trans_platform_2', 'trans_platform_3',
'trans_platform_4', 'trans_platform_5', 'trans_platform_perc', 'trans_platform_std', 'trans_platform_n',
'trans_tunnel_in_0', 'trans_tunnel_in_1', 'trans_tunnel_in_2', 'trans_tunnel_in_3', 'trans_tunnel_in_4',
'trans_tunnel_in_5', 'trans_tunnel_in_perc', 'trans_tunnel_in_std', 'trans_tunnel_in_n',
'trans_tunnel_in_nan_perc', 'trans_tunnel_out_0', 'trans_tunnel_out_1', 'trans_tunnel_out_2',
'trans_tunnel_out_3', 'trans_tunnel_out_perc', 'trans_tunnel_out_std', 'trans_tunnel_n', 'trans_amount_max',
'trans_amount_avg', 'trans_amount_std', 'trans_type1_0', 'trans_type1_1', 'trans_type1_2', 'trans_type1_3',
'trans_type1_4', 'trans_type1_perc', 'trans_type1_std', 'trans_ip_perc', 'trans_ip_std', 'trans_ip_nan_perc',
'trans_ip_n', 'trans_type2_0', 'trans_type2_1', 'trans_type2_2', 'trans_type2_3', 'trans_type2_4',
'trans_type2_perc', 'trans_type2_std', 'trans_ip_3_perc', 'trans_ip_3_std', 'trans_ip_3_nan_perc',
'trans_ip_3_n', 'trans_ip_3_ch_freq',
'trans_amount_48h_n', 'trans_48h_n', 'trans_platform_48h_n', 'trans_ip_48h_n']
print(len(header))
# %%
feature_train = pd.DataFrame(columns=header)
feature_test_a = pd.DataFrame(columns=header)
feature_test_b = pd.DataFrame(columns=header)
train_base_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_base.csv')
train_op_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_op.csv')
train_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_trans.csv')
test_a_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_base.csv')
test_a_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_op.csv')
test_a_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_trans.csv')
test_b_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_base.csv')
test_b_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_op.csv')
test_b_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_trans.csv')
n_train = len(train_base_df)
n_test_a = len(test_a_base_df)
n_test_b = len(test_b_base_df)
# %%
# load encoder
op_type = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_type.csv')
mp_op_type = {}
for col in op_type.columns.values:
mp_op_type[col] = op_type[col].values
op_mode = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_mode.csv')
mp_op_mode = {}
for col in op_mode.columns.values:
mp_op_mode[col] = op_mode[col].values
net_type = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_net_type.csv')
mp_net_type = {}
for col in net_type.columns.values:
mp_net_type[col] = net_type[col].values
channel = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_channel.csv')
mp_channel = {}
for col in channel.columns.values:
mp_channel[col] = channel[col].values
platform = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_platform.csv')
mp_platform = {}
for col in platform.columns.values:
mp_platform[col] = platform[col].values
tunnel_in = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_tunnel_in.csv')
mp_tunnel_in = {}
for col in tunnel_in.columns.values:
mp_tunnel_in[col] = tunnel_in[col].values
tunnel_out = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_tunnel_out.csv')
mp_tunnel_out = {}
for col in tunnel_out.columns.values:
mp_tunnel_out[col] = tunnel_out[col].values
type1 = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_type1.csv')
mp_type1 = {}
for col in type1.columns.values:
mp_type1[col] = type1[col].values
type2 = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_type2.csv')
mp_type2 = {}
for col in type2.columns.values:
mp_type2[col] = type2[col].values
# %%
def process(n, isTrain=True, isA=False):
for i in range(n):
if i % 1000 == 0:
print("train - " if isTrain else "test_a - " if isA else "test_b - ", end='')
print(i)
if isTrain:
cur_user = train_base_df['user'].loc[i]
            tr_trans_user = train_trans_df[train_trans_df['user'] == cur_user]  # this user's trans records
            tr_op_user = train_op_df[train_op_df['user'] == cur_user]  # this user's op records
elif isA:
cur_user = test_a_base_df['user'].loc[i]
            tr_trans_user = test_a_trans_df[test_a_trans_df['user'] == cur_user]  # this user's trans records
            tr_op_user = test_a_op_df[test_a_op_df['user'] == cur_user]  # this user's op records
else:
cur_user = test_b_base_df['user'].loc[i]
            tr_trans_user = test_b_trans_df[test_b_trans_df['user'] == cur_user]  # this user's trans records
            tr_op_user = test_b_op_df[test_b_op_df['user'] == cur_user]  # this user's op records
        n_tr_trans_user = len(tr_trans_user)  # number of trans records for this user
        n_tr_op_user = len(tr_op_user)  # number of op records for this user
        line = [cur_user, n_tr_op_user, n_tr_trans_user]  # one row: all derived features for the current user
if n_tr_op_user > 0:
### op_type
mode_op_type = tr_op_user['op_type'].mode()[0]
code = mp_op_type[mode_op_type]
line.extend(code)
line.append(sum(tr_op_user['op_type'].apply(lambda x: 1 if x == mode_op_type else 0)) / n_tr_op_user)
s = tr_op_user['op_type'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### op_mode
mode_op_mode = tr_op_user['op_mode'].mode()[0]
code = mp_op_mode[mode_op_mode]
line.extend(code)
line.append(sum(tr_op_user['op_mode'].apply(lambda x: 1 if x == mode_op_mode else 0)) / n_tr_op_user)
s = tr_op_user['op_mode'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### op_device
mode_op_device = tr_op_user['op_device'].mode()[0]
line.append(sum(tr_op_user['op_device'].apply(lambda x: 1 if x == mode_op_device else 0)) / n_tr_op_user)
s = tr_op_user['op_device'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['op_device'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['op_device'].apply(lambda x: 1 if x == 'op_device_nan' else 0)) / n_tr_op_user)
line.append(len(s))
### op_ip
mode_op_ip = tr_op_user['ip'].mode()[0]
line.append(sum(tr_op_user['ip'].apply(lambda x: 1 if x == mode_op_ip else 0)) / n_tr_op_user)
s = tr_op_user['ip'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['ip'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['ip'].apply(lambda x: 1 if x == 'ip_nan' else 0)) / n_tr_op_user)
line.append(len(s))
### op_net_type
mode_op_net_type = tr_op_user['net_type'].mode()[0]
code = mp_net_type[mode_op_net_type]
line.extend(code)
line.append(sum(tr_op_user['net_type'].apply(lambda x: 1 if x == mode_op_net_type else 0)) / n_tr_op_user)
s = tr_op_user['net_type'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['net_type'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['net_type'].apply(lambda x: 1 if x == 'net_type_nan' else 0)) / n_tr_op_user)
### channel
mode_op_channel = tr_op_user['channel'].mode()[0]
code = mp_channel[mode_op_channel]
line.extend(code)
line.append(sum(tr_op_user['channel'].apply(lambda x: 1 if x == mode_op_channel else 0)) / n_tr_op_user)
s = tr_op_user['channel'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### ip_3
mode_op_ip_3 = tr_op_user['ip_3'].mode()[0]
line.append(sum(tr_op_user['ip_3'].apply(lambda x: 1 if x == mode_op_ip_3 else 0)) / n_tr_op_user)
s = tr_op_user['ip_3'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['ip_3'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['ip_3'].apply(lambda x: 1 if x == 'ip_3_nan' else 0)) / n_tr_op_user)
line.append(len(s))
            ### sort records by tm_diff
tr_op_user.sort_values('tm_diff', inplace=True)
cnt = 0
l = tr_op_user['ip_3'].values
pre = l[0]
for j in range(1, n_tr_op_user):
if l[j] != pre:
pre = l[j]
cnt += 1
line.append(cnt)
            ### 48h windows: max distinct ip count, max distinct op_device count, max op record count
tr_op_tm_max = tr_op_user['tm_diff'].values.max()
tr_op_tm_min = tr_op_user['tm_diff'].values.min()
gap = 48 * 3600
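            # 48 hours in seconds; tm_diff appears to be a seconds offset, so this slides non-overlapping 48-hour windows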
start = tr_op_tm_min
end = start + gap
max_48h_ip_n = 0
max_48h_op_device_n = 0
max_48h_op_n = 0
while start <= tr_op_tm_max:
gap_df = tr_op_user[(start <= tr_op_user['tm_diff']) & (tr_op_user['tm_diff'] < end)]
max_48h_ip_n = max(max_48h_ip_n, gap_df['ip'].nunique())
max_48h_op_device_n = max(max_48h_op_device_n, gap_df['op_device'].nunique())
max_48h_op_n = max(max_48h_op_n, len(gap_df))
start = end
end += gap
line.extend([max_48h_ip_n, max_48h_op_device_n, max_48h_op_n])
else:
line.extend([-1] * 57)
if n_tr_trans_user > 0:
### platform
mode_trans_platform = tr_trans_user['platform'].mode()[0]
code = mp_platform[mode_trans_platform]
line.extend(code)
line.append(
sum(tr_trans_user['platform'].apply(lambda x: 1 if x == mode_trans_platform else 0)) / n_tr_trans_user)
s = tr_trans_user['platform'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### tunnel_in
mode_trans_tunnel_in = tr_trans_user['tunnel_in'].mode()[0]
code = mp_tunnel_in[mode_trans_tunnel_in]
line.extend(code)
line.append(sum(
tr_trans_user['tunnel_in'].apply(lambda x: 1 if x == mode_trans_tunnel_in else 0)) / n_tr_trans_user)
s = tr_trans_user['tunnel_in'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
# line.append(tr_trans_user['tunnel_in'].isnull().sum() / n_tr_trans_user)
line.append(
sum(tr_trans_user['tunnel_in'].apply(lambda x: 1 if x == 'tunnel_in_nan' else 0)) / n_tr_trans_user)
### tunnel_out
mode_trans_tunnel_out = tr_trans_user['tunnel_out'].mode()[0]
code = mp_tunnel_out[mode_trans_tunnel_out]
line.extend(code)
line.append(sum(
tr_trans_user['tunnel_out'].apply(lambda x: 1 if x == mode_trans_tunnel_out else 0)) / n_tr_trans_user)
s = tr_trans_user['tunnel_out'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### amount
s = tr_trans_user['amount']
line.append(s.values.max())
line.append(s.values.mean())
line.append(s.values.std())
### type1
mode_trans_type1 = tr_trans_user['type1'].mode()[0]
code = mp_type1[mode_trans_type1]
line.extend(code)
line.append(
sum(tr_trans_user['type1'].apply(lambda x: 1 if x == mode_trans_type1 else 0)) / n_tr_trans_user)
s = tr_trans_user['type1'].value_counts()
line.append(np.std(s.values))
### trans_ip
mode_trans_ip = tr_trans_user['ip'].mode()[0]
line.append(sum(tr_trans_user['ip'].apply(lambda x: 1 if x == mode_trans_ip else 0)) / n_tr_trans_user)
s = tr_trans_user['ip'].value_counts()
line.append(np.std(s.values))
# line.append(tr_trans_user['ip'].isnull().sum() / n_tr_trans_user)
line.append(sum(tr_trans_user['ip'].apply(lambda x: 1 if x == 'ip_nan' else 0)) / n_tr_trans_user)
line.append(len(s))
### type2
mode_trans_type2 = tr_trans_user['type2'].mode()[0]
code = mp_type2[mode_trans_type2]
line.extend(code)
line.append(
sum(tr_trans_user['type2'].apply(lambda x: 1 if x == mode_trans_type2 else 0)) / n_tr_trans_user)
s = tr_trans_user['type2'].value_counts()
line.append(np.std(s.values))
### trans_ip_3
mode_trans_ip_3 = tr_trans_user['ip_3'].mode()[0]
line.append(sum(tr_trans_user['ip_3'].apply(lambda x: 1 if x == mode_trans_ip_3 else 0)) / n_tr_trans_user)
s = tr_trans_user['ip'].value_counts()
line.append(np.std(s.values))
line.append(sum(tr_trans_user['ip_3'].apply(lambda x: 1 if x == 'ip_3_nan' else 0)) / n_tr_trans_user)
line.append(len(s))
            ### sort records by tm_diff
tr_trans_user.sort_values('tm_diff', inplace=True)
cnt = 0
l = tr_trans_user['ip_3'].values
pre = l[0]
for j in range(1, n_tr_trans_user):
if l[j] != pre:
pre = l[j]
cnt += 1
line.append(cnt)
            ### 48h windows: max total amount, max trans count, max distinct platform count, max distinct ip count
tr_trans_tm_max = tr_trans_user['tm_diff'].values.max()
tr_trans_tm_min = tr_trans_user['tm_diff'].values.min()
gap = 48 * 3600
start = tr_trans_tm_min
end = start + gap
max_48h_sum_amount = 0
max_48h_trans_n = 0
max_48h_platform_n = 0
max_48h_ip_n = 0
while start <= tr_trans_tm_max:
gap_df = tr_trans_user[(start <= tr_trans_user['tm_diff']) & (tr_trans_user['tm_diff'] < end)]
max_48h_sum_amount = max(max_48h_sum_amount, gap_df['amount'].values.sum())
max_48h_trans_n = max(max_48h_trans_n, len(gap_df))
max_48h_platform_n = max(max_48h_platform_n, gap_df['platform'].nunique())
max_48h_ip_n = max(max_48h_ip_n, gap_df['ip'].nunique())
start = end
end += gap
line.extend([max_48h_sum_amount, max_48h_trans_n, max_48h_platform_n, max_48h_ip_n])
else:
line.extend([-1] * 56)
# print(len(line))
        ### append the row to the feature matrix
if isTrain:
feature_train.loc[len(feature_train)] = line
elif isA:
feature_test_a.loc[len(feature_test_a)] = line
else:
feature_test_b.loc[len(feature_test_b)] = line
    # save the results
if isTrain:
feature_train.to_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv', index=False)
elif isA:
feature_test_a.to_csv(base_dir + '/dataset/dataset2/testset/feature_test_a.csv', index=False)
else:
feature_test_b.to_csv(base_dir + '/dataset/dataset2/testset/feature_test_b.csv', index=False)
# %%
process(n_train, isTrain=True)
process(n_test_a, isTrain=False, isA=True)
process(n_test_b, isTrain=False, isA=False)
# %%
# multithreaded version: process train / test_a / test_b in parallel
def process_threaded(n_train, n_test_a, n_test_b):
def process1():
process(n_train, isTrain=True)
def process2():
process(n_test_a, isTrain=False, isA=True)
def process3():
process(n_test_b, isTrain=False, isA=False)
t1 = threading.Thread(target=process1)
t1.start()
t2 = threading.Thread(target=process2)
t2.start()
t3 = threading.Thread(target=process3)
t3.start()
# %%
process_threaded(n_train, n_test_a, n_test_b)
# %%
# merge into the main matrix
### the following six lines can be skipped
feature_train = pd.read_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv')
feature_test_a = pd.read_csv(base_dir + '/dataset/dataset2/testset/feature_test_a.csv')
feature_test_b = pd.read_csv(base_dir + '/dataset/dataset2/testset/feature_test_b.csv')
train_base_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_base.csv')
test_a_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_base.csv')
test_b_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_base.csv')
feature_train = feature_train.drop(labels='user', axis=1)
feature_test_a = feature_test_a.drop(labels='user', axis=1)
feature_test_b = feature_test_b.drop(labels='user', axis=1)
train_base_df = train_base_df.join(feature_train)
test_a_base_df = test_a_base_df.join(feature_test_a)
test_b_base_df = test_b_base_df.join(feature_test_b)
train_base_df.to_csv(base_dir + '/dataset/dataset2/trainset/train_main.csv', index=False)
test_a_base_df.to_csv(base_dir + '/dataset/dataset2/testset/test_a_main.csv', index=False)
test_b_base_df.to_csv(base_dir + '/dataset/dataset2/testset/test_b_main.csv', index=False)
# %%
# ####################### everything below is trial/scratch code only ########################### #
feature_train = pd.read_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv')
feature_test = pd.read_csv(base_dir + '/dataset/dataset2/testset/feature_test.csv')
feature_train = feature_train.drop(labels=['op_freq', 'op_ip_freq', 'op_ip_3_freq', 'trans_freq', 'trans_amount_freq',
'trans_ip_freq', 'trans_ip_3_freq'], axis=1)
feature_test = feature_test.drop(labels=['op_freq', 'op_ip_freq', 'op_ip_3_freq', 'trans_freq', 'trans_amount_freq',
'trans_ip_freq', 'trans_ip_3_freq'], axis=1)
feature_train.to_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv', index=False)
feature_test.to_csv(base_dir + '/dataset/dataset2/testset/feature_test.csv', index=False)
# %%
feature_train = pd.read_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv')
feature_test = pd.read_csv(base_dir + '/dataset/dataset2/testset/feature_test.csv')
train_base_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_base.csv')
train_op_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_op.csv')
train_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_trans.csv')
test_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_base.csv')
test_op_df = | pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_op.csv') | pandas.read_csv |
import os
import multiprocessing as mp
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tqdm
import cv2
# Specify HSV color range for detection
lower = (25, 40, 200)
upper = (30, 100, 255)
data_dir = 'data/pufferfish-struggle'
num_images = len([name for name in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, name))])
progress = np.zeros(num_images)
print(f'Total number of frames: {num_images:d}')
output_file = 'output.csv'
# Counts number of pixels in a given image that fall within specified HSV range
# Input images must match the filename format 'frameXXXX.png' with XXXX being contiguous values from 0000 to 9999
def count_pixels(idx):
filename = f'frame{idx:04d}.png'
path = os.path.join(data_dir, filename)
im_orig = cv2.imread(path)
im_orig = cv2.cvtColor(im_orig, cv2.COLOR_BGR2RGB)
im = im_orig[275:395, 1860:1980]
# Convert to HSV space
hsv_im = cv2.cvtColor(im, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(hsv_im, lower, upper)
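    # inRange() marks in-range pixels with 255, so summing and dividing by 255 gives the pixel count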
count = mask.sum() / 255.
return count
# Parallel process to count pixels in indexed images
with mp.Pool(mp.cpu_count()) as pool:
progress = list(tqdm.tqdm(pool.imap(count_pixels, range(num_images)), total=num_images))
# Save data
df = | pd.DataFrame(progress) | pandas.DataFrame |
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(4, 1))
tm.close()
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf, close
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
import matplotlib.pyplot as plt
n = 10
df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n),
'height': random.normal(66, 4, size=n)})
axes = df.height.hist(by=df.gender)
self.assertEqual(len(plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure, close
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_kde(self):
_skip_if_no_scipy()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_kwargs(self):
_skip_if_no_scipy()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_color(self):
_skip_if_no_scipy()
ax = self.ts.plot(kind='kde', logy=True, color='r')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].get_color(), 'r')
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
def tearDown(self):
tm.close()
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self.assertEqual(ax.xaxis.get_label().get_text(), 'a')
@slow
def test_explicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b', label='LABEL')
self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL')
@slow
def test_plot_xy(self):
import matplotlib.pyplot as plt
# columns.inferred_type == 'string'
df = tm.makeTimeDataFrame()
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_xcompat(self):
import pandas as pd
import matplotlib.pyplot as plt
df = tm.makeTimeDataFrame()
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)},
index=np.arange(99, -1, -1), dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64)
tm.assert_series_equal(rs, df.y)
def _check_data(self, xp, rs):
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
assert_array_equal(xpdata, rsdata)
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
@slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, sharex=True, legend=True)
for ax in axes:
self.assert_(ax.get_legend() is not None)
axes = df.plot(subplots=True, sharex=True)
for ax in axes[:-2]:
[self.assert_(not label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_xticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_yticklabels()]
axes = df.plot(subplots=True, sharex=False)
for ax in axes:
[self.assert_(label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
@slow
def test_plot_scatter(self):
from matplotlib.pylab import close
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot, x='x', y='y', kind='scatter')
_check_plot_works(df.plot, x=1, y=2, kind='scatter')
with tm.assertRaises(ValueError):
df.plot(x='x', kind='scatter')
with tm.assertRaises(ValueError):
df.plot(y='y', kind='scatter')
@slow
def test_plot_bar(self):
from matplotlib.pylab import close
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot, kind='bar')
_check_plot_works(df.plot, kind='bar', legend=False)
_check_plot_works(df.plot, kind='bar', subplots=True)
_check_plot_works(df.plot, kind='bar', stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot, kind='bar')
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
_check_plot_works(df.plot, kind='bar')
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
ax = df.plot(kind='bar', stacked='True', grid=True)
self.assertEqual(ax.xaxis.get_ticklocs()[0],
ax.patches[0].get_x() + ax.patches[0].get_width() / 2)
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True)
self.assertEqual(ax.xaxis.get_ticklocs()[0],
ax.patches[0].get_x() + ax.patches[0].get_width())
@slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True, log=True)
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]),
Series([300, 500])]).plot(log=True, kind='bar',
subplots=True)
assert_array_equal(ax[0].yaxis.get_ticklocs(), expected)
assert_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@slow
def test_boxplot(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
df['indic2'] = ['foo', 'bar', 'foo'] * 2
_check_plot_works(df.boxplot)
_check_plot_works(df.boxplot, column=['one', 'two'])
_check_plot_works(df.boxplot, column=['one', 'two'], by='indic')
_check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
_check_plot_works(df.boxplot, by='indic')
_check_plot_works(df.boxplot, by=['indic', 'indic2'])
_check_plot_works(plotting.boxplot, df['one'])
_check_plot_works(df.boxplot, notch=1)
_check_plot_works(df.boxplot, by='indic', notch=1)
df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
_check_plot_works(df.boxplot, by='X')
@slow
def test_kde(self):
_skip_if_no_scipy()
df = DataFrame(randn(100, 4))
_check_plot_works(df.plot, kind='kde')
_check_plot_works(df.plot, kind='kde', subplots=True)
ax = df.plot(kind='kde')
self.assert_(ax.get_legend() is not None)
axes = df.plot(kind='kde', logy=True, subplots=True)
for ax in axes:
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_hist(self):
import matplotlib.pyplot as plt
df = DataFrame(randn(100, 4))
_check_plot_works(df.hist)
_check_plot_works(df.hist, grid=False)
# make sure layout is handled
df = DataFrame(randn(100, 3))
_check_plot_works(df.hist)
axes = df.hist(grid=False)
self.assert_(not axes[1, 1].get_visible())
df = DataFrame(randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 6))
_check_plot_works(df.hist)
# make sure sharex, sharey is handled
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
_check_plot_works(df.hist, figsize=(8, 10))
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 20
xrot, yrot = 30, 30
ax = ser.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
xf, yf = 20, 20
xrot, yrot = 30, 30
axes = df.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
for i, ax in enumerate(axes.ravel()):
if i < len(df.columns):
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
tm.close()
# make sure kwargs to hist are handled
ax = ser.hist(normed=True, cumulative=True, bins=4)
# height of last bin (index 5) must be 1.0
self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self.assertEqual(ax.get_yscale(), 'log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with tm.assertRaises(AttributeError):
ser.hist(foo='bar')
@slow
def test_hist_layout(self):
import matplotlib.pyplot as plt
df = DataFrame(randn(100, 4))
layout_to_expected_size = (
{'layout': None, 'expected_size': (2, 2)}, # default is 2x2
{'layout': (2, 2), 'expected_size': (2, 2)},
{'layout': (4, 1), 'expected_size': (4, 1)},
{'layout': (1, 4), 'expected_size': (1, 4)},
{'layout': (3, 3), 'expected_size': (3, 3)},
)
for layout_test in layout_to_expected_size:
ax = df.hist(layout=layout_test['layout'])
self.assertEqual(len(ax), layout_test['expected_size'][0])
self.assertEqual(len(ax[0]), layout_test['expected_size'][1])
# layout too small for all 4 plots
with tm.assertRaises(ValueError):
df.hist(layout=(1, 1))
# invalid format for layout
with tm.assertRaises(ValueError):
df.hist(layout=(1,))
@slow
def test_scatter(self):
_skip_if_no_scipy()
df = DataFrame(randn(100, 2))
import pandas.tools.plotting as plt
def scat(**kwds):
return plt.scatter_matrix(df, **kwds)
_check_plot_works(scat)
_check_plot_works(scat, marker='+')
_check_plot_works(scat, vmin=0)
_check_plot_works(scat, diagonal='kde')
_check_plot_works(scat, diagonal='density')
_check_plot_works(scat, diagonal='hist')
def scat2(x, y, by=None, ax=None, figsize=None):
return plt.scatter_plot(df, x, y, by, ax, figsize=None)
_check_plot_works(scat2, 0, 1)
grouper = Series(np.repeat([1, 2, 3, 4, 5], 20), df.index)
_check_plot_works(scat2, 0, 1, by=grouper)
@slow
def test_andrews_curves(self):
from pandas import read_csv
from pandas.tools.plotting import andrews_curves
path = os.path.join(curpath(), 'data', 'iris.csv')
df = read_csv(path)
_check_plot_works(andrews_curves, df, 'Name')
@slow
def test_parallel_coordinates(self):
from pandas import read_csv
from pandas.tools.plotting import parallel_coordinates
from matplotlib import cm
path = os.path.join(curpath(), 'data', 'iris.csv')
df = read_csv(path)
_check_plot_works(parallel_coordinates, df, 'Name')
_check_plot_works(parallel_coordinates, df, 'Name',
colors=('#556270', '#4ECDC4', '#C7F464'))
_check_plot_works(parallel_coordinates, df, 'Name',
colors=['dodgerblue', 'aquamarine', 'seagreen'])
_check_plot_works(parallel_coordinates, df, 'Name',
colors=('#556270', '#4ECDC4', '#C7F464'))
_check_plot_works(parallel_coordinates, df, 'Name',
colors=['dodgerblue', 'aquamarine', 'seagreen'])
_check_plot_works(parallel_coordinates, df, 'Name', colormap=cm.jet)
df = read_csv(path, header=None, skiprows=1, names=[1, 2, 4, 8,
'Name'])
_check_plot_works(parallel_coordinates, df, 'Name', use_columns=True)
_check_plot_works(parallel_coordinates, df, 'Name',
xticks=[1, 5, 25, 125])
@slow
def test_radviz(self):
from pandas import read_csv
from pandas.tools.plotting import radviz
from matplotlib import cm
path = os.path.join(curpath(), 'data', 'iris.csv')
df = read_csv(path)
_check_plot_works(radviz, df, 'Name')
_check_plot_works(radviz, df, 'Name', colormap=cm.jet)
@slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self.assertEqual(leg_title.get_text(), 'group,individual')
def _check_plot_fails(self, f, *args, **kwargs):
with tm.assertRaises(Exception):
f(*args, **kwargs)
@slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^', 1: '+', 2: 'o'},
{0: '^', 1: '+'},
['^', '+', 'o'],
['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
self.assertEqual(l.get_marker(), markers[i])
@slow
def test_line_colors(self):
import matplotlib.pyplot as plt
import sys
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
lines = ax.get_lines()
for i, l in enumerate(lines):
xp = custom_colors[i]
rs = l.get_color()
self.assertEqual(xp, rs)
tmp = sys.stderr
sys.stderr = StringIO()
try:
tm.close()
ax2 = df.plot(colors=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(lines, lines2):
self.assertEqual(l1.get_color(), l2.get_color())
finally:
sys.stderr = tmp
tm.close()
ax = df.plot(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
lines = ax.get_lines()
for i, l in enumerate(lines):
xp = rgba_colors[i]
rs = l.get_color()
self.assertEqual(xp, rs)
| tm.close() | pandas.util.testing.close |
# -*- coding: utf-8 -*-
import argparse
import json
import os
import re
from io import StringIO
from pathlib import Path
import dotenv
import pandas as pd
import requests
from utils import get_gene_id2length
DOTENV_KEY2VAL = dotenv.dotenv_values()
def make_tissue2subtissue2sample_id(rawdir: str) -> pd.DataFrame:
"""Construct multi-indexed pd.Series that maps each tissue-subtissue
combination to the corresponding column names"""
sample_id_df = pd.read_csv(
Path(rawdir) / "GTEx_Analysis_v8_Annotations_SampleAttributesDS.txt", sep="\t",
)
# SMTS refers to main tissue type, SMTSD refers to subtissue type
sample_id2tissue_type_subtype_df = pd.DataFrame(
index=sample_id_df["SAMPID"].values,
data=sample_id_df[["SMTS", "SMTSD"]].values,
columns=["tissue", "subtissue"],
)
# Now invert to go from tissue-subtissue to sample_id
tissue2subtissue2sample_id = pd.Series(
index= | pd.MultiIndex.from_frame(sample_id2tissue_type_subtype_df) | pandas.MultiIndex.from_frame |
import unittest
import pandas as pd
import numpy as np
from autopandas_v2.ml.featurization.featurizer import RelationGraph
from autopandas_v2.ml.featurization.graph import GraphEdge, GraphEdgeType, GraphNodeType, GraphNode
from autopandas_v2.ml.featurization.options import GraphOptions
get_node_type = GraphNodeType.get_node_type
class TestRelationGraphFeaturizer(unittest.TestCase):
def test_basic_max(self):
input_df = pd.DataFrame([[1, 2], [2, 3], [2, 0]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
input_20 = GraphNode("I0", '[2,0]', get_node_type(input_df.iat[2, 0]))
input_21 = GraphNode("I0", '[2,1]', get_node_type(input_df.iat[2, 1]))
output_df = pd.DataFrame([[2, 3]])
output_00 = GraphNode("O0", '[0,0]', get_node_type(output_df.iat[0, 0]))
output_01 = GraphNode("O0", '[0,1]', get_node_type(output_df.iat[0, 1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output_df)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_20, GraphEdgeType.ADJACENCY),
GraphEdge(input_20, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_11, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_01, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# equality edges
equality_edges = [
GraphEdge(input_10, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_20, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_00, GraphEdgeType.EQUALITY), # redundant
GraphEdge(input_11, output_01, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_max_series(self):
input_df = pd.DataFrame([[1, 2], [2, 3], [2, 0]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
input_20 = GraphNode("I0", '[2,0]', get_node_type(input_df.iat[2, 0]))
input_21 = GraphNode("I0", '[2,1]', get_node_type(input_df.iat[2, 1]))
output = pd.DataFrame.max(input_df)
output_00 = GraphNode("O0", '[0,0]', get_node_type(output.iat[0]))
output_10 = GraphNode("O0", '[1,0]', get_node_type(output.iat[1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_20, GraphEdgeType.ADJACENCY),
GraphEdge(input_20, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_11, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_10, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# equality edges
equality_edges = [
GraphEdge(input_10, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_20, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_00, GraphEdgeType.EQUALITY), # redundant
GraphEdge(input_11, output_10, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_values(self):
input_df = pd.DataFrame([[1, 2], [3, 4]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
output = input_df.values
output_00 = GraphNode("O0", '[0,0]', get_node_type(output[0, 0]))
output_01 = GraphNode("O0", '[0,1]', get_node_type(output[0, 1]))
output_10 = GraphNode("O0", '[1,0]', get_node_type(output[1, 0]))
output_11 = GraphNode("O0", '[1,1]', get_node_type(output[1, 1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_01, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_10, GraphEdgeType.ADJACENCY),
GraphEdge(output_10, output_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_01, output_11, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
GraphEdge(input_00, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_10, output_10, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_01, GraphEdgeType.EQUALITY),
GraphEdge(input_11, output_11, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_dict(self):
input_df = pd.DataFrame([[1, 2], [3, 4]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
output = {"A": [1, 3], "B": [2, 4]}
output_00 = GraphNode("O0", '[0,0]', get_node_type(output['A'][0]))
output_01 = GraphNode("O0", '[0,1]', get_node_type(output['B'][0]))
output_10 = GraphNode("O0", '[1,0]', get_node_type(output['A'][1]))
output_11 = GraphNode("O0", '[1,1]', get_node_type(output['B'][1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_01, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_10, GraphEdgeType.ADJACENCY),
GraphEdge(output_10, output_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_01, output_11, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
GraphEdge(input_00, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_10, output_10, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_01, GraphEdgeType.EQUALITY),
GraphEdge(input_11, output_11, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_groupby_output(self):
input_df = pd.DataFrame({
"Name": ["Alice", "Bob", "Mallory", "Mallory", "Bob", "Mallory"],
"City": ["Seattle", "Seattle", "Portland", "Seattle", "Seattle", "Portland"]})
output = input_df.groupby("Name")
options = GraphOptions()
options.NODE_TYPES = True
options.ADJACENCY_EDGES = False
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
alice_nodes_in = [
GraphNode("I0", '[0,0]', GraphNodeType.STR)
]
alice_nodes_out = [
GraphNode("O0_0", '[0,0]', GraphNodeType.STR)
]
bob_nodes_in = [
GraphNode("I0", '[1,0]', GraphNodeType.STR),
GraphNode("I0", '[4,0]', GraphNodeType.STR)
]
bob_nodes_out = [
GraphNode("O0_1", '[0,0]', GraphNodeType.STR),
GraphNode("O0_1", '[1,0]', GraphNodeType.STR)
]
mallory_nodes_in = [
GraphNode("I0", '[2,0]', GraphNodeType.STR),
GraphNode("I0", '[3,0]', GraphNodeType.STR),
GraphNode("I0", '[5,0]', GraphNodeType.STR)
]
mallory_nodes_out = [
GraphNode("O0_2", '[0,0]', GraphNodeType.STR),
GraphNode("O0_2", '[1,0]', GraphNodeType.STR),
GraphNode("O0_2", '[2,0]', GraphNodeType.STR)
]
seattle_nodes_in = [
GraphNode("I0", '[0,1]', GraphNodeType.STR),
GraphNode("I0", '[1,1]', GraphNodeType.STR),
GraphNode("I0", '[3,1]', GraphNodeType.STR),
GraphNode("I0", '[4,1]', GraphNodeType.STR),
]
seattle_nodes_out = [
GraphNode("O0_0", '[0,1]', GraphNodeType.STR),
GraphNode("O0_1", '[0,1]', GraphNodeType.STR),
GraphNode("O0_2", '[1,1]', GraphNodeType.STR)
]
portland_nodes_in = [
GraphNode("I0", '[2,1]', GraphNodeType.STR),
GraphNode("I0", '[5,1]', GraphNodeType.STR)
]
portland_nodes_out = [
GraphNode("O0_2", '[0,1]', GraphNodeType.STR),
GraphNode("O0_2", '[2,1]', GraphNodeType.STR)
]
def check_edges(in_nodes, out_nodes):
for in_node in in_nodes:
for out_node in out_nodes:
edge = GraphEdge(in_node, out_node, GraphEdgeType.EQUALITY)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
check_edges(alice_nodes_in, alice_nodes_out)
check_edges(bob_nodes_in, bob_nodes_out)
check_edges(mallory_nodes_in, mallory_nodes_out)
check_edges(portland_nodes_in, portland_nodes_out)
check_edges(seattle_nodes_in, seattle_nodes_out)
def test_groupby_input(self):
df = pd.DataFrame({
"Name": ["Alice", "Bob", "Mallory", "Mallory", "Bob", "Mallory"],
"City": ["Seattle", "Seattle", "Portland", "Seattle", "Seattle", "Portland"]})
input_ = df.groupby("Name")
output = input_.count().reset_index()
options = GraphOptions()
options.NODE_TYPES = True
options.ADJACENCY_EDGES = False
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_], output)
rel_graph_edges = rel_graph.edges
alice_nodes_in = [
GraphNode("I0_0", '[0,0]', GraphNodeType.STR)
]
alice_nodes_out = [
GraphNode("O0", '[0,0]', GraphNodeType.STR)
]
bob_nodes_in = [
GraphNode("I0_1", '[0,0]', GraphNodeType.STR),
GraphNode("I0_1", '[1,0]', GraphNodeType.STR)
]
bob_nodes_out = [
GraphNode("O0", '[1,0]', GraphNodeType.STR)
]
mallory_nodes_in = [
GraphNode("I0_2", '[0,0]', GraphNodeType.STR),
GraphNode("I0_2", '[1,0]', GraphNodeType.STR),
GraphNode("I0_2", '[2,0]', GraphNodeType.STR)
]
mallory_nodes_out = [
GraphNode("O0", '[2,0]', GraphNodeType.STR)
]
def check_edges(in_nodes, out_nodes):
for in_node in in_nodes:
for out_node in out_nodes:
edge = GraphEdge(in_node, out_node, GraphEdgeType.EQUALITY)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
check_edges(alice_nodes_in, alice_nodes_out)
check_edges(bob_nodes_in, bob_nodes_out)
check_edges(mallory_nodes_in, mallory_nodes_out)
def test_idx_multi(self):
tuples = [("bar", "one"), ("bar", "two")]
index = | pd.MultiIndex.from_tuples(tuples) | pandas.MultiIndex.from_tuples |
# coding: utf-8
# CS FutureMobility Tool
# See full license in LICENSE.txt.
import numpy as np
import pandas as pd
#import openmatrix as omx
from IPython.display import display
from openpyxl import load_workbook,Workbook
from time import strftime
import os.path
import mode_choice.model_defs as md
import mode_choice.matrix_utils as mtx
import config
''' Utilities to summarize the outputs of Mode Choice '''
def display_mode_share(mc_obj):
'''
    This displays a mode share summary by market segment (with / without vehicle, peak / off-peak) in the IPython notebook.
:param mc_obj: mode choice module object as defined in the IPython notebook
'''
# display mode share tables
avg_trips_by_mode = pd.DataFrame(None)
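    # accumulate trips across all purposes, by market segment (vehicle ownership x peak, rows) and mode (columns)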
for purpose in ['HBW','HBO', 'NHB', 'HBSc1', 'HBSc2', 'HBSc3']:
avg_trips_by_mode = avg_trips_by_mode.add(pd.DataFrame({pv:{mode:(mc_obj.table_container.get_table(purpose)[pv][mode].sum()) for mode in mc_obj.table_container.get_table(purpose)[pv]} for pv in ['0_PK','1_PK','0_OP','1_OP']}).T,
fill_value = 0)
avg_mode_share = avg_trips_by_mode.divide(avg_trips_by_mode.sum(1),axis = 0)
display(avg_mode_share.style.format("{:.2%}"))
def write_boston_neighbortown_mode_share_to_excel(mc_obj):
'''
Writes mode share summary by purpose and market segment to an Excel workbook.
Applies only to trips to/from Boston
:param mc_obj: mode choice module object as defined in the IPython notebook
    The output Excel filename is generated automatically in the output path defined in config.py.
'''
out_excel_fn = mc_obj.config.out_path + "mode_share_bosNB_{0}.xlsx".format(strftime("%Y%m%d"))
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
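    # attaching the loaded workbook lets new sheets be added without discarding those already in the file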
for purp in md.purposes:
mode_share = pd.DataFrame(columns = md.peak_veh)
trip_table = mc_obj.table_container.get_table(purp)
for pv in md.peak_veh:
for mode in trip_table[pv].keys():
                # study area zones might not start at zone 0 and could have discontinuous TAZ IDs
trip_table_o = mtx.OD_slice(trip_table[pv][mode], O_slice = md.taz['BOSTON'], D_slice = md.taz['BOS_AND_NEI'])
trip_table_d = mtx.OD_slice(trip_table[pv][mode], O_slice = md.taz['BOS_AND_NEI'], D_slice = md.taz['BOSTON'])
trip_table_b = mtx.OD_slice(trip_table[pv][mode], O_slice = md.taz['BOSTON'], D_slice = md.taz['BOSTON'])
trip_table_bos = trip_table_o + trip_table_d - trip_table_b
mode_share.loc[mode,pv] = trip_table_bos.sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purp in book.sheetnames: # if sheetname exists, delete
book.remove(book[purp])
writer.save()
mode_share.to_excel(writer, sheet_name = purp)
writer.save()
def write_study_area_mode_share_to_excel(mc_obj, out_excel_fn = None):
'''
Writes mode share summary by purpose and market segment to an Excel workbook.
Applies only to trips to/from study area
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_excel_fn: output Excel filename, by default in the output path defined in config.py
'''
if out_excel_fn is None:
out_excel_fn = mc_obj.config.out_path + "mode_share_study_area_{0}.xlsx".format(strftime("%Y%m%d"))
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
for purp in md.purposes:
mode_share = pd.DataFrame(columns = md.peak_veh)
trip_table = mc_obj.table_container.get_table(purp)
for pv in md.peak_veh:
for mode in trip_table[pv].keys():
trip_table_o = mtx.OD_slice(trip_table[pv][mode], O_slice = md.study_area)
trip_table_d = mtx.OD_slice(trip_table[pv][mode], D_slice = md.study_area)
trip_table_ii = mtx.OD_slice(trip_table[pv][mode], O_slice = md.study_area, D_slice = md.study_area)
trip_table_sa = trip_table_o + trip_table_d - trip_table_ii
mode_share.loc[mode,pv] = trip_table_sa.sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purp in book.sheetnames: # if sheetname exists, delete
book.remove(book[purp])
writer.save()
mode_share.to_excel(writer, sheet_name = purp)
writer.save()
def write_mode_share_to_excel(mc_obj,purpose, out_excel_fn = None):
'''
Writes mode share summary by purpose and market segment to an Excel workbook.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param purpose: can be a single purpose or 'all', in which case the Excel workbook has six sheets, one for each purpose.
:param out_excel_fn: output Excel filename, by default in the output path defined in config.py
'''
if out_excel_fn is None:
out_excel_fn = mc_obj.config.out_path + "MC_mode_share_{0}_{1}.xlsx".format(purpose, strftime("%Y%m%d"))
if purpose == 'all':
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
for purp in md.purposes:
trip_table = mc_obj.table_container.get_table(purp)
mode_share = pd.DataFrame(columns = md.peak_veh)
for pv in md.peak_veh:
for mode in trip_table[pv].keys():
mode_share.loc[mode,pv] = trip_table[pv][mode].sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purp in book.sheetnames: # if sheetname exists, delete
book.remove(book[purp])
writer.save()
mode_share.to_excel(writer, sheet_name = purp)
writer.save()
elif purpose in md.purposes:
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
mode_share = pd.DataFrame(columns = md.peak_veh)
for pv in md.peak_veh:
for mode in mc_obj.trips_by_mode[pv].keys():
mode_share.loc[mode,pv] = mc_obj.trips_by_mode[pv][mode].sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purpose in book.sheetnames: # if sheetname exists, delete
book.remove(book[purpose])
writer.save()
mode_share.to_excel(writer, sheet_name = purpose)
writer.save()
def __mt_prod_attr_nhood(mc_obj, trip_table, skim): # miles traveled. For VMT and PMT, by neighborhood
    # element-wise product of the OD trip table and the distance skim gives miles traveled per OD pair
mt_total = trip_table * skim['Length (Skim)']
# calculate marginals
prod = pd.DataFrame(np.sum(mt_total,axis = 1)/2, columns = ['Production'])
attr = pd.DataFrame(np.sum(mt_total,axis = 0) / 2, columns = ['Attraction'])
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
mt_taz = pd.concat([towns[[md.taz_ID_field,'BOSTON_NB']],prod,attr],axis = 1,join = 'inner')
mt_taz.index.names=['Boston Neighborhood']
return mt_taz.groupby(['BOSTON_NB']).sum()[['Production','Attraction']].reset_index()
def __trip_prod_attr_nhood(mc_obj, trip_table):
mt_total = trip_table
# calculate marginals
prod = pd.DataFrame(np.sum(mt_total,axis = 1), columns = ['Production'])
attr = pd.DataFrame(np.sum(mt_total,axis = 0), columns = ['Attraction'])
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
mt_taz = pd.concat([towns[[md.taz_ID_field,'BOSTON_NB']],prod,attr],axis = 1,join = 'inner')
mt_taz.index.names=['Boston Neighborhood']
return mt_taz.groupby(['BOSTON_NB']).sum()[['Production','Attraction']].reset_index()
def sm_vmt_by_neighborhood(mc_obj, out_fn = None, by = None, sm_mode = 'SM_RA'):
'''
Summarizes VMT production and attraction by the 26 Boston neighborhoods for Shared Mobility Modes.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + sm_mode + f'_vmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + sm_mode + f'_vmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by not in [None,'peak','veh_own','purpose']:
print('Only supports VMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
vmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
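                        # person trips are divided by the mode's average occupancy (md.AO_dict) to convert them to vehicle trips for VMT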
auto_trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode] / md.AO_dict[sm_mode]
vmt_table = __mt_prod_attr_nhood(mc_obj,auto_trip_table,skim_dict[peak])
vmt_table['peak'] = peak
vmt_table['veh_own'] = veh_own
vmt_table['purpose'] = purpose
vmt_master_table = vmt_master_table.append(vmt_table, sort = True)
if by == None:
vmt_summary = vmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
vmt_summary = pd.concat([
vmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
vmt_summary = pd.concat([
vmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
vmt_summary = pd.concat([
vmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in vmt_master_table.purpose.unique()],axis = 1, keys= vmt_master_table.purpose.unique())
vmt_summary.to_csv(out_fn)
def vmt_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
Summarizes VMT production and attraction by the 26 Boston neighborhoods.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'vmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'vmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by not in [None,'peak','veh_own','purpose']:
print('Only supports VMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
vmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
auto_trip_table = sum([
mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] / md.AO_dict[mode]
for mode in ['DA','SR2','SR3+','SM_RA','SM_SH'] if mode in drive_modes])
vmt_table = __mt_prod_attr_nhood(mc_obj,auto_trip_table,skim_dict[peak])
vmt_table['peak'] = peak
vmt_table['veh_own'] = veh_own
vmt_table['purpose'] = purpose
vmt_master_table = vmt_master_table.append(vmt_table, sort = True)
if by == None:
vmt_summary = vmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
vmt_summary = pd.concat([
vmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
vmt_summary = pd.concat([
vmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
vmt_summary = pd.concat([
vmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in vmt_master_table.purpose.unique()],axis = 1, keys= vmt_master_table.purpose.unique())
vmt_summary.to_csv(out_fn)
def pmt_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
Summarizes PMT production and attraction by the 26 Boston neighborhoods.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'pmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'pmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by not in [None,'peak','veh_own','purpose']:
print('Only supports PMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
pmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
person_trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] for mode in md.modes if mode in drive_modes])
pmt_table = __mt_prod_attr_nhood(mc_obj,person_trip_table,skim_dict[peak])
pmt_table['peak'] = peak
pmt_table['veh_own'] = veh_own
pmt_table['purpose'] = purpose
pmt_master_table = pmt_master_table.append(pmt_table, sort = True)
if by == None:
pmt_summary = pmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
pmt_summary = pd.concat([
pmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
pmt_summary = pd.concat([
pmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
pmt_summary = pd.concat([
pmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in pmt_master_table.purpose.unique()],axis = 1, keys= pmt_master_table.purpose.unique())
pmt_summary.to_csv(out_fn)
def act_pmt_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
Summarizes PMT production and attraction by the 26 Boston neighborhoods for active modes.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'act_pmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'act_pmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by not in [None,'peak','veh_own','purpose']:
print('Only supports PMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
pmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
person_trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] for mode in ['Walk','Bike'] if mode in drive_modes])
pmt_table = __mt_prod_attr_nhood(mc_obj,person_trip_table,skim_dict[peak])
pmt_table['peak'] = peak
pmt_table['veh_own'] = veh_own
pmt_table['purpose'] = purpose
pmt_master_table = pmt_master_table.append(pmt_table, sort = True)
if by == None:
pmt_summary = pmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
pmt_summary = pd.concat([
pmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
pmt_summary = pd.concat([
pmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
pmt_summary = pd.concat([
pmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in pmt_master_table.purpose.unique()],axis = 1, keys= pmt_master_table.purpose.unique())
pmt_summary.to_csv(out_fn)
def sm_trips_by_neighborhood(mc_obj, out_fn = None, by = None, sm_mode = 'SM_RA'):
'''
    Summarizes trip production and attraction by the 26 Boston neighborhoods for Shared Mobility Modes.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
:param sm_mode: Smart Mobility Mode name
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + sm_mode + f'_trips_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + sm_mode + f'_trips_by_neighborhood_by_{by}.csv'
    if by not in [None,'peak','veh_own','purpose']:
print('Only supports Trips by neighborhood, peak / vehicle ownership, purpose.')
return
else:
trp_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
person_trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode]
trp_table = __trip_prod_attr_nhood(mc_obj,person_trip_table)
trp_table['peak'] = peak
trp_table['veh_own'] = veh_own
trp_table['purpose'] = purpose
trp_master_table = trp_master_table.append(trp_table, sort = True)
if by == None:
trp_summary = trp_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
trp_summary = pd.concat([
trp_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
trp_summary = pd.concat([
trp_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
trp_summary = pd.concat([
trp_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in trp_master_table.purpose.unique()],axis = 1, keys= trp_master_table.purpose.unique())
trp_summary.to_csv(out_fn)
def trips_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
    Summarizes trip production and attraction by the 26 Boston neighborhoods.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'trips_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'trips_by_neighborhood_by_{by}.csv'
    if by not in [None,'peak','veh_own','purpose']:
print('Only supports Trips by neighborhood, peak / vehicle ownership, purpose.')
return
else:
trp_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
person_trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] for mode in md.modes if mode in drive_modes])
trp_table = __trip_prod_attr_nhood(mc_obj,person_trip_table)
trp_table['peak'] = peak
trp_table['veh_own'] = veh_own
trp_table['purpose'] = purpose
trp_master_table = trp_master_table.append(trp_table, sort = True)
if by == None:
trp_summary = trp_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
trp_summary = pd.concat([
trp_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
trp_summary = pd.concat([
trp_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
trp_summary = pd.concat([
trp_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in trp_master_table.purpose.unique()],axis = 1, keys= trp_master_table.purpose.unique())
trp_summary.to_csv(out_fn)
def mode_share_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
    Summarizes mode share as the average of trips to/from the 26 Boston neighborhoods, in four categories - drive, non-motorized, transit and smart mobility.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'mode_share_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'mode_share_by_neighborhood_by_{by}.csv'
    if by not in [None,'peak','veh_own','purpose']:
print('Only supports mode share by neighborhood, peak / vehicle ownership, purpose.')
return
else:
share_master_table = pd.DataFrame(columns = ['drive','non-motorized','transit','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
share_table = pd.DataFrame(index = range(0,md.max_zone),columns = ['drive','non-motorized','transit','smart mobility']).fillna(0)
for mode in mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}']:
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
category = md.mode_categories[mode]
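                            # each trip contributes half to its production zone and half to its attraction zone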
share_table[category] += (trip_table.sum(axis = 1)+trip_table.sum(axis = 0))/2
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
trips = pd.concat([towns[[md.taz_ID_field,'BOSTON_NB']],share_table],axis = 1,join = 'inner').groupby(['BOSTON_NB']).sum().drop([md.taz_ID_field],axis = 1)
trips['peak'] = peak
trips['veh_own'] = veh_own
trips['purpose'] = purpose
share_master_table = share_master_table.append(trips.reset_index(), sort = True)
if by == None:
trip_summary = share_master_table.groupby('BOSTON_NB').sum()
share_summary = trip_summary.divide(trip_summary.sum(axis = 1),axis = 0)
elif by == 'peak':
share_summary = pd.concat([
share_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak].divide(
share_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak].sum(axis=1),axis = 0)
for peak in ['PK','OP']
], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
share_summary = pd.concat([
share_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own].divide(
share_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own].sum(axis=1),axis = 0)
for veh_own in ['0','1']
], axis = 1, keys = ['No car', 'With car'])
elif by == 'purpose':
share_summary = pd.concat([
share_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose].divide(
share_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose].sum(axis=1),axis = 0)
for purpose in share_master_table.purpose.unique()
],axis = 1, keys= share_master_table.purpose.unique())
share_summary.to_csv(out_fn)
# Seaport method
def mode_share_by_subarea(mc_obj, out_fn = None, by = None):
'''
    Summarizes mode share as the average of trips to/from the 7 Seaport sub-areas, in four categories - drive, non-motorized, transit and smart mobility.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'mode_share_by_subarea.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'mode_share_by_subarea_by_{by}.csv'
    if by not in [None,'peak','veh_own','purpose']:
print('Only supports mode share by subarea, peak / vehicle ownership, purpose.')
return
else:
share_master_table = pd.DataFrame(columns = ['drive','non-motorized','transit','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
share_table = pd.DataFrame(index = range(0,md.max_zone),columns = ['drive','non-motorized','transit','smart mobility']).fillna(0)
for mode in mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}']:
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
category = md.mode_categories[mode]
share_table[category] += (trip_table.sum(axis = 1)+trip_table.sum(axis = 0))/2
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
towns['REPORT_AREA'] = towns['REPORT_AREA'][towns['REPORT_AREA'].isin(['South Station', 'Seaport Blvd', 'Design Center',
'Southeast Seaport', 'BCEC', 'Fort Point', 'Broadway'])]
trips = pd.concat([towns[[md.taz_ID_field,'REPORT_AREA']],share_table],axis = 1,join = 'inner').groupby(['REPORT_AREA']).sum().drop([md.taz_ID_field],axis = 1)
trips['peak'] = peak
trips['veh_own'] = veh_own
trips['purpose'] = purpose
share_master_table = share_master_table.append(trips.reset_index(), sort = True)
if by == None:
trip_summary = share_master_table.groupby('REPORT_AREA').sum()
share_summary = trip_summary.divide(trip_summary.sum(axis = 1),axis = 0)
elif by == 'peak':
share_summary = pd.concat([
share_master_table.groupby(['peak','REPORT_AREA']).sum().loc[peak].divide(
share_master_table.groupby(['peak','REPORT_AREA']).sum().loc[peak].sum(axis=1),axis = 0)
for peak in ['PK','OP']
], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
share_summary = pd.concat([
share_master_table.groupby(['veh_own','REPORT_AREA']).sum().loc[veh_own].divide(
share_master_table.groupby(['veh_own','REPORT_AREA']).sum().loc[veh_own].sum(axis=1),axis = 0)
for veh_own in ['0','1']
], axis = 1, keys = ['No car', 'With car'])
elif by == 'purpose':
share_summary = pd.concat([
share_master_table.groupby(['purpose','REPORT_AREA']).sum().loc[purpose].divide(
share_master_table.groupby(['purpose','REPORT_AREA']).sum().loc[purpose].sum(axis=1),axis = 0)
for purpose in share_master_table.purpose.unique()
],axis = 1, keys= share_master_table.purpose.unique())
share_summary.to_csv(out_fn)
def __sm_compute_summary_by_subregion(mc_obj,metric = 'VMT',subregion = 'neighboring', sm_mode='SM_RA'):
''' Computing function used by write_summary_by_subregion(), does not produce outputs'''
if metric.lower() not in ('vmt','pmt','mode share','trip', 'pmt_act'):
print('Only supports trip, VMT, PMT and mode share calculations.')
return
if subregion.lower() not in ('boston','neighboring','i93','i495','region'):
print('Only supports within boston, "neighboring" for towns neighboring Boston, I93, I495 or Region.')
return
subregion_dict = {'boston':'BOSTON','neighboring':'BOS_AND_NEI','i93':'in_i95i93','i495':'in_i495'}
if metric.lower() == 'vmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
vmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode] / md.AO_dict[sm_mode]
vmt_table += trip_table * skim_dict[peak]['Length (Skim)']
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
boston_o_auto_vmt = mtx.OD_slice(vmt_table,O_slice = md.taz['BOSTON'], D_slice = md.taz[field]== True)
boston_d_auto_vmt = mtx.OD_slice(vmt_table,md.taz[field]== True,D_slice = md.taz['BOSTON'])
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_auto_vmt = vmt_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
boston_o_auto_vmt = mtx.OD_slice(vmt_table,O_slice = md.taz['BOSTON'])
boston_d_auto_vmt = mtx.OD_slice(vmt_table,D_slice = md.taz['BOSTON'])
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:]
#boston_d_auto_vmt = vmt_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_vmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_vmt,axis=1)/2 ,columns=["VMT"])
zone_vmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_vmt,axis=0)/2 ,columns=["VMT"])
town_vmt_o=pd.concat([town_definition,zone_vmt_daily_o],axis=1,join='inner')
town_vmt_d=pd.concat([town_definition,zone_vmt_daily_d],axis=1,join='inner')
vmt_sum_o = town_vmt_o[town_vmt_o['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['VMT']
vmt_sum_d = town_vmt_d[town_vmt_d['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['VMT']
subregion_vmt = (vmt_sum_o + vmt_sum_d).values[0]
return subregion_vmt
elif metric.lower() == 'trip':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
tripsum_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode]
tripsum_table += trip_table
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
boston_o_trip = mtx.OD_slice(tripsum_table, O_slice = md.taz['BOSTON'],D_slice = md.taz[field]== True)
boston_d_trip = mtx.OD_slice(tripsum_table, O_slice = md.taz[field]== True, D_slice = md.taz['BOSTON'])
#boston_o_trip = tripsum_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_trip = tripsum_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
boston_o_trip = mtx.OD_slice(tripsum_table, O_slice = md.taz['BOSTON'])
boston_d_trip = mtx.OD_slice(tripsum_table, D_slice = md.taz['BOSTON'])
#boston_o_trip = tripsum_table[md.taz['BOSTON'],:]
#boston_d_trip = tripsum_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_daily_o = pd.DataFrame(np.sum(boston_o_trip,axis=1) ,columns=["trips"])
zone_daily_d = pd.DataFrame(np.sum(boston_d_trip,axis=0) ,columns=["trips"])
town_o=pd.concat([town_definition,zone_daily_o],axis=1,join='inner')
town_d=pd.concat([town_definition,zone_daily_d],axis=1,join='inner')
sum_o = town_o[town_o['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['trips']
sum_d = town_d[town_d['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['trips']
subregion_trip = (sum_o + sum_d).values[0]
return subregion_trip
def __compute_metric_by_zone(mc_obj,metric = 'VMT'):
''' Computing function used by write_summary_by_subregion(), does not produce outputs'''
if metric.lower() == 'vmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
vmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] / md.AO_dict[mode] for mode in md.auto_modes if mode in drive_modes])
vmt_table += trip_table * skim_dict[peak]['Length (Skim)']
boston_o_auto_vmt = mtx.OD_slice(vmt_table, O_slice = md.taz['BOSTON'])
boston_d_auto_vmt = mtx.OD_slice(vmt_table,D_slice = md.taz['BOSTON'])
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:]
#boston_d_auto_vmt = vmt_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_vmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_vmt,axis=0)/2 ,columns=["VMT"])
zone_vmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_vmt,axis=1)/2 ,columns=["VMT"])
town_vmt_o=pd.concat([town_definition,zone_vmt_daily_o],axis=1,join='inner')
town_vmt_d=pd.concat([town_definition,zone_vmt_daily_d],axis=1,join='inner')
town_vmt = town_vmt_o.groupby(['TOWN']).sum()['VMT'] + town_vmt_d.groupby(['TOWN']).sum()['VMT']
return town_vmt
elif metric.lower() == 'pmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
pmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
for mode in md.modes if mode in drive_modes])
pmt_table += trip_table * skim_dict[peak]['Length (Skim)']
boston_o_auto_pmt = mtx.OD_slice(pmt_table, O_slice = md.taz['BOSTON'])
boston_d_auto_pmt = mtx.OD_slice(pmt_table, D_slice = md.taz['BOSTON'])
#boston_o_auto_pmt = pmt_table[md.taz['BOSTON'],:]
#boston_d_auto_pmt = pmt_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
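        # note: the intermediate column is labelled "VMT" but holds person-miles in this branch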
zone_pmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_pmt,axis=0)/2 ,columns=["VMT"])
zone_pmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_pmt,axis=1)/2 ,columns=["VMT"])
town_pmt_o=pd.concat([town_definition,zone_pmt_daily_o],axis=1,join='inner')
town_pmt_d=pd.concat([town_definition,zone_pmt_daily_d],axis=1,join='inner')
town_pmt = town_pmt_o.groupby(['TOWN']).sum()['VMT'] + town_pmt_d.groupby(['TOWN']).sum()['VMT']
return town_pmt
elif metric.lower() == 'pmt_act':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
pmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
for mode in ['Walk','Bike'] if mode in drive_modes])
pmt_table += trip_table * skim_dict[peak]['Length (Skim)']
boston_o_auto_pmt = mtx.OD_slice(pmt_table, O_slice = md.taz['BOSTON'])
boston_d_auto_pmt = mtx.OD_slice(pmt_table, D_slice = md.taz['BOSTON'])
#boston_o_auto_pmt = pmt_table[taz['BOSTON'],:]
#boston_d_auto_pmt = pmt_table[:][:,taz['BOSTON']]
town_definition = md.taz
zone_pmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_pmt,axis=0)/2 ,columns=["VMT"])
zone_pmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_pmt,axis=1)/2 ,columns=["VMT"])
town_pmt_o=pd.concat([town_definition,zone_pmt_daily_o],axis=1,join='inner')
town_pmt_d=pd.concat([town_definition,zone_pmt_daily_d],axis=1,join='inner')
town_pmt = town_pmt_o.groupby(['TOWN']).sum()['VMT'] + town_pmt_d.groupby(['TOWN']).sum()['VMT']
return town_pmt
def __compute_summary_by_subregion(mc_obj,metric = 'VMT',subregion = 'neighboring'):
''' Computing function used by write_summary_by_subregion(), does not produce outputs'''
if metric.lower() not in ('vmt','pmt','mode share','trip', 'pmt_act'):
print('Only supports trip, VMT, PMT and mode share calculations.')
return
if subregion.lower() not in ('boston','neighboring','i93','i495','region'):
print('Only supports within boston, "neighboring" for towns neighboring Boston, I93, I495 or Region.')
return
subregion_dict = {'boston':'BOSTON','neighboring':'BOS_AND_NEI','i93':'in_i95i93','i495':'in_i495'}
if metric.lower() == 'vmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
vmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] / md.AO_dict[mode] for mode in md.auto_modes if mode in modes])
vmt_table += trip_table * skim_dict[peak]['Length (Skim)']
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_auto_vmt = vmt_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
boston_o_auto_vmt = mtx.OD_slice(vmt_table, O_slice = md.taz['BOSTON'], D_slice = md.taz[field]== True)
boston_d_auto_vmt = mtx.OD_slice(vmt_table, O_slice = md.taz[field]== True, D_slice = md.taz['BOSTON'])
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
# boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:]
# boston_d_auto_vmt = vmt_table[:][:,md.taz['BOSTON']]
boston_o_auto_vmt = mtx.OD_slice(vmt_table, O_slice = md.taz['BOSTON'])
boston_d_auto_vmt = mtx.OD_slice(vmt_table, D_slice = md.taz['BOSTON'])
town_definition = md.taz
zone_vmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_vmt,axis=1)/2 ,columns=["VMT"])
zone_vmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_vmt,axis=0)/2 ,columns=["VMT"])
town_vmt_o= | pd.concat([town_definition,zone_vmt_daily_o],axis=1,join='inner') | pandas.concat |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/5/11 17:52
Desc: cryptocurrencies
https://cn.investing.com/crypto/currencies
high-frequency data
https://bitcoincharts.com/about/markets-api/
"""
import math
import pandas as pd
import requests
from tqdm import tqdm
from akshare.datasets import get_crypto_info_csv
def crypto_name_url_table(symbol: str = "web") -> pd.DataFrame:
"""
    Cryptocurrency names, symbols and IDs; refreshing the full list from the web is slow
https://cn.investing.com/crypto/ethereum/historical-data
    :param symbol: choice of {"web", "local"}; "web" fetches the latest list from the website, "local" uses the bundled local file
:type symbol: str
    :return: cryptocurrency names, symbols and IDs
:rtype: pandas.DataFrame
"""
if symbol == "web":
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "https://cn.investing.com/crypto/Service/LoadCryptoCurrencies"
payload = {
'draw': '14',
'columns[0][data]': 'currencies_order',
'columns[0][name]': 'currencies_order',
'columns[0][searchable]': 'true',
'columns[0][orderable]': 'true',
'columns[0][search][value]': '',
'columns[0][search][regex]': 'false',
'columns[1][data]': 'function',
'columns[1][name]': 'crypto_id',
'columns[1][searchable]': 'true',
'columns[1][orderable]': 'false',
'columns[1][search][value]': '',
'columns[1][search][regex]': 'false',
'columns[2][data]': 'function',
'columns[2][name]': 'name',
'columns[2][searchable]': 'true',
'columns[2][orderable]': 'true',
'columns[2][search][value]': '',
'columns[2][search][regex]': 'false',
'columns[3][data]': 'symbol',
'columns[3][name]': 'symbol',
'columns[3][searchable]': 'true',
'columns[3][orderable]': 'true',
'columns[3][search][value]': '',
'columns[3][search][regex]': 'false',
'columns[4][data]': 'function',
'columns[4][name]': 'price_usd',
'columns[4][searchable]': 'true',
'columns[4][orderable]': 'true',
'columns[4][search][value]': '',
'columns[4][search][regex]': 'false',
'columns[5][data]': 'market_cap_formatted',
'columns[5][name]': 'market_cap_usd',
'columns[5][searchable]': 'true',
'columns[5][orderable]': 'true',
'columns[5][search][value]': '',
'columns[5][search][regex]': 'false',
'columns[6][data]': '24h_volume_formatted',
'columns[6][name]': '24h_volume_usd',
'columns[6][searchable]': 'true',
'columns[6][orderable]': 'true',
'columns[6][search][value]': '',
'columns[6][search][regex]': 'false',
'columns[7][data]': 'total_volume',
'columns[7][name]': 'total_volume',
'columns[7][searchable]': 'true',
'columns[7][orderable]': 'true',
'columns[7][search][value]': '',
'columns[7][search][regex]': 'false',
'columns[8][data]': 'change_percent_formatted',
'columns[8][name]': 'change_percent',
'columns[8][searchable]': 'true',
'columns[8][orderable]': 'true',
'columns[8][search][value]': '',
'columns[8][search][regex]': 'false',
'columns[9][data]': 'percent_change_7d_formatted',
'columns[9][name]': 'percent_change_7d',
'columns[9][searchable]': 'true',
'columns[9][orderable]': 'true',
'columns[9][search][value]': '',
'columns[9][search][regex]': 'false',
'order[0][column]': 'currencies_order',
'order[0][dir]': 'asc',
'start': '0',
'length': '100',
'search[value]': '',
'search[regex]': 'false',
'currencyId': '12',
}
r = requests.post(url, data=payload, headers=headers)
data_json = r.json()
total_page = math.ceil(int(data_json['recordsTotal']) / 100)
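        # the endpoint returns at most 100 records per request, so page through the remaining requests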
big_df = | pd.DataFrame() | pandas.DataFrame |
import inspect
from datetime import datetime
from tralo.utils import filter_args, sha1_hash_object, valid_run, AttributeDict, get_attribute
import yaml
import os
import json
import re
import torch
from os.path import join, isfile, expanduser, realpath
from tralo.log import log
def load_model(checkpoint_id, weights_file=None, strict=True, model_args='from_config', with_config=False):
config = json.load(open(join('logs', checkpoint_id, 'config.json')))
if model_args != 'from_config' and type(model_args) != dict:
raise ValueError('model_args must either be "from_config" or a dictionary of values')
model_cls = get_attribute(config['model'])
# load model
if model_args == 'from_config':
_, model_args, _ = filter_args(config, inspect.signature(model_cls).parameters)
print(model_args)
model = model_cls(**model_args)
if weights_file is None:
weights_file = realpath(join('logs', checkpoint_id, 'weights.pth'))
else:
weights_file = realpath(join('logs', checkpoint_id, weights_file))
if isfile(weights_file):
weights = torch.load(weights_file)
for _, w in weights.items():
assert not torch.any(torch.isnan(w)), 'weights contain NaNs'
model.load_state_dict(weights, strict=strict)
else:
raise FileNotFoundError(f'model checkpoint {weights_file} was not found')
if with_config:
return model, config
return model
class Results(dict):
""" results representation allowing html or print output """
def _repr_html_(self):
table, cols, diff_cols = self.table()
# table to string, needed because of fixed column width
table = [{k: self.to_str(v) for k, v in row.items()} for row in table]
tab = '<table>'
tab += '<thead>' + ''.join(f'<td>{k}</td>' for k in diff_cols) + '</thead>'
tab += ''.join('<tr>' + ''.join(f'<td>{row[k]}</td>' for k in diff_cols) + '</tr>' for row in table)
tab += '</table>'
return tab
def table(self):
# scores_flat = [{f'{kk}': val for kk, val in s} for s in self['scores']]
# import ipdb
# ipdb.set_trace()
def name(k, kk):
return f'{k}_{kk}' if len(k) > 0 else kk
scores_flat = [dict((name(k, kk), val) for k, sc in config_scores for kk, val in sc)
for config_scores in self['scores']]
# scores_flat = [{f'{k}_{kk}': val for k in s for kk, val in s[k]} for s in self['scores']]
table = [{**a, **b, **c} for a, b,c in zip(self['configurations'], scores_flat, self['stats'])]
cols = list()
[cols.append(k) if k not in cols else None for row in table for k in row.keys()]
# re-order the columns
first_col_order = ['name']
cols = sorted(cols, key=lambda k: first_col_order.index(k) - 10000 if k in first_col_order else cols.index(k))
print()
# make sure all cols have values
table = [{k: row[k] if k in row else None for k in cols} for row in table]
# identify columns that have different values, use str to be compatible with list/tuples
different_cols = [c for c in cols if len(set(str(tab[c]) if c in tab else None for tab in table)) > 1]
return table, cols, different_cols
def dataframe(self):
from pandas import DataFrame
table, _, _ = self.table()
return | DataFrame(table) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from unittest import TestCase
from parameterized import parameterized
import pandas as pd
import numpy as np
from numpy.testing.utils import assert_array_equal
from pandas import (MultiIndex,
Index)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from alphaware.enums import OutputDataFormat, FreqType
from alphaware.const import INDEX_FACTOR
from alphaware.utils import (convert_df_format,
top,
group_by_freq,
fwd_return,
weighted_rank)
from datetime import datetime as dt
class TestPandasUtils(TestCase):
@parameterized.expand([( | pd.DataFrame({'001': [1, 2, 3], '002': [2, 3, 4]}, index=['2014', '2015', '2016']) | pandas.DataFrame |
import torch
from torch.utils.data import Dataset
import pandas as pd
import numpy as np
class DeepInflammationDataset(Dataset):
def __init__(self, c1_data, c2_data):
"""
- c1_data: pandas dataframe of the cell 1 data
- c2_data: pandas dataframe of the cell 2 data
"""
super().__init__()
self.c1_data = c1_data
self.c2_data = c2_data
def __len__(self):
"""Return the number of samples."""
return self.c1_data.shape[0] + self.c2_data.shape[0]
def __getitem__(self, idx):
"""
Input:
- idx: index of sample to retrieve
Return:
- sample: the idx'th sample
- label: the label for the sample
"""
c = | pd.concat([self.c1_data['expr'], self.c2_data['expr']], axis=0, ignore_index=True) | pandas.concat |
import textattack
import textattack.datasets as datasets
import random
import pandas as pd
from textattack.transformations.word_swap_embedding import WordSwapEmbedding as WordSwapEmbedding
from textattack.constraints.semantics.word_embedding_distance import WordEmbeddingDistance as WordEmbeddingDistance
NUM_NEAREST = 5
NUM_SYMS = 1000
def main():
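    # Collect candidate words (skipping embedding stopwords) from four text classification datasets.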
datasets_names = [datasets.classification.AGNews, datasets.classification.IMDBSentiment, datasets.classification.MovieReviewSentiment, datasets.classification.YelpSentiment]
cand_words = list()
swap = WordSwapEmbedding(max_candidates=NUM_NEAREST)
for name in datasets_names:
data = name()
for label, text in data:
words = textattack.tokenized_text.raw_words(text)
for word in words:
if word.lower() in swap.stopwords:
continue
cand_words.append(word)
random.shuffle(cand_words)
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/local/bin/python
import argparse
import os
import sys
import pandas as pd
import numpy as np
import time
pd.options.mode.chained_assignment = None
parser = argparse.ArgumentParser(prog='snvScore')
parser.add_argument('SampleBED',type=str,help='Path to the mosdepth per-base BED output')
parser.add_argument('SNVGermlineTXT',type=str,help='Path to Clivar-generated table with pathogenic germline SNVs')
parser.add_argument('SNVSomaticTXT',type=str,help='Path to Clivar-generated table with pathogenic somatic SNVs')
parser.add_argument('Threshold',type=int,nargs='?',help='SNV coverage quality threshold (optional, positive)',default=0)
args = parser.parse_args()
sample_name = args.SampleBED
while sample_name.find('/')!=-1:
sample_name = sample_name[sample_name.find('/')+1:]
def snv_coverage(snv,chrom_cover):
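    # Annotate each SNV position with its per-base read depth taken from the mosdepth BED slice.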
snv = snv.dropna()
snv['coverage']=0.0
snv=snv.drop_duplicates()
snv = snv.reset_index(drop=True)
cover_reg = chrom_cover[(chrom_cover.end>snv.position.iloc[0]) & (chrom_cover.start<=snv.position.iloc[-1])]
cover_reg = cover_reg.reset_index(drop=True)
for ind in snv.index:
buf = cover_reg[(cover_reg.end>snv.position[ind]) & (cover_reg.start<=snv.position[ind])]
snv.coverage[ind] = buf.coverage
return snv
def CatchChromoRegs(BED_fname,chrom_names):
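    # Scan the per-base BED once and record the line number where each chromosome's records begin.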
BED = open(BED_fname, 'rt')
# chrom_names = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8',
# 'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15',
# 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22',
# 'chrX', 'chrY','chrM']
chrom_start_pos = np.zeros(len(chrom_names)+1,dtype='int32')
line_num = 0
for chrom,i in zip(chrom_names,np.arange(len(chrom_names))):
pos_catched = False
while not pos_catched:
line = BED.readline()
line = line[:line.find('\t')]
if line == chrom:
pos_catched = True
chrom_start_pos[i] = line_num
line_num+=1
while line =='chrM':
line = BED.readline()
line = line[:line.find('\t')]
line_num+=1
chrom_start_pos[-1]=line_num-1
return chrom_start_pos
def ExecuteClinicalCoverageDepthCalc(chrom_names,SNVG,SNVS,SampleBED):
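    # Per chromosome: load its BED slice, attach coverage to germline/somatic SNVs, and accumulate summary rows for the report.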
snv_cov = pd.DataFrame(columns=['chr','position','coverage','type'])
all_cov = np.array([])
# start = time.time()
res = CatchChromoRegs(SampleBED,chrom_names)
rows = ['' for i in range(24)]
for chrom,chr_num in zip(chrom_names[:-1],np.arange(24)):
# for chrom,chr_num in zip(chrom_names[:3],np.arange(3)):
chrom_cover = pd.read_csv(SampleBED,delimiter='\t',header=None,names=['chr','start','end','coverage'],skiprows=res[chr_num],nrows=res[chr_num+1]-res[chr_num])
all_cov = np.append(all_cov,chrom_cover.coverage.values,axis=0)
snvg_part = SNVG[SNVG.chr==chrom]
snvs_part = SNVS[SNVS.chr==chrom]
if snvg_part.size>0:
snvg_part = snv_coverage(snvg_part,chrom_cover)
snvg_part['type'] = 'germline'
snv_cov=pd.concat([snv_cov,snvg_part])
germ_row = '%8.0f %10.0f %6.0f %6.0f %6.0f '%(len(snvg_part),
np.median(snvg_part.coverage),
np.std(snvg_part.coverage),
np.min(snvg_part.coverage),
np.max(snvg_part.coverage))
else:
germ_row = '%8.0f %10.0f %6.0f %6.0f %6.0f '%(0,0,0,0,0)
if snvs_part.size>0:
snvs_part=snv_coverage(snvs_part,chrom_cover)
snvs_part['type'] = 'somatic'
snv_cov=pd.concat([snv_cov,snvs_part])
soma_row = '%8.0f %10.0f %6.0f %6.0f %6.0f'%(len(snvs_part),
np.median(snvs_part.coverage),
np.std(snvs_part.coverage),
np.min(snvs_part.coverage),
np.max(snvs_part.coverage))
else:
soma_row = '%8.0f %10.0f %6.0f %6.0f %6.0f'%(0,0,0,0,0)
rows[chr_num] = '%6s'%(chrom)+' '+germ_row+soma_row
# end=time.time()
germ_cov = snv_cov[snv_cov.type=='germline']
soma_cov = snv_cov[snv_cov.type=='somatic']
above_thres = [np.sum(germ_cov.coverage>=args.Threshold),np.sum(soma_cov.coverage>=args.Threshold)]
out_table = open('output/'+sample_name+'.snvScore.txt','w')
out_table.write('SNV coverage report - %s\n\n'%(sample_name))
if args.Threshold>0:
out_table.write('%2.0f%% of all pathogenic germline SNVs and %2.0f%% of all pathogenic somatic SNVs are covered above threshold (%2.0f)\n\n'%(above_thres[0]/len(germ_cov)*100,above_thres[1]/len(soma_cov)*100,args.Threshold))
print('%2.0f%% of all pathogenic germline SNVs and %2.0f%% of all pathogenic somatic SNVs are covered above threshold (%2.0f)\n\n'%(above_thres[0]/len(germ_cov)*100,above_thres[1]/len(soma_cov)*100,args.Threshold))
out_table.write('Whole genome coverage:\n')
out_table.write('%6s %4s %4s %12s %12s %4s %7s\n'%('median',
'mean',
'std',
'1st quartile',
'3rd quartile',
'min',
'max'))
out_table.write('%6.0f %4.0f %4.0f %12.0f %12.0f %4.0f %7.0f\n\n'%(np.median(all_cov),
np.mean(all_cov),
np.std(all_cov),
np.quantile(all_cov,0.25),
np.quantile(all_cov,0.75),
np.min(all_cov),
np.max(all_cov)))
out_table.write('Pathogenic (G - germline, S - somatic) SNV coverage:\n')
out_table.write('(\'count\' is the number of variants in a given region)\n\n')
out_table.write('%6s %8s %10s %6s %6s %6s %8s %10s %6s %6s %6s\n'%('region','count(G)','median(G)','std(G)','min(G)','max(G)','count(S)','median(S)','std(S)','min(S)','max(S)'))
out_table.write('----------------------------------------------------------------------------------------\n')
out_table.write('%6s %8.0f %10.0f %6.0f %6.0f %6.0f %8.0f %10.0f %6.0f %6.0f %6.0f\n'%('ALL',len(germ_cov),np.median(germ_cov.coverage),np.std(germ_cov.coverage),np.min(germ_cov.coverage),np.max(germ_cov.coverage),len(soma_cov),np.median(soma_cov.coverage),np.std(soma_cov.coverage),np.min(soma_cov.coverage),np.max(soma_cov.coverage)))
out_table.write('----------------------------------------------------------------------------------------\n')
for row in rows:
out_table.write(row+'\n')
out_table.write('----------------------------------------------------------------------------------------\n')
out_table.close()
    # print('Elapsed time is %3.2f sec.'%(end-start))
chrom_names = ['chr1','chr2','chr3','chr4','chr5',
'chr6','chr7','chr8','chr9','chr10',
'chr11','chr12','chr13','chr14','chr15',
'chr16','chr17','chr18','chr19','chr20',
'chr21','chr22','chrX','chrY','chrM']
SNVG = | pd.read_csv(args.SNVGermlineTXT,sep='\t') | pandas.read_csv |
#!/usr/bin/python3
#By <NAME>
import os
import re
import argparse
import time
import timeit
import numpy as np
import pandas as pd
import pipeline_base
def merge_window(intervals,vcf,ref_sequence,log_file=False,fullcheck=True,ignored=True,info_just_indel=False,drop_info=False,intervals_alignment_bool=False,intervals_alignment=None):
'''Function that joins a given window in a vcf file by position'''
global vcf_slice
intervals_used = []
for contig,pos_start,pos_end in intervals:
flag_ignored = False
if any([(pos_start_used <= pos_start <= pos_end_used) and contig==contig_used for contig_used,pos_start_used,pos_end_used in intervals_used]):
print('Warning this interval used, interval ignored',[contig,pos_start,pos_end])
if log_file:
log_file.write('Warning this interval used, interval ignored ' + str([contig,pos_start,pos_end])+'\n')
continue
#contig = vcf.loc[pos_start,'#CHROM']
#print(contig,pos_start,pos_end)
vcf_slice = vcf[(pos_start <= vcf['POS']) & (vcf['POS'] <= pos_end) & (vcf['#CHROM']==contig)]
#print(vcf_slice)
if vcf_slice.shape[0] <= 1:
print('Warning vcf_slice <= 1',contig,pos_start,pos_end)
if log_file:
log_file.write('Warning vcf_slice <= 1' +'\n')
continue
if drop_info:
if len(set(vcf_slice['INFO'].values) - set(drop_info)) == 0:
print('Warning vcf slice contain only', *drop_info,set(vcf_slice['INFO'].values),contig,pos_start,pos_end)
#print('Warning vcf slice contain only',str([contig,pos_start,pos_end]))
if log_file:
log_file.write('Warning vcf slice contain only' + str([contig,pos_start,pos_end])+'\n')
continue
#sample_bin_dict = {i:None for i in vcf_slice.iloc[:,9:]}#!!!maybe need
sample_bin_dict = dict()
position_first = int(vcf_slice.iloc[0]['POS'])
index_first = vcf_slice.index[0]
position_start = int(vcf_slice.iloc[0]['POS'])
max_over = max([len(ref) + pos - 1 for ref,pos in zip(vcf_slice['REF'],vcf_slice['POS'])])
sample_dict = {i:list(ref_sequence[position_first-1:max_over]) for i in vcf_slice.iloc[:,9:]}
sample_dict_info = {i:'' for i in vcf_slice.iloc[:,9:]}
sample_dict_mem = {i:position_first for i in vcf_slice.iloc[:,9:]}
ref_mod = ref_sequence[position_first-1:max_over]
sample_will_deleted = set()
#check for if overlay out interval
if fullcheck:
'''
vcf_slice_end = vcf[(vcf['POS'] > pos_end) & (vcf['#CHROM']==contig)]
if not vcf_slice_end.empty:
position_outside = int(vcf_slice_end.iloc[0]['POS'])
for position_row in vcf_slice.index[:-1]:
if position_row + len(vcf_slice.loc[position_row]['REF']) - position_outside > 0:
print('Warning overlay window','interval',[pos_start,pos_end])
log_file.write('Warning overlay window, interval '+str([pos_start,pos_end])+'\n')
'''
flag = False
if any((vcf_slice.iloc[:,9:].isna().any()) & (~(vcf_slice.iloc[:,9:].isna().all()))):
print('Warning indeterminate genotype of the sample1 interval ignored',[pos_start,pos_end])
if log_file:
log_file.write('Warning indeterminate genotype of the sample1 interval ignored ' + str([pos_start,pos_end])+'\n')
for sample in sample_dict:
if not all(vcf_slice[sample].isna()) and not all(~vcf_slice[sample].isna()):#!
#print('Warning indeterminate genotype of the sample')
#assert False, "'Warning indeterminate genotype of the sample'"
flag = True
if flag:
#print('Warning2 indeterminate genotype of the sample, interval ignored',[pos_start,pos_end])
#log_file.write('Warning2 indeterminate genotype of the sample, interval ignored ' + str([pos_start,pos_end])+'\n')
if ignored:
print('Warning2 indeterminate genotype of the sample, interval ignored',[pos_start,pos_end])
if log_file:
log_file.write('Warning2 indeterminate genotype of the sample, interval ignored ' + str([pos_start,pos_end])+'\n')
continue
if not (vcf_slice.iloc[:,9:]).isna().any().any():#!req to change
intervals_slice_contain = intervals_alignment[( pos_end >= intervals_alignment[ref_assemble_name + '_start']) & (pos_start <= intervals_alignment[ref_assemble_name + '_end'])]
if intervals_slice_contain.isin([0]).any().any():
if ignored:
print('Warning gap between defined variant, interval ignored',[pos_start,pos_end])
if log_file:
log_file.write('Warning gap between defined variant, interval ignored ' + str([pos_start,pos_end])+'\n')
continue
if intervals_alignment_bool:
intervals_alignment_slice = intervals_alignment[(intervals_alignment['#contig'] == contig ) &
#(intervals_alignment['name'] == sample ) &
((intervals_alignment['start_position_ref'] <= pos_start) &
(intervals_alignment['end_position_ref'] >= pos_start))|
((intervals_alignment['start_position_ref'] <= pos_end)&
(intervals_alignment['end_position_ref'] >= pos_end))]
sample_tmp_storage = []
#print(pos_start,pos_end)
for sample in sample_dict:
#intervals_alignment['#contig'] == contig
#print(sample,pos_start,pos_end)
#print(intervals_alignment_slice[intervals_alignment_slice['name'] == sample])
if intervals_alignment_slice[intervals_alignment_slice['name'] == sample].shape[0] >= 2:
sample_will_deleted.add(sample)
sample_tmp_storage.append(sample)
#print(intervals_alignment_slice[intervals_alignment_slice['name'] == sample])
#print('\n\n\n')
if sample_tmp_storage:
print('Warning merge different contig',contig,pos_start,pos_end,sample_tmp_storage)
if log_file:
log_file.write('Warning merge different contig ' + str([contig,pos_start,pos_end,sample_tmp_storage])+'\n')
variant = []
flag_break = False
for row_index in vcf_slice.index:
if flag_break:
break
#position_row
#print(row_index)
variant = [vcf_slice.loc[row_index]['REF']] + vcf_slice.loc[row_index]['ALT'].split(',')
position_inter = int(vcf_slice.loc[row_index]['POS'])
#vcf.loc[position_row,'REF'] = ref_mod #maybe need
for sample in sample_dict:
#if vcf_slice.loc[row_index][sample] == '.':
if np.isclose(vcf_slice.loc[row_index][sample],np.nan,equal_nan=True):
#sample_dict[sample]
sample_will_deleted.add(sample)
else:
if vcf_slice.loc[row_index][sample] != 0:
position_row_shift = int(vcf_slice.loc[row_index]['POS']) - position_first
var_len = len(variant[int(vcf_slice.loc[row_index][sample])])
ref_len = len(vcf_slice.loc[row_index]['REF'])
#print(sample)
#print(sample_dict,file=log_file)
if any([i=='' or type(i)==list for i in sample_dict[sample][position_row_shift:position_row_shift+ref_len]]):
print(row_index,sample,'Error, position was used for variant, maybe wrong output,interval ignored',[pos_start,pos_end])
print('\n',position_row_shift,position_row_shift+ref_len,sample_dict,file=log_file)
if log_file:
log_file.write('Error, position was used for variant, maybe wrong output,interval ignored ' + str([pos_start,pos_end])+'\n')
flag_ignored = True
flag_break=True
break
#for cut end ref_sequence maybe
ref_var = vcf_slice.loc[row_index]['REF']
alt_var = variant[int(vcf_slice.loc[row_index][sample])]
#if len(ref_var)>1 or len(alt_var)>1:
alt_cut = 0
'''
for sym_ref, sym_alt in zip(ref_var[::-1],alt_var[::-1]):
if sym_ref == sym_alt:
pass
#ref_len -= 1
#alt_cut -= 1
#alt_var = alt_var[:-1]
else:
break
'''
alt_cut = None if alt_cut==0 else alt_cut
if len(variant[int(vcf_slice.loc[row_index][sample])]) == ref_len:#!
sample_dict[sample][position_row_shift:position_row_shift+ref_len] = ([list(variant[int(vcf_slice.loc[row_index][sample])])])[:alt_cut]
#sample_dict_info[sample] = sample_dict_info[sample]+vcf_slice.loc[row_index]['INFO'] + '_'
elif len(variant[int(vcf_slice.loc[row_index][sample])]) != ref_len:
sample_dict[sample][position_row_shift:position_row_shift+ref_len] = ([list(variant[int(vcf_slice.loc[row_index][sample])])])[:alt_cut] + [''] * (ref_len - 1) #(len(sample_dict[sample][position_row_shift:position_row_shift+ref_len]) - 1)
sample_dict_info[sample] = sample_dict_info[sample]+vcf_slice.loc[row_index]['INFO'] + '+'
sample_dict_mem[sample] += len(vcf_slice.loc[row_index]['REF'])
else:
continue
position_start = position_inter
if flag_ignored:
continue
intervals_used.append([contig,pos_start,pos_end])
#for redefine bin in sample
bin_value = 1
used = []
used_bin = []
flag = False
sample_lst = []
for sample in sample_will_deleted:
del sample_dict[sample]
sample_bin_dict[sample] = np.nan
for sample in sample_dict:
sample_dict[sample] = [i if type(i) == str else ''.join(i) for i in sample_dict[sample]]
sample_dict = {i:''.join(j) for i,j in sample_dict.items()}
sample_dict_info = {i:j[:-1] for i,j in sample_dict_info.items()}
ref_mod
#sample_bin_dict = dict()
sample_seq_set = set()
sample_uniq_dict = dict()
info_lst = []
sample_uniq_dict[ref_mod] = 0
sample_seq_set.add(ref_mod)
bin_value = 1
for sample,sample_seq_uniq in sample_dict.items():
if sample_seq_uniq not in sample_seq_set:
sample_seq_set.add(sample_seq_uniq)
sample_lst.append(sample_seq_uniq)
info_lst.append(sample_dict_info[sample])
sample_uniq_dict[sample_seq_uniq] = bin_value
bin_value+=1
for sample in sample_dict:
sample_bin_dict[sample] = sample_uniq_dict[sample_dict[sample]]
info = '&'.join(info_lst)
#print('sample_bin_dict =',sample_dict)
'''
for sample in sample_dict:
if all(vcf_slice[sample].astype(int) == 0): #!need check . values
sample_bin_dict[sample] = 0
else:
if sample not in used:
for sample_compared in sample_dict:
if (vcf_slice.iloc[:,9:][sample_compared].astype(int)).equals(vcf_slice.iloc[:,9:][sample].astype(int)) and sample_compared not in used:# and list(vcf_slice.iloc[:,9:][sample_compared].values) not in used_bin:# and sample_compared != sample:
sample_bin_dict[sample_compared] = bin_value
used += [sample_compared]
used_bin.append(list(vcf_slice.iloc[:,9:][sample_compared].values))
flag = True
if flag == True:
sample_lst += [sample_dict[sample]]
bin_value += 1
flag = False
'''
alt = ','.join(sample_lst)
#print(vcf_slice['INFO'].values)
if info_just_indel:
info = 'INDEL'
print('Add compressed interval to vcf',pos_start,pos_end,str(pos_end-pos_start))
if log_file:
log_file.write('Add compressed interval to vcf ' + str([pos_start,pos_end])+ str(pos_end-pos_start) + '\n')
vcf_variant_dict = {'#CHROM':contig,'POS':position_first,'ID':'.','REF':ref_mod,
'ALT':alt,'QUAL':40,'FILTER':'PASS','INFO':info,'FORMAT':'GT'}
vcf_variant_dict.update(sample_bin_dict)
for_merged = pd.Series(vcf_variant_dict,name=index_first)
vcf.drop(vcf_slice.index,inplace=True)
vcf = vcf.append(for_merged,ignore_index=False)#!True
vcf.sort_values(by=['#CHROM','POS'],ascending=[False,True],inplace=True)
return vcf
def definer_overlap_window(vcf,overlap_extra=0,type_merge='hard',log_file=False):
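    '''Finds windows of variants whose REF spans (plus overlap_extra) overlap; returns exact [contig, start, end] intervals and extended windows'''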
over = 0
interval_noexact = []
interval_exact = []
pos = vcf.iloc[0]["POS"]
#for num,position_row in enumerate(vcf.loc[pos:].index):
for num,position_row in enumerate(vcf.index):
contig = vcf.iloc[num]['#CHROM']
interval = []
interval_dict = {}
interval2 = []
window_sum = vcf.loc[position_row]['POS'] + len(vcf.loc[position_row]['REF'])-1 + overlap_extra
#print(position_row,window_sum)
for position_row2 in vcf.index[num+1:]:
if vcf.loc[position_row2]['POS'] <= window_sum:
if vcf.loc[position_row2]['#CHROM'] != contig:
print('Warning different contig',vcf.loc[position_row2]['#CHROM'], contig,position_row,position_row2)
break
over = vcf.loc[position_row2]['POS']+len(vcf.loc[position_row2]['REF'])-1 + overlap_extra
#print('minus=',window_sum,over,window_sum - over)
if over > window_sum: #or
window_sum = over
if window_sum - vcf.loc[position_row]['POS']>1000:
print('Warning interval contain large indel','interval ignored',[position_row,position_row2],'Indel lenght =',window_sum-vcf.loc[position_row]['POS'])#for filter large variant
if log_file:
log_file.write('Warning interval contain large indel, interval ignored ' + str([position_row,position_row2])+' Indel lenght = ' + str(window_sum-vcf.loc[position_row]['POS']) +'\n')
break
#vcf_slice = vcf[(vcf.loc[position_row]['POS'] <= vcf['POS']) & (vcf['POS'] <= vcf.loc[position_row2]['POS']) & (vcf['#CHROM']==contig)]#!window_sum
if type_merge == 'soft':
vcf_slice = vcf[(vcf.loc[position_row]['POS'] <= vcf['POS']) & (vcf['POS'] <= vcf.loc[position_row2]['POS']) & (vcf['#CHROM']==contig)]#!window_sum
#print(vcf_slice)
if any((vcf_slice.iloc[:,9:].isna().any()) & (~(vcf_slice.iloc[:,9:].isna().all()))):
print('Warning interval contain indeterminate variant,interval ignored',[position_row,position_row2])
if log_file:
log_file.write('Warning interval contain indeterminate variant,interval ignored ' + str([position_row,position_row2])+'\n')
break
interval = [vcf.loc[position_row]['POS'],window_sum]
#interval2 = [vcf.loc[position_row]['POS'],vcf.loc[position_row2]['POS']]
interval2 = [contig,vcf.loc[position_row]['POS'],vcf.loc[position_row2]['POS']]
interval_dict[contig] = [vcf.loc[position_row]['POS'],vcf.loc[position_row2]['POS']]
else:
break
if interval:
interval_noexact.append(interval)
if interval2:
#interval_exact.append(interval2)
interval_exact.append(interval2)
return interval_exact,interval_noexact
def intervals_concat(intervals_path):
'''intervals_path = list of path file interval'''
intervals = []
for interval in intervals_path:
if test:
intervals.append( | pd.read_csv(interval,sep='\t') | pandas.read_csv |
from __future__ import annotations
import os
import pandas as pd
import streamlit as st
st.set_page_config(layout="wide")
class Pager:
"""Generates cycled stepper indices."""
def __init__(self, count) -> None:
self.count = count
self.current = 0
@property
def next(self) -> int:
"""Fetches next index."""
n = self.current + 1
# Cycles to beginning.
if n > self.count-1:
n -= self.count
return n
@property
def prev(self) -> int:
"""Fetches previous index."""
n = self.current - 1
# Cycles to end.
if n < 0 :
n += self.count
return n
def set_current(self, current) -> None:
"""Sets current index."""
self.current = current
class Indices:
"""Collects all index strings from data dir."""
def __init__(self) -> None:
self.values = self._get_indices()
self.count = len(self.values)
@staticmethod
def _get_contents() -> list:
"""Fetches lits of all files."""
return os.listdir(datapath)
def _get_gifs(self) -> list:
"""Fetches list of all gif file names."""
contents = self._get_contents()
return [file for file in contents if file.endswith('.gif')]
def _get_indices(self) -> list:
"""Fetches list of all string indices."""
gifs = self._get_gifs()
return [name.split('.')[0].split('_')[-1] for name in gifs]
def index_of(self, index_str: str) -> int:
"""Fetches base index of string index."""
return self.values.index(index_str)
def session_next_set():
"""Updates session to next page index."""
pager.set_current(st.session_state['page'])
st.session_state['page'] = pager.next
def session_prev_set():
"""Updates session to previous page index."""
pager.set_current(st.session_state['page'])
st.session_state['page'] = pager.prev
def update_df(idx: int, eval_val: str, comment_val: str) -> None:
"""Updates dataframe with inputs and opens next set."""
df.at[idx, 'eval'] = eval_val
df.at[idx, 'comment'] = comment_val
session_next_set()
def save_df() -> None:
"""Saves dataframe at current session."""
df.to_csv('viewer.csv', index=False)
with st.sidebar:
st.info('df saved to viewer.csv')
def populate(index_frame: str) -> None:
"""Populates image data and stores eval input."""
idx = indices.index_of(index_frame)
with col_raw:
st.subheader("Raw")
st.image(f'{datapath}/raw_{index_frame}.png')
eval_val = col_raw.radio('Eval:', eval_choices)
with col_ann:
st.subheader("Annotated")
st.image(f'{datapath}/annotated_{index_frame}.png')
val = df.loc[df["index"] == index_frame, "eval"].iloc[0]
st.text(f'Stored Eval: {val}')
with col_gif:
st.subheader("GIF")
st.image(f'{datapath}/anim_{index_frame}.gif')
comment_val = df.loc[df["index"] == index_frame, "comment"].iloc[0]
comment_val = col_gif.text_input('Comments: ', value = comment_val)
st.button(
'SUBMIT',
on_click=update_df,
args=(idx, eval_val, comment_val)
)
# Sets evaluation choices.
eval_choices = ['Good', 'Bad']
# Sets datapath.
with st.sidebar:
datapath = st.text_input(
'Path to data:',
f'{os.getcwd()}/data'
)
indices = Indices()
pager = Pager(indices.count)
@st.cache(allow_output_mutation=True)
def make_df():
# Creates base dataframe to store inputs.
default_eval = [''] * indices.count
default_comment = [''] * indices.count
data = {
'index': indices.values,
'eval': default_eval,
'comment': default_comment
}
return | pd.DataFrame(data) | pandas.DataFrame |
####################################################
## TO TRAIN AND TEST CLASSIFIER
## train : to train classifier with train dataset
## test : to predict labels of validation dataset
## submit: to predict labels of test dataset
####################################################
import time
import numpy as np
import pandas as pd
from copy import deepcopy
import torch
import csv
from utils import eval_metrics, pred_to_label
def train(model, n_epochs, trainloader,valloader, criterion, optimizer, scheduler,tri ,device):
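    # Train for n_epochs, log running loss/F1, evaluate on valloader each epoch, and keep the checkpoint with the best validation F1.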
best_model = deepcopy(model)
best_f1 = -np.inf
for epoch in range(n_epochs):
model.train()
start_perf_counter = time.perf_counter()
start_process_time = time.process_time()
print(f'n_epoch:{epoch}, lr:{scheduler.get_last_lr()}')
running_loss = 0.0
running_f1 = 0.0
train_loss = 0.0
train_f1 = 0.0
for i,data in enumerate(trainloader, 0):
inputs, labels = data['image'], data['label']
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
proba = torch.nn.Softmax(dim=1)(outputs)
running_loss += loss.item()
running_f1 += eval_metrics(np.array(proba.detach().cpu()),np.array(labels.detach().cpu()))
train_loss += loss.item()
train_f1 += eval_metrics(np.array(proba.detach().cpu()), np.array(labels.detach().cpu()))
if i%100 == 99:
print(f'[epoch_{epoch+1}, batch_{i+1}] loss: {running_loss/100}, f1; {running_f1/100}')
running_loss = 0.0
running_f1 = 0.0
end_perf_counter = time.perf_counter()-start_perf_counter
end_process_time = time.process_time()-start_process_time
print('train_loss:',train_loss/len(trainloader), 'train_f1:',train_f1/len(trainloader))
print(f'perf_counter : {end_perf_counter}')
print(f'process_time : {end_process_time}')
test_loss, test_f1 = test(model, valloader, criterion, device)
print('test_loss:',test_loss, 'test_f1:',test_f1)
torch.save(model.state_dict(), './model/'+tri+'_epoch'+str(epoch)+'.pth')
scheduler.step()
if test_f1 > best_f1:
best_model = deepcopy(model)
torch.save(model.state_dict(), './model/'+tri+'_best_epoch'+str(epoch)+'_'+time.strftime('%H:%M:%S')+'.pth')
return best_model
def test(model, data_loader, criterion, device):
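    # Evaluate without gradient tracking; return mean loss and mean F1 over the loader.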
model.eval()
total_loss=0
total_f1=0
with torch.no_grad():
for i,data in enumerate(data_loader, 0):
inputs, labels = data['image'], data['label']
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
total_loss += loss.item()
proba = torch.nn.Softmax(dim=1)(outputs)
total_f1 += eval_metrics(np.array(proba.cpu()),np.array( labels.cpu()))
'''for idx,_ in enumerate(labels):
print('predicted_labels:', torch.max(outputs.data, dim=1).indices[idx], 'label:', labels[idx].cpu())'''
return total_loss/len(data_loader), total_f1/len(data_loader)
def submit(model, file_name, data_loader, device):
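    # Run inference on the test loader and collect one predicted label per sample into a dataframe.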
model.eval()
results_df = pd.DataFrame()
with torch.no_grad():
for i,data in enumerate(data_loader, 0):
inputs = data['image']
code,num = data['code'], data['num']
inputs = inputs.to(device)
outputs = model.forward(inputs)
proba = torch.nn.Softmax(dim=1)(outputs)
pred = np.argmax(np.array(proba.cpu()),1)
label = pred_to_label(pred)
for idx,_ in enumerate(inputs):
row = [label[idx]]
row_df = pd.DataFrame([row])
results_df = | pd.concat([results_df, row_df]) | pandas.concat |
from flask import Flask, render_template, request, redirect, url_for, session
import os
import io
import math
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
#from werkzeug.utils import secure_filename
#from sqlalchemy import create_engine
from pulp import *
from pandas.io import sql
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import scipy.stats as st
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
from collections import defaultdict
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[1]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
        n, pint = np.histogram(x, num_bins)  # pint holds the price bin edges
y = spq['Product_Qty']
num_bins = 5
        n, dint = np.histogram(y, num_bins)  # dint holds the quantity bin edges
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
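        # Count how many observations fall into each price-bin x quantity-bin cell.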
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
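        # Keep the fitted coefficients in globals so the price-optimisation route (maxm) can reuse this demand model.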
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*lst[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
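        # Negative total revenue (price * demand); minimising it maximises revenue.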
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
        ##bar1: per-wagon utilisation - load as a % of the 205000 capacity and width as a % of 370
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
        ##pie1: total width unallocated (Wagon-No == 0) vs. allocated
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
        result1=resu.fillna(0)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
    #to get distance between customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
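        # Haversine great-circle distance in km between two lat/long points, scaled by the per-unit cost.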
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
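    # MILP formulation (capacitated facility location):
    #   production[f, c]  - integer quantity shipped from factory f to customer c
    #   factory_status[f] - binary flag, 1 if factory f is opened
    #   cap_slack[c]      - unmet demand at customer c, penalised heavily in the objective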
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
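    # Objective: transport cost + fixed cost of opened factories
    # + a large penalty (5,000,000 per unit) for any demand left unsatisfied.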
    model += pulp.lpSum(
        [DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
        + [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
        + [5000000 * cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
    print("Total Cost = ", value(model.objective))
# Getting the table for the Factorywise Allocation
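    # factoryalloc() unpacks the solved variable values; it appears to rely on PuLP
    # returning variables sorted by name (Production_*, then capslack_*, then
    # factory_status_*): the capslack block is kept as per-customer unsatisfied
    # demand, and the Production block is reshaped into a factory x customer
    # allocation matrix plus a 0/1 status matrix.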
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
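            # Quarterly aggregation: map every date to the first day of its quarter
            # (Jan/Apr/Jul/Oct) and sum all demand columns per quarter.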
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
        #if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
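        # Chronological 70/30 split: D (first 70%) is used to fit the models;
        # the remaining 30% (V) is kept aside and used by the regression model
        # for its hold-out error metrics.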
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
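        # (This MAPE variant normalises by y_pred rather than the actual values,
        #  which differs slightly from the textbook definition.)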
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
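            # The "moving average" forecast is implemented as an ARIMA(0,0,1)
            # (i.e. an MA(1)) model from statsmodels, fitted independently to each
            # demand column and extrapolated up to the 'till' date.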
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
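            # ARIMA(1,0,0) is a first-order autoregressive model: each fitted value
            # is a linear function of the previous observation.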
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
                    ratio_inc.append(int(((bs-As)/As)*100))
                return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
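            # Regression forecast: fit a simple linear trend (demand vs. time index)
            # per column with scikit-learn, score it on the hold-out set V, and
            # extrapolate the trend noofterms periods ahead.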
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
                #Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
                #now run for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
                    #Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
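            # Simple exponential smoothing with alpha = 0.5:
            #   s_t = alpha * y_t + (1 - alpha) * s_{t-1}
            # Future periods are rolled forward from the last smoothed value.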
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
                        meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
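        # Build the summary table: one 'Date' column plus one TotalDemand column per
        # technique the user selected, read back from the forecast output table and
        # joined side by side.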
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
            ############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
                    ratio_inc.append(int(((bs-As)/As)*100))
                return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
                #Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
                #now run for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
                    #Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
                        meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
            ############################for Total Demand Yearly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
                    ratio_inc.append(int(((bs-As)/As)*100))
                return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
                #Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
                #now run for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
                    #Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
                        meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputy',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputy`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('yearly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('yearly.html',sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
#############################Dashboard#######################################
#yearly
@app.route('/youtgraph', methods = ['GET','POST'])
def youtgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputy` GROUP BY `Model`")
sfile=cur.fetchall()
global yqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
yqst=qlst.values
con.close()
return render_template('ydashboard.html',qulist=yqst)
@app.route('/youtgraph1', methods = ['GET', 'POST'])
def youtgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
        qcur.execute("SELECT * FROM `demandforecastinputdata`")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputy` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
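        # Pad the predicted series with "null" placeholders for every historic date
        # before the first forecast, so the actual and predicted lines share the
        # same x-axis in the dashboard chart.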
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date']
index=np.concatenate((indx,edata['Date'].values),axis=0)
yindx=[]
for var in index:
var1 = var[:4]
yindx.append(var1)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('ydashboard.html',mon=value,qulist=yqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=yindx,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#monthly
@app.route('/moutgraph', methods = ['GET','POST'])
def moutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutput` GROUP BY `Model`")
sfile=cur.fetchall()
global mqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
mqst=qlst.values
con.close()
return render_template('mdashboard.html',qulist=mqst)
@app.route('/moutgraph1', methods = ['GET', 'POST'])
def moutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
        qcur.execute("SELECT * FROM `demandforecastinputdata`")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutput` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('mdashboard.html',mon=value,qulist=mqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#quarterly
@app.route('/qoutgraph', methods = ['GET','POST'])
def qoutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputq` GROUP BY `Model`")
sfile=cur.fetchall()
global qst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
qst=qlst.values
con.close()
return render_template('qdashboard.html',qulist=qst)
@app.route('/qoutgraph1', methods = ['GET', 'POST'])
def qoutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputq` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
        b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('qdashboard.html',mon=value,qulist=qst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
@app.route("/yearlysimulation",methods = ['GET','POST'])
def yearlysimulation():
if request.method == 'POST':
gdp=0
pi=0
ms=0
adv=0
gdp_dis=request.form.get('gdp_dis')
pi_dis=request.form.get('pi_dis')
ms_dis=request.form.get('ms_dis')
adv_dis=request.form.get('adv_dis')
min=request.form.get('min')
max=request.form.get('max')
mue=request.form.get('mue')
sig=request.form.get('sig')
cval=request.form.get('cval')
min1=request.form.get('min1')
max1=request.form.get('max1')
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
cval1=request.form.get('cval1')
min2=request.form.get('min2')
max2=request.form.get('max2')
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
cval2=request.form.get('cval2')
min3=request.form.get('min3')
max3=request.form.get('max3')
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
cval3=request.form.get('cval3')
itr= int(request.form.get('itr'))
frm = request.form.get('from')
sfrm=int(frm[:4])
to = request.form.get('to')
sto=int(to[:4])
kwargs={}
atrtable=[]
if request.form.get('gdp'):
gdp=1
atrtable.append('Gdp')
if gdp_dis == 'gdp_dis1':
min=request.form.get('min')
max=request.form.get('max')
kwargs['Gdp_dis']='Uniform'
kwargs['gdpvalues']=[min,max]
if gdp_dis == 'gdp_dis2':
mue=request.form.get('mue')
sig=request.form.get('sig')
kwargs['Gdp_dis']='Normal'
kwargs['gdpvalues']=[mue,sig]
if gdp_dis == 'gdp_dis3':
kwargs['Gdp_dis']='Random'
pass
if gdp_dis == 'gdp_dis4':
cval=request.form.get('cval')
kwargs['Gdp_dis']='Constant'
kwargs['gdpvalues']=[cval]
if request.form.get('pi'):
pi=1
atrtable.append('Pi')
if pi_dis == 'pi_dis1':
min1=request.form.get('min1')
max1=request.form.get('max1')
kwargs['Pi_dis']='Uniform'
kwargs['pivalues']=[min1,max1]
if pi_dis == 'pi_dis2':
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
kwargs['Pi_dis']='Normal'
kwargs['pivalues']=[mue1,sig1]
if pi_dis == 'pi_dis3':
kwargs['Pi_dis']='Random'
pass
if pi_dis == 'pi_dis4':
cval1=request.form.get('cval1')
kwargs['Pi_dis']='Constant'
kwargs['pivalues']=[cval1]
if request.form.get('ms'):
ms=1
atrtable.append('Ms')
if ms_dis == 'ms_dis1':
min=request.form.get('min2')
max=request.form.get('max2')
kwargs['Ms_dis']='Uniform'
kwargs['msvalues']=[min2,max2]
if ms_dis == 'ms_dis2':
mue=request.form.get('mue2')
sig=request.form.get('sig2')
kwargs['Ms_dis']='Normal'
kwargs['msvalues']=[mue2,sig2]
if ms_dis == 'ms_dis3':
kwargs['Ms_dis']='Random'
pass
if ms_dis == 'ms_dis4':
cval=request.form.get('cval2')
kwargs['Ms_dis']='Constant'
kwargs['msvalues']=[cval2]
if request.form.get('adv'):
adv=1
atrtable.append('Adv')
if adv_dis == 'adv_dis1':
min=request.form.get('min3')
max=request.form.get('max3')
kwargs['Adv_dis']='Uniform'
kwargs['advvalues']=[min3,max3]
if adv_dis == 'adv_dis2':
mue=request.form.get('mue3')
sig=request.form.get('sig3')
kwargs['Adv_dis']='Normal'
kwargs['advvalues']=[mue3,sig3]
if adv_dis == 'adv_dis3':
kwargs['Adv_dis']='Random'
pass
if adv_dis == 'adv_dis4':
cval=request.form.get('cval3')
kwargs['Adv_dis']='Constant'
kwargs['advvalues']=[cval3]
#print(kwargs)
#print(atrtable)
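        # Illustrative shape of what is collected above (example values, not taken from the form):
        # atrtable = ['Gdp', 'Pi']
        # kwargs   = {'Gdp_dis': 'Normal',  'gdpvalues': [mue, sig],
        #             'Pi_dis':  'Uniform', 'pivalues':  [min1, max1]}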
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `stech` (`gdp` VARCHAR(1),`pi` VARCHAR(1), `ms` VARCHAR(1),`adv` VARCHAR(1),`itr` VARCHAR(5),`sfrm` VARCHAR(10),`sto` VARCHAR(10))")
cur.execute("DELETE FROM `stech`")
con.commit()
cur.execute("INSERT INTO `stech` VALUES('"+str(gdp)+"','"+str(pi)+"','"+str(ms)+"','"+str(adv)+"','"+str(itr)+"','"+str(sfrm)+"','"+str(sto)+"')")
con.commit()
data = pd.DataFrame(Yeardata)
#print(data)
data.columns
xvar=pd.concat([data['GDP'],data['Pi_Exports'],data['Market_Share'],data['Advertisement_Expense']],axis=1)
yvar= | pd.DataFrame(data['TotalDemand']) | pandas.DataFrame |
from rdkit import Chem
from rdkit.Chem import rdmolops, rdMolDescriptors, Crippen, GraphDescriptors
import numpy as np
import pandas as pd
import pkg_resources
import sys
def crippenHContribs(mol,contribs):
"""Adds Crippen molar refractivity atomic contributions from attached H atoms to a heavy atom's contribution"""
res = 0.0
ccontribs = []
for i,at in enumerate(mol.GetAtoms()):
if at.GetAtomicNum() != 1: #look through heavy atoms
contrib = contribs[i]
for nbr in at.GetNeighbors():
if nbr.GetAtomicNum()==1: #look at attached hydrogens
contrib += contribs[nbr.GetIdx()]
res += contrib
ccontribs.append(contrib)
return res,ccontribs
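# Example usage sketch (assumes per-atom Crippen molar-refractivity contributions, e.g. the second
# element of each tuple returned by rdMolDescriptors._CalcCrippenContribs on an H-explicit molecule):
# mol = Chem.AddHs(Chem.MolFromSmiles('CCO'))
# mr_contribs = [c[1] for c in rdMolDescriptors._CalcCrippenContribs(mol)]
# total_mr, heavy_atom_contribs = crippenHContribs(mol, mr_contribs)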
def degreeContribs(mol):
contribs = []
for i,at in enumerate(mol.GetAtoms()): #grab all atom contributions
deg = at.GetDegree()
contribs.append(deg)
return contribs
def load_mcgowan():
stream = pkg_resources.resource_stream(__name__, 'mcgowan.csv')
return | pd.read_csv(stream) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This is to find out why my R peaks at a value around 2021-07-01, that is
much higher than RIVM's.
Created on Fri Jul 23 12:52:53 2021
@author: hk_nien
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tools
import nlcovidstats as nlcs
def get_Rt_rivm(mindate, maxdate):
"""Return Series with R(rivm). Note timestamps are always at time 12:00:00."""
df_rivm = nlcs.DFS['Rt_rivm'].copy()
# get 4 days extra from 'prognosis'
prog = df_rivm.loc[df_rivm['R'].isna()].iloc[:4]
prog_R = np.around(np.sqrt(prog['Rmin']*prog['Rmax']), 2)
df_rivm.loc[prog_R.index, 'R'] = prog_R
R_rivm = df_rivm.loc[~df_rivm['R'].isna(), 'R']
return R_rivm.loc[(R_rivm.index >= mindate) & (R_rivm.index <= maxdate)]
def get_Rt_mine(mindate, maxdate, slide_delay=True, cdf=None):
"""Return my Rt estimate, sampled at 12:00 daily.
Optionally provide cdf as test case; DataFrame with time index and
'Delta7r' column (7-day rolling average daily positive cases).
"""
from scipy.interpolate import interp1d
delay = nlcs.DELAY_INF2REP if slide_delay else 4.0
if cdf is None:
cdf, _npop = nlcs.get_region_data('Nederland', lastday=-1, correct_anomalies=True)
Rdf = nlcs.estimate_Rt_df(cdf['Delta7r'].iloc[10:], delay=delay, Tc=4.0)
r_interp = interp1d(
Rdf.index.astype(np.int64), Rdf['Rt'], bounds_error=False,
fill_value=(Rdf['Rt'].iloc[0], Rdf['Rt'].iloc[-1])
)
tlims = [pd.to_datetime(t).strftime('%Y-%m-%dT12:00')
for t in [mindate, maxdate]
]
index = pd.date_range(*tlims, freq='1d')
R_mine = pd.Series(r_interp(index.astype(int)), index=index)
return R_mine
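# e.g. get_Rt_mine('2021-06-01', '2021-07-15') returns a daily series sampled at 12:00, linearly
# interpolated from the Rt estimate and clamped to the first/last available values outside its range.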
def get_Rt_test_case(mindate, maxdate, case='step', slide_delay=True):
index = pd.date_range('2021-01-01', 'now', freq='1d')
cdf = pd.DataFrame(index=index + | pd.Timedelta(4, 'd') | pandas.Timedelta |
'''
This is a script for editing OS's ITN Road Network shapefiles so that:
- the attributeds include an id for the 'to' and 'from' nodes
- line strings are duplicated along links that are bidirectional
Purpose of this script is to use the extracted orientation information from the gml data to edit the roads linesting shape file such that
two way road are duplicated with one linestring orientated in one direction and the other orientated in another direction.
'''
import json
import numpy as np
import pandas as pd
import geopandas as gpd
import os
import networkx as nx
import osmnx
from shapely.geometry import Point, MultiPoint, Polygon, MultiPolygon, LineString, MultiLineString
from shapely import ops
import itertools
import copy
import fiona
######################
#
#
# Functions
#
#
######################
import math
def make_linestring_coords_2d(l):
new_coords = []
for c in l.coords:
if len(c) > 1:
new_coords.append((c[0],c[1]))
return LineString(new_coords)
def dot(vA, vB):
return vA[0]*vB[0]+vA[1]*vB[1]
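# e.g. dot((1, 2), (3, 4)) -> 11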
def ang(lineA, lineB):
# Get nicer vector form
lACoords = lineA.coords
lBCoords = lineB.coords
vA = [(lACoords[0][0]-lACoords[1][0]), (lACoords[0][1]-lACoords[1][1])]
vB = [(lBCoords[0][0]-lBCoords[1][0]), (lBCoords[0][1]-lBCoords[1][1])]
# Get dot prod
dot_prod = dot(vA, vB)
# Get magnitudes
magA = dot(vA, vA)**0.5
magB = dot(vB, vB)**0.5
# Get cosine value
cos_ = round(dot_prod/magA/magB, 4)
# Get angle in radians and then convert to degrees
angle = math.acos(cos_)
# Basically doing angle <- angle mod 360
ang_deg = math.degrees(angle)%360
if ang_deg-180>=0:
# As in if statement
return 360 - ang_deg
else:
return ang_deg
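# e.g. ang(LineString([(0, 0), (1, 0)]), LineString([(0, 0), (0, 1)])) -> 90.0 (perpendicular segments)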
def group(lst, n, overlapp = 1):
    '''Group a list into groups of size n, with overlapp items overlapping between consecutive groups
'''
for i in range(0, len(lst), n - overlapp):
val = lst[i:i+n]
if len(val) == n:
yield tuple(val)
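# e.g. list(group([1, 2, 3, 4], 2, overlapp=1)) -> [(1, 2), (2, 3), (3, 4)]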
def split_line(l, npoints = 2):
'''Takes a list string geometry and splits it into subline strings made up of npoints.
Useful for simplifying a linestring geometry
'''
coord_pairs = group(l.coords, npoints, overlapp = 1)
for cpair in coord_pairs:
yield LineString(cpair)
def simplify_line_angle(l, angle_threshold = 10):
    '''Break a linestring into components such that the angular deviation along each component
    is below the threshold. Also simplify the component linestrings to two coordinates only, so that
    each output line string acts as a maximal unit of angular deviation in the cleaned road network.
    Note: these two cleaning operations could arguably be separated.
'''
simplified_lines = []
split_lines = split_line(l, 2)
la = next(split_lines)
lb = None
c1 = la.coords[0]
angle = 0.0
for lb in split_lines:
angle+=abs(ang(la,lb))
# If angle has reached threshold create simplified linestring
if angle >= angle_threshold:
c2 = la.coords[-1]
simplified_lines.append(LineString((c1,c2)))
c1 = c2
angle = 0
la = lb
c2 = lb.coords[-1]
simplified_lines.append(LineString((c1,c2)))
return simplified_lines
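# e.g. a right-angled polyline LineString([(0, 0), (1, 0), (1, 1)]) with angle_threshold=10 comes back
# as two straight two-point segments, split at the 90-degree corner.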
def break_line_by_angle(l, angle_threshold = 10, min_link_length = 15):
    '''Break a linestring into components such that the angular deviation along each component
    is below the threshold and each component is at least min_link_length long. Also simplify the
    component linestrings to two coordinates only, so that each output line string acts as a maximal
    unit of angular deviation in the cleaned road network.
    Note: these two cleaning operations could arguably be separated.
'''
simplified_lines = []
split_lines = list(split_line(l, 2))
la = split_lines[0]
lb = None
c1 = la.coords[0]
angle = 0.0
dist = la.length
break_line = False
for i in range(1, len(split_lines)):
lb = split_lines[i]
angle+=abs(ang(la,lb))
# First check if enough distance covered or angular deviation to break link, if not continue
if (dist>min_link_length) & (angle >= angle_threshold):
# Also need to check that next link will surpass distance threshold
remaining_dist = lb.length
for j in range(i+1, len(split_lines)):
remaining_dist += split_lines[j].length
if (remaining_dist > min_link_length):
break_line = True
# If angle and distance conditions satisfied create simplified linestring
if break_line:
c2 = la.coords[-1]
simplified_lines.append(LineString((c1,c2)))
c1 = c2
angle = 0
dist = 0
break_line = False
la = lb
dist += la.length
c2 = lb.coords[-1]
simplified_lines.append(LineString((c1,c2)))
return simplified_lines
# Disolved geometries are multi polygons, explode to single polygons
def simplify_line_gdf_by_angle(indf, angle_threshold, id_col, new_id_col):
outdf = gpd.GeoDataFrame(columns=indf.columns)
outdf[new_id_col] = np.nan
for idx, row in indf.iterrows():
l = row['geometry']
if len(l.coords) == 2:
row[new_id_col] = row[id_col]
outdf = outdf.append(row,ignore_index=True)
else:
simplified_lines = simplify_line_angle(l, angle_threshold)
multdf = gpd.GeoDataFrame(columns=indf.columns)
nlines = len(simplified_lines)
multdf = multdf.append([row]*nlines,ignore_index=True)
for i in range(nlines):
multdf.loc[i,'geometry'] = simplified_lines[i]
multdf.loc[i, new_id_col] = multdf.loc[i, id_col] + "_{}".format(i)
outdf = outdf.append(multdf,ignore_index=True)
return outdf
def _is_point_in_nodes(p, nodes):
# Search new nodes for this coordinate
point_in_node = False
for v in nodes.values():
if (p.x == v['x']) & (p.y == v['y']):
# This coordiante has already been recorded as a new node
point_in_node = True
break
return point_in_node
def _node_data_from_point(p):
d = {}
d['x'] = p.x
d['y'] = p.y
d['geometry'] = p
return d
def _match_nodes_to_geometry(g, u, v, graph_nodes, other_nodes):
"""Find start and end nodes for the new edge geometry.
First check if input u and v nodes match start and end point of geometry. Then search through input list of nodes.
Allows distingusing between nodes that already are in a graph (u and v) and node that need to be added.
"""
new_u = None
new_u_data = None
if (g.coords[0][0] == graph_nodes[u]['x']) & (g.coords[0][1] == graph_nodes[u]['y']):
new_u = u
new_u_data = graph_nodes[new_u]
elif (g.coords[0][0] == graph_nodes[v]['x']) & (g.coords[0][1] == graph_nodes[v]['y']):
new_u = v
new_u_data = graph_nodes[new_u]
else:
# Find which newly created nodes matched the end of this line string
for node_id, node_data in other_nodes.items():
if (g.coords[0][0] == node_data['x']) & (g.coords[0][1] == node_data['y']):
new_u = node_id
new_u_data = node_data
break
new_v = None
new_v_data = None
if (g.coords[-1][0] == graph_nodes[u]['x']) & (g.coords[-1][1] == graph_nodes[u]['y']):
new_v = u
new_v_data = graph_nodes[new_v]
elif (g.coords[-1][0] == graph_nodes[v]['x']) & (g.coords[-1][1] == graph_nodes[v]['y']):
new_v = v
new_v_data = graph_nodes[new_v]
else:
# Find which newly created nodes matched the end of this line string
for node_id, node_data in other_nodes.items():
if (g.coords[-1][0] == node_data['x']) & (g.coords[-1][1] == node_data['y']):
new_v = node_id
new_v_data = node_data
break
return (new_u, new_u_data), (new_v, new_v_data)
# Disolved geometries are multi polygons, explode to single polygons
def break_edges_by_angle(G, angle_threshold, min_link_length, id_col, new_id_col):
"""Given and input graph, break up edges that contain more than two coordinates where
the angle between edge segments is greater thatn the input angle threshold.
"""
H = nx.MultiGraph()
H.graph = G.graph
new_nodes = {} # Record the points of intersections between edge geometries and use these as addition nodes
nodes = G.nodes(data=True)
new_node_index = 0
# loop over the edges in the graph
for e in G.edges(data=True, keys = True):
e_data = copy.deepcopy(e[-1])
g = e_data['geometry']
if len(g.coords) == 2:
e_data[new_id_col] = e_data[id_col]
u_data = nodes[e[0]]
v_data = nodes[e[1]]
H.add_node(e[0], **u_data) # If node has already been added the graph will be unchanged
H.add_node(e[1], **v_data) # If node has already been added the graph will be unchanged
H.add_edge(e[0], e[1], key = e[2], **e_data)
else:
# Break geometry into component edges
component_geoms = break_line_by_angle(g, angle_threshold, min_link_length)
# Add these component edges to the graph
for i, cg in enumerate(component_geoms):
comp_e_data = copy.deepcopy(e_data)
comp_e_data[new_id_col] = "{}_{}".format(comp_e_data[id_col], i)
comp_e_data['geometry'] = cg
# Add start and end points of geometry to new nodes
for p in (Point(cg.coords[0]), Point(cg.coords[-1])):
point_in_nodes = _is_point_in_nodes(p, new_nodes)
if point_in_nodes:
continue
else:
node_id = "break_angle_node_{}".format(new_node_index)
new_node_index += 1
new_nodes[node_id] = _node_data_from_point(p)
# Get the nodes for this geometry
(new_u, new_u_data), (new_v, new_v_data) = _match_nodes_to_geometry(cg, e[0], e[1], nodes, new_nodes)
# Finally add the edge to the new graph
if (new_u is None) | (new_v is None):
print("New nodes not found for edge {}".format(e))
else:
H.add_node(new_v, **new_v_data) # If node has already been added the graph will be unchanged
H.add_node(new_u, **new_u_data) # If node has already been added the graph will be unchanged
H.add_edge(new_u, new_v, **comp_e_data)
return H
def largest_connected_component_nodes_within_dist(G, source_node, dist, weight):
if G.is_directed():
lccNodes = max(nx.weakly_connected_components(G), key=len)
else:
lccNodes = max(nx.connected_components(G), key=len)
lccG = G.subgraph(lccNodes).copy()
shortest_paths = nx.single_source_dijkstra(lccG, source_node, target=None, cutoff=dist, weight=weight)
reachable_nodes = shortest_paths[0].keys()
return reachable_nodes
def nodes_gdf_from_edges_gdf(gdf_edges, u, v):
    '''Given a geo data frame of network edges with LineString geometries, extract the start and end points of the
LineString geometries and create a nodes geo data frame from these.
Set the u and v columns of the edges geo data frame to the corresponding node ids
----------
edges_gdf : geopandas.GeoDataFrame
input edges
u : str
column to use for u node id
v : str
column to use for v node id
Returns
-------
tuple
(gdf_nodes, gdf_edges)
'''
gdf_edges['c1'] = gdf_edges['geometry'].map(lambda g: Point(g.coords[0]))
    gdf_edges['c2'] = gdf_edges['geometry'].map(lambda g: Point(g.coords[-1]))  # last coordinate, i.e. the end point
node_geoms = pd.concat([gdf_edges['c1'], gdf_edges['c2']])
node_coords = node_geoms.map(lambda x: x.coords[0]).drop_duplicates()
node_geoms = node_coords.map(lambda x: Point(x))
node_ids = ['or_node_{}'.format(i) for i in np.arange(len(node_geoms))]
gdf_nodes = gpd.GeoDataFrame({'node_fid': node_ids, 'geometry':node_geoms})
gdf_nodes.crs = gdf_edges.crs
# Join nodes to edges on coordinate
gdf_edges = gdf_edges.set_geometry("c1")
gdf_edges = gpd.geopandas.sjoin(gdf_edges, gdf_nodes, how='inner', op='intersects', lsuffix='left', rsuffix='right')
assert gdf_edges['node_fid'].isnull().any() == False
gdf_edges.rename(columns={'node_fid':u}, inplace=True)
gdf_edges = gdf_edges.drop(['index_right'], axis = 1)
gdf_edges = gdf_edges.set_geometry("c2")
gdf_edges = gpd.geopandas.sjoin(gdf_edges, gdf_nodes , how='inner', op='intersects', lsuffix='left', rsuffix='right')
assert gdf_edges['node_fid'].isnull().any() == False
gdf_edges.rename(columns={'node_fid':v}, inplace=True)
gdf_edges = gdf_edges.drop(['index_right'], axis = 1)
# Tidy up
gdf_edges = gdf_edges.set_geometry("geometry")
gdf_edges = gdf_edges.drop(["c1", "c2"], axis = 1)
return gdf_nodes, gdf_edges
def simplify_graph(G, strict=True, remove_rings=True, rebuild_geoms = False):
"""
Simplify a graph's topology by removing interstitial nodes.
Simplifies graph topology by removing all nodes that are not intersections
or dead-ends. Create an edge directly between the end points that
encapsulate them, but retain the geometry of the original edges, saved as
a new `geometry` attribute on the new edge. Note that only simplified
edges receive a `geometry` attribute. Some of the resulting consolidated
edges may comprise multiple OSM ways, and if so, their multiple attribute
values are stored as a list.
Parameters
----------
G : networkx.MultiDiGraph
input graph
strict : bool
if False, allow nodes to be end points even if they fail all other
rules but have incident edges with different OSM IDs. Lets you keep
nodes at elbow two-way intersections, but sometimes individual blocks
have multiple OSM IDs within them too.
remove_rings : bool
if True, remove isolated self-contained rings that have no endpoints
Returns
-------
G : networkx.MultiDiGraph
topologically simplified graph, with a new `geometry` attribute on
each simplified edge
"""
if "simplified" in G.graph and G.graph["simplified"]: # pragma: no cover
raise Exception("This graph has already been simplified, cannot simplify it again.")
osmnx.utils.log("Begin topologically simplifying the graph...")
# make a copy to not mutate original graph object caller passed in
G = G.copy()
initial_node_count = len(G)
initial_edge_count = len(G.edges)
all_nodes_to_remove = []
all_edges_to_add = []
# generate each path that needs to be simplified
for path in osmnx.simplification._get_paths_to_simplify(G, strict=strict):
# add the interstitial edges we're removing to a list so we can retain
# their spatial geometry
edge_attributes = dict()
for u, v in zip(path[:-1], path[1:]):
# there should rarely be multiple edges between interstitial nodes
# usually happens if OSM has duplicate ways digitized for just one
# street... we will keep only one of the edges (see below)
if G.number_of_edges(u, v) != 1:
osmnx.utils.log(f"Found multiple edges between {u} and {v} when simplifying")
# get edge between these nodes: if multiple edges exist between
# them (see above), we retain only one in the simplified graph
edge = G.edges[u, v, 0]
for key in edge:
if key in edge_attributes:
# if this key already exists in the dict, append it to the
# value list
if isinstance(edge[key], (list, tuple)):
for item in edge[key]:
edge_attributes[key].append(item)
else:
edge_attributes[key].append(edge[key])
else:
# if this key doesn't already exist, but the value is a list set the value to the edge value
# otherwise set the value to a list containing the one value
if isinstance(edge[key], (list, tuple)):
edge_attributes[key] = list(edge[key])
else:
edge_attributes[key] = [edge[key]]
for key in edge_attributes:
# don't touch the length or geometry attribute, we'll sum it at the end
if key in ["length", "geometry"]:
continue
elif len(set(edge_attributes[key])) == 1:
# if there's only 1 unique value in this attribute list,
# consolidate it to the single value (the zero-th)
edge_attributes[key] = edge_attributes[key][0]
else:
# otherwise, if there are multiple values, keep one of each value
edge_attributes[key] = list(set(edge_attributes[key]))
# construct the geometry and sum the lengths of the segments
if rebuild_geoms:
edge_attributes["geometry"] = LineString(
[Point((G.nodes[node]["x"], G.nodes[node]["y"])) for node in path]
)
edge_attributes["length"] = sum(edge_attributes["length"])
else:
# Create single geometry from the coordinates of the component geometries
merged_line = ops.linemerge(MultiLineString(edge_attributes["geometry"]))
edge_attributes["geometry"] = merged_line
edge_attributes["length"] = merged_line.length
# add the nodes and edges to their lists for processing at the end
all_nodes_to_remove.extend(path[1:-1])
all_edges_to_add.append(
{"origin": path[0], "destination": path[-1], "attr_dict": edge_attributes}
)
# for each edge to add in the list we assembled, create a new edge between
# the origin and destination
for edge in all_edges_to_add:
G.add_edge(edge["origin"], edge["destination"], **edge["attr_dict"])
# finally remove all the interstitial nodes between the new edges
G.remove_nodes_from(set(all_nodes_to_remove))
if remove_rings:
# remove any connected components that form a self-contained ring
# without any endpoints
wccs = nx.weakly_connected_components(G)
nodes_in_rings = set()
for wcc in wccs:
if not any(osmnx.simplification._is_endpoint(G, n) for n in wcc):
nodes_in_rings.update(wcc)
G.remove_nodes_from(nodes_in_rings)
# mark graph as having been simplified
G.graph["simplified"] = True
msg = (
f"Simplified graph: {initial_node_count} to {len(G)} nodes, "
f"{initial_edge_count} to {len(G.edges)} edges"
)
osmnx.utils.log(msg)
return G
def break_overlapping_edges(G, id_attr = 'strg_id'):
"""Used to deal with edges geometries that overlap. Create nodes at intersection between edges
and break edge geometries at these points. Rebuild graph by assigning nodes to start and end of graph.
--------------
Returns:
geopandas.GeoDataFrame, geopandas.GeoDataFrame
gdfNodes, gdfLinks
"""
print("Breaking overlapping edges")
H = nx.MultiGraph()
H.graph = G.graph
new_nodes = {} # Record the points of intersections between edge geometries and use these as addition nodes
nodes = G.nodes(data=True)
new_node_index = 0
for n in nodes:
u = n[0]
# Now loop through pairs of edges attached to this node
for e1, e2 in itertools.combinations(G.edges(u,data=True, keys=True), 2):
# Unpack for ease
u1,v1,k1,d1 = e1
u2,v2,k2,d2 = e2
g1 = copy.deepcopy(d1['geometry'])
g2 = copy.deepcopy(d2['geometry'])
# Remove overlaps between geometries
g1 = g1.difference(g2)
if (g1.is_empty) & ~(g2.is_empty):
print("Empty line. e1:{}, e2:{}".format((u1,v1), (u2,v2)))
continue
elif ~(g1.is_empty) & (g2.is_empty):
print("Empty line. e1:{}, e2:{}".format((u1,v1), (u2,v2)))
continue
# Find intersecting points of geometries
intersection = g1.intersection(g2)
if isinstance(intersection, Point):
#node_data['geometry'].append(intersection)
intersection = MultiPoint([intersection])
elif isinstance(intersection, MultiPoint):
pass
else:
print("Unexpected intersection. e1:{}, e2:{}".format((u1,v1,k1), (u2,v2,k2)))
# Add intersection points to dict of new nodes
for p in intersection:
point_in_nodes = _is_point_in_nodes(p, new_nodes)
if point_in_nodes:
continue
else:
node_id = "overlap_node_{}".format(new_node_index)
new_node_index += 1
new_nodes[node_id] = _node_data_from_point(p)
# Create multipoint geometry containing all the nodes, use this to split edges
points = [Point(n[-1]['x'],n[-1]['y']) for n in nodes]
points += [v['geometry'] for k,v in new_nodes.items()]
mp = MultiPoint(points)
# Split edge geometries by the intersecting points
for e in G.edges(data=True, keys=True):
data = copy.deepcopy(e[-1])
g = data['geometry']
new_edge_index = 0
for new_geom in ops.split(g, mp):
# Find start and end nodes for the new edge geometry
# Although start and end coords might match points of intersection, retain original node IDs where possible
# Means that only intersection node sthat are used should be added to the graph, otherwise will get duplicated node geometries
(new_u, new_u_data), (new_v, new_v_data) = _match_nodes_to_geometry(new_geom, e[0], e[1], nodes, new_nodes)
# Finally add the edge to the new graph
if (new_u is None) | (new_v is None):
print("New nodes not found for edge {}".format(e))
else:
H.add_node(new_v, **new_v_data) # If node has already been added the graph will be unchanged
H.add_node(new_u, **new_u_data) # If node has already been added the graph will be unchanged
new_data = data
new_data['geometry'] = new_geom
new_data['sub_'+id_attr] = "sub_{}".format(new_edge_index)
new_edge_index += 1
H.add_edge(new_u, new_v, **new_data)
return H
def remove_duplicated_edges(G):
"""Breaking up edges where they overlap can create multiple links between the same two nodes with the same geometry.
    These are considered duplicate links. This function removes these links from the graph.
"""
D = G.copy()
# Loop through node pairs, if multiple keys check if geometry is duplicated and delete if so
for u in D.nodes():
if isinstance(G, nx.MultiGraph):
neighbours = set(D.neighbors(u))
elif isinstance(G, nx.MultiDiGraph):
neighbours = set(list(D.predecessors(u)) + list(D.successors(u)))
for v in neighbours:
keys = list(D[u][v].keys())
if len(keys)==1:
# no multi edge between these nodes
continue
else:
# Check for duplicate geometries
geoms = [D[u][v][k]['geometry'] for k in keys]
for k1, k2 in itertools.combinations(keys, 2):
try:
g1 = D[u][v][k1]['geometry']
g2 = D[u][v][k2]['geometry']
except KeyError as ke:
# Get key error if an edge has been removed
continue
if g1.equals(g2):
D.remove_edge(u,v,key = k2)
# Check there is still an edge between u and v
try:
e = D[u][v]
except KeyError:
assert False
return D
def duplicate_geometry_row_ids(gdf, geometry = 'geometry'):
dup_ids = []
for ix, row in gdf.iterrows():
g1 = row[geometry]
temp = [ix]
for ix_, row_ in gdf.loc[ix:,].iterrows():
if ix_ == ix:
continue
g2 = row_[geometry]
if g1.equals(g2):
temp.append(ix_)
if len(temp) > 1:
dup_ids.append(temp)
return dup_ids
def drop_duplicate_geometries(gdf, geometry = 'geometry', **kwargs):
duplicated_ids = duplicate_geometry_row_ids(gdf, geometry = geometry)
for id_group in duplicated_ids:
gdf = gdf.drop(id_group[1:], axis=0, errors = 'ignore') # Keep the first entry, ignore errors since possible that this will try to drop the same row multiple times
return gdf
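# e.g. gdf = drop_duplicate_geometries(gdf) keeps only the first row of each group of rows whose
# geometries are exactly equal (shapely .equals) and drops the rest.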
######################
#
#
# Initialise variables and paths to data inputs and outputs
#
#
#####################
projectCRS = "epsg:27700"
with open("config.json") as f:
config = json.load(f)
gis_data_dir = config['gis_data_dir']
itn_file = os.path.join(gis_data_dir, config['mastermap_itn_file'])
open_roads_link_file = os.path.join(gis_data_dir, config['open_roads_link_file'])
open_roads_node_file = os.path.join(gis_data_dir, config['open_roads_node_file'])
poi_file = os.path.join(gis_data_dir, config['poi_file'])
output_directory = os.path.join(gis_data_dir, "processed_gis_data")
if os.path.isdir(output_directory) == False:
os.mkdir(output_directory)
selection_layer_file = os.path.join(gis_data_dir, config['clip_file'])
output_itn_link_file = os.path.join(output_directory, config["mastermap_link_processed_file"])
output_itn_node_file = os.path.join(output_directory, config["mastermap_node_processed_file"])
output_or_link_file = os.path.join(output_directory, config["openroads_link_processed_file"])
output_or_node_file = os.path.join(output_directory, config["openroads_node_processed_file"])
def run():
###########################
#
#
# Load Data
#
#
##########################
# Mastermap ITN data - for road network
gdfITNLink = gpd.read_file(itn_file, layer = "RoadLink")
if gdfITNLink.crs is None:
gdfITNLink.crs = projectCRS
else:
assert gdfITNLink.crs.to_string().lower() == projectCRS
gdfITNNode = gpd.read_file(itn_file, layer = "RoadNode")
if gdfITNNode.crs is None:
gdfITNNode.crs = projectCRS
else:
assert gdfITNNode.crs.to_string().lower() == projectCRS
# OS Open Road - for ped road network
gdfORLink = gpd.read_file(open_roads_link_file)
if gdfORLink.crs is None:
gdfORLink.crs = projectCRS
else:
assert gdfORLink.crs.to_string().lower() == projectCRS
gdfORNode = gpd.read_file(open_roads_node_file)
if gdfORNode.crs is None:
gdfORNode.crs = projectCRS
else:
assert gdfORNode.crs.to_string().lower() == projectCRS
c = fiona.open(poi_file)
gdfPOIs = gpd.GeoDataFrame.from_features(c)
if gdfPOIs.crs is None:
gdfPOIs.crs = projectCRS
else:
assert gdfPOIs.crs.to_string().lower() == projectCRS
# Study area polygon - to select data within the study area
centre_poi = gdfPOIs.loc[gdfPOIs['ref_no'] == config['centre_poi_ref']]
centre_poi_geom = centre_poi['geometry'].values[0]
seriesStudyArea = centre_poi.buffer(config['study_area_dist'])
seriesStudyArea.to_file(os.path.join(gis_data_dir, "study_area.shp"))
studyPolygon = seriesStudyArea.values[0]
################################
#
# First clip ITN and OR networks to study area buffer, reduces size of data, makes it easier to work with
# Select the ITN Road Network that lies in the study area
#
# Need to use study polygon rather than network distance method because at this stage the ITN network has not been created.
# This is done in the script makeITNdirectional.py
#
################################
# Select only the polygons that intersect or lie within the junc clip area
gdfITNLink = gdfITNLink.loc[ (gdfITNLink.geometry.intersects(studyPolygon)) | (gdfITNLink.geometry.within(studyPolygon))]
gdfITNNode = gpd.sjoin(gdfITNNode, gdfITNLink.loc[:,['fid','geometry']], op = 'intersects', lsuffix = 'node', rsuffix = 'line')
# Clean up
gdfITNNode.drop(['fid_line', 'index_line'], axis = 1, inplace=True)
gdfITNNode.drop_duplicates(inplace = True)
gdfITNNode.rename(columns = {'fid_node':'fid'}, inplace = True)
# Repeat for Open Roads
gdfORLink = gdfORLink.loc[ (gdfORLink.geometry.intersects(studyPolygon)) | (gdfORLink.geometry.within(studyPolygon))]
gdfORNode = gpd.sjoin(gdfORNode, gdfORLink.loc[:,['identifier','geometry']], op = 'intersects', lsuffix = 'node', rsuffix = 'line')
# Clean up
gdfORNode.drop(['identifier_line', 'index_line'], axis = 1, inplace=True)
gdfORNode.drop_duplicates(inplace = True)
gdfORNode.rename(columns = {'identifier_node':'identifier'}, inplace = True)
##############################
#
# Select the OS Open Road data that lies in the study area
#
# OS Open Road Data is used for pedestrian routing. Less detail on lanes and roundabouts so more suitable for peds
#
##############################
# Get into format required for osmnx compliant graph
gdfORLink = gdfORLink[ gdfORLink['geometry'].type == "LineString"]
# Handle multi links
gdfORLink['key'] = None
gdfORLink['key'] = gdfORLink.groupby(['startNode','endNode'])['key'].transform(lambda df: np.arange(df.shape[0]))
assert gdfORLink.loc[:, ['startNode','endNode', 'key']].duplicated().any() == False
# Represent undirected network as directed graph
gdfORLinkReversed = gdfORLink.copy()
    gdfORLinkReversed = gdfORLinkReversed.rename(columns = {'startNode':'endNode', 'endNode':'startNode'})
gdfORLinkReversed['geometry'] = gdfORLinkReversed['geometry'].map(lambda g: LineString(g.coords[::-1]))
gdfORLink = | pd.concat([gdfORLink, gdfORLinkReversed]) | pandas.concat |
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor,RandomForestClassifier
import pickle
import scikitplot as skplt
from sklearn import tree
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix,precision_score,recall_score
from sklearn.ensemble import ExtraTreesRegressor
import seaborn as sns
from sklearn.linear_model import LogisticRegression
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn import metrics
from sklearn.neural_network import MLPClassifier
def main ():
data=pd.read_csv('Final_Classification_with_dummies.csv')
y=data.extreme_weather
data=data.drop('extreme_weather',axis=1)
data=normalize(data)
#Logistic Regression
print ("Logistic")
logistic_ExtremeWeatherConditions(data,y)
logistic_ExtremeWeatherConditions_Feature_importance(data,y)
# #
# ##Decision tree
print ("Decision Tree")
Decision_Tree_Extreme_weather(data,y)
Decision_Tree_Extreme_weather_Feature_importance(data,y)
#
    # ##Naive Bayes
print ("Naive Bayes")
naive_bayes_Extreme_weather(data,y)
naive_bayes_Extreme_weather_Feature_importance(data,y)
## KNN
print ("KNN")
KNN_Extreme_weather(data,y)
KNN_Extreme_weather_Feature_importance(data,y)
##Neural Network
print ("NN")
NN_Extreme_weather(data,y)
NN_Extreme_weather_Feature_importance(data,y)
##Random forest
RandomForest_Extreme_weather(data,y)
RandomForest_Extreme_weather_Feature_importance(data,y)
##Analysing extreme weather condition in all three seasons
count_weather_classes()
##misclassification
misclassification()
##Bagging
bagging(data,y)
##undersampling
undersampling()
##Bar_graph
# Bar_graph(a1,a2,b1,b2,c1,c2,d1,d2,e1,e2)
def normalize(data):
for c in data.columns:
mean = data[c].mean()
max = data[c].max()
min = data[c].min()
data[c] = (data[c] - min) / (max - min)
return data
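# e.g. a column with values [0, 5, 10] is rescaled to [0.0, 0.5, 1.0] (min-max scaling);
# note that the column mean computed above is not used in the scaling.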
def ROC(t,p):
    skplt.metrics.plot_roc(t, p, title="ROC Curve")
plt.show()
## Logistic Regression
def logistic_ExtremeWeatherConditions(data,y):
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
model=LogisticRegression()
model=model.fit(X_train,y_train)
print("Training Accuracy",model.score(X_train,y_train))
prediction=model.predict(X_train)
print("Precision Score:",precision_score(y_train,prediction))
print("Recall Score:",recall_score(y_train, prediction))
print("Confusion MATRIX",confusion_matrix(y_train,prediction))
ROC(y_train,model.predict_proba(X_train))
print("Testing Accuracy",model.score(X_val,y_val))
prediction=model.predict(X_val)
print("Precision Score:",precision_score(y_val,prediction))
print("Recall Score:",recall_score(y_val, prediction))
print("Confusion MATRIX",confusion_matrix(y_val,prediction))
ROC(y_val,model.predict_proba(X_val))
def logistic_ExtremeWeatherConditions_Feature_importance(data,y):
tree_clf = ExtraTreesRegressor()
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
print(features_up[:10])
import time
start=time.time()
plt.bar(range(len(features_up)), [imp[1] for imp in features_up], align='center')
plt.xticks(np.arange(len(features_up)),[x[0] for x in features_up], rotation='vertical', label='Features')
plt.yticks(label='Feature Importance')
# plt.title('features')
plt.show()
print(time.time()-start)
import time
start=time.time()
plt.bar(range(len(features_up[4:])), [imp[1] for imp in features_up[4:]], align='center')
plt.title('features')
plt.show()
print(time.time()-start)
# x = [i[0] for i in features_up]
# x=data[x]
# X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
# model=LogisticRegression()
# model=model.fit(X_train,y_train)
# print("Training Accuracy",model.score(X_train,y_train))
# prediction=model.predict(X_train)
# print("Precision Score:",precision_score(y_train,prediction))
# print("Recall Score:",recall_score(y_train, prediction))
# print("Confusion MATRIX",confusion_matrix(y_train,prediction))
# ROC(y_train,model.predict_proba(X_train))
# print("Testing Accuracy",model.score(X_val,y_val))
# prediction=model.predict(X_val)
# print("Precision Score:",precision_score(y_val,prediction))
# print("Recall Score:",recall_score(y_val, prediction))
# print("Confusion MATRIX",confusion_matrix(y_val,prediction))
# ROC(y_val,model.predict_proba(X_val))
#Predicting extreme weather using all features
def Decision_Tree_Extreme_weather(data,y):
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
model = tree.DecisionTreeClassifier()
model = model.fit(X_train,y_train)
print("Training Accuracy",model.score(X_train,y_train))
prediction=model.predict(X_train)
print("Precision Score:",precision_score(y_train,prediction))
print("Recall Score:",recall_score(y_train, prediction))
print("Confusion MATRIX",confusion_matrix(y_train,prediction))
ROC(y_train,model.predict_proba(X_train))
print("Testing Accuracy",model.score(X_val,y_val))
prediction=model.predict(X_val)
print("Precision Score:",precision_score(y_val,prediction))
print("Recall Score:",recall_score(y_val, prediction))
print("Confusion MATRIX",confusion_matrix(y_val,prediction))
ROC(y_val,model.predict_proba(X_val))
#Predicting extreme weather using features from the feature importance graph
def Decision_Tree_Extreme_weather_Feature_importance(data,y):
tree_clf = ExtraTreesRegressor()
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
x = [i[0] for i in features_up]
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
model = tree.DecisionTreeClassifier()
model = model.fit(X_train,y_train)
print("Training Accuracy",model.score(X_train,y_train))
prediction=model.predict(X_train)
print("Precision Score:",precision_score(y_train,prediction))
print("Recall Score:",recall_score(y_train, prediction))
print("Confusion MATRIX",confusion_matrix(y_train,prediction))
ROC(y_train,model.predict_proba(X_train))
print("Testing Accuracy",model.score(X_val,y_val))
prediction=model.predict(X_val)
print("Precision Score:",precision_score(y_val,prediction))
print("Recall Score:",recall_score(y_val, prediction))
print("Confusion MATRIX",confusion_matrix(y_val,prediction))
ROC(y_val,model.predict_proba(X_val))
return model
def naive_bayes_Extreme_weather(data,y):
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
model = GaussianNB()
model = model.fit(X_train,y_train)
print("Training Accuracy",model.score(X_train,y_train))
prediction=model.predict(X_train)
print("Precision Score:",precision_score(y_train,prediction))
print("Recall Score:",recall_score(y_train, prediction))
print("Confusion MATRIX",confusion_matrix(y_train,prediction))
ROC(y_train,model.predict_proba(X_train))
print("Testing Accuracy",model.score(X_val,y_val))
prediction=model.predict(X_val)
print("Precision Score:",precision_score(y_val,prediction))
print("Recall Score:",recall_score(y_val, prediction))
print("Confusion MATRIX",confusion_matrix(y_val,prediction))
ROC(y_val,model.predict_proba(X_val))
#Predicting extreme weather using features from the feature importance graph
def naive_bayes_Extreme_weather_Feature_importance(data,y):
tree_clf = ExtraTreesRegressor()
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
x = [i[0] for i in features_up]
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
model = GaussianNB()
model = model.fit(X_train,y_train)
print("Training Accuracy",model.score(X_train,y_train))
prediction=model.predict(X_train)
print("Precision Score:",precision_score(y_train,prediction))
print("Recall Score:",recall_score(y_train, prediction))
print("Confusion MATRIX",confusion_matrix(y_train,prediction))
ROC(y_train,model.predict_proba(X_train))
print("Testing Accuracy",model.score(X_val,y_val))
prediction=model.predict(X_val)
print("Precision Score:",precision_score(y_val,prediction))
print("Recall Score:",recall_score(y_val, prediction))
print("Confusion MATRIX",confusion_matrix(y_val,prediction))
ROC(y_val,model.predict_proba(X_val))
def KNN_Extreme_weather(data,y):
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
model = KNeighborsClassifier(n_neighbors=3)
model = model.fit(X_train,y_train)
print("Training Accuracy",model.score(X_train,y_train))
prediction=model.predict(X_train)
print("Precision Score:",precision_score(y_train,prediction))
print("Recall Score:",recall_score(y_train, prediction))
print("Confusion MATRIX",confusion_matrix(y_train,prediction))
ROC(y_train,model.predict_proba(X_train))
print("Testing Accuracy",model.score(X_val,y_val))
prediction=model.predict(X_val)
print("Precision Score:",precision_score(y_val,prediction))
print("Recall Score:",recall_score(y_val, prediction))
print("Confusion MATRIX",confusion_matrix(y_val,prediction))
ROC(y_val,model.predict_proba(X_val))
#Predicting extreme weather using features from the feature importance graph
def KNN_Extreme_weather_Feature_importance(data,y):
tree_clf = ExtraTreesRegressor()
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
x = [i[0] for i in features_up]
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
model = KNeighborsClassifier(n_neighbors=3)
model = model.fit(X_train,y_train)
print("Training Accuracy",model.score(X_train,y_train))
prediction=model.predict(X_train)
print("Precision Score:",precision_score(y_train,prediction))
print("Recall Score:",recall_score(y_train, prediction))
print("Confusion MATRIX",confusion_matrix(y_train,prediction))
ROC(y_train,model.predict_proba(X_train))
print("Testing Accuracy",model.score(X_val,y_val))
prediction=model.predict(X_val)
print("Precision Score:",precision_score(y_val,prediction))
print("Recall Score:",recall_score(y_val, prediction))
print("Confusion MATRIX",confusion_matrix(y_val,prediction))
ROC(y_val,model.predict_proba(X_val))
return model
def NN_Extreme_weather(data,y):
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
model=MLPClassifier()
model=model.fit(X_train,y_train)
print("Training Accuracy",model.score(X_train,y_train))
prediction=model.predict(X_train)
print("Precision Score:",precision_score(y_train,prediction))
print("Recall Score:",recall_score(y_train, prediction))
print("Confusion MATRIX",confusion_matrix(y_train,prediction))
ROC(y_train,model.predict_proba(X_train))
print("Testing Accuracy",model.score(X_val,y_val))
prediction=model.predict(X_val)
print("Precision Score:",precision_score(y_val,prediction))
print("Recall Score:",recall_score(y_val, prediction))
print("Confusion MATRIX",confusion_matrix(y_val,prediction))
ROC(y_val,model.predict_proba(X_val))
#Predicting extreme weather using features from the feature importance graph
def NN_Extreme_weather_Feature_importance(data,y, layer=(100,)): # default hidden layer size assumed, so the call in main() without this argument still works
tree_clf = ExtraTreesRegressor()
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
x = [i[0] for i in features_up]
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
    model=MLPClassifier(solver='adam', alpha=0.01, hidden_layer_sizes=layer, batch_size=500, random_state=1)
model=model.fit(X_train,y_train)
print("Training Accuracy",model.score(X_train,y_train))
prediction=model.predict(X_train)
print("Precision Score:",precision_score(y_train,prediction))
print("Recall Score:",recall_score(y_train, prediction))
print("Confusion MATRIX",confusion_matrix(y_train,prediction))
ROC(y_train,model.predict_proba(X_train))
print("Testing Accuracy",model.score(X_val,y_val))
prediction=model.predict(X_val)
print("Precision Score:",precision_score(y_val,prediction))
print("Recall Score:",recall_score(y_val, prediction))
print("Confusion MATRIX",confusion_matrix(y_val,prediction))
ROC(y_val,model.predict_proba(X_val))
def RandomForest_Extreme_weather_Feature_importance(data,y):
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
model=RandomForestClassifier(max_depth=None, random_state=0,n_estimators=100)
model=model.fit(X_train,y_train)
print("Training", model.score(X_train, y_train))
print("Testing", model.score(X_val, y_val))
prediction = model.predict(X_val)
confusionMetrics(y_val, prediction)
print("Precision Score:", precision_score(y_val, prediction))
print("Recall Score:", recall_score(y_val, prediction))
ROC(y_train, model.predict_proba(X_train))
ROC(y_val, model.predict_proba(X_val))
return model
def RandomForest_Extreme_weather(data,y):
tree_clf = ExtraTreesRegressor()
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
x = [i[0] for i in features_up]
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
model=RandomForestClassifier(max_depth=None, random_state=0,n_estimators=100)
model=model.fit(X_train,y_train)
print("Training", model.score(X_train, y_train))
print("Testing", model.score(X_val, y_val))
prediction = model.predict(X_val)
confusionMetrics(y_val, prediction)
print("Precision Score:", precision_score(y_val, prediction))
print("Recall Score:", recall_score(y_val, prediction))
ROC(y_train, model.predict_proba(X_train))
ROC(y_val, model.predict_proba(X_val))
def confusionMetrics(a, b):
print(confusion_matrix(a, b))
def misclassification():
with open("misclassified.txt", "rb") as fp: # Unpickling
misclassified = pickle.load(fp)[0]
data = pd.read_csv('Final_Classification_with_dummies.csv')
mc = [1 if i in misclassified else 0 for i in data.index]
data['ind'] = [i for i in data.index]
data['misclassified'] = mc
# data.plot.scatter(x='ind', y='AQI', c='misclassified',colormap='viridis')
# sns.heatmap(pd.crosstab(data.extreme_weather, data.year,values= data['AQI'], aggfunc="mean"),
# cmap="coolwarm", annot=True, cbar=True)
# plt.title("Average Temprature 1996-2016")
# plt.plot()
d = data.loc[11500:, :]
sns.countplot(x='extreme_weather',data=d)
plt.show()
sns.countplot(x='misclassified',data=d)
plt.show()
def bagging(data,y):
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
models = []
models.append(Decision_Tree_Extreme_weather_Feature_importance(X_train, y_train))
models.append(RandomForest_Extreme_weather_Feature_importance(X_train, y_train))
models.append(KNN_Extreme_weather_Feature_importance(X_train, y_train))
predictions=[]
print("Bagging......")
X_df = pd.DataFrame(X_val)
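    # Majority vote over the three fitted models: predict 1 only if at least 2 of the 3 models agree.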
for i in X_df.index:
x = X_df.loc[i,:].values
x=x.reshape(1, -1)
pred = []
        for j in range(len(models)):
            pred.append(models[j].predict(x))
# print(np.sum(pred))
if(np.sum(pred)>=2):
predictions.append(1)
else:
predictions.append(0)
print("bagging test Accuracy",metrics.accuracy_score(predictions,y_val))
def count_weather_classes():
data1 = pd.read_csv('Classification_autumn_data.csv')
y = data1.extreme_weather
a = y.to_numpy()
count_0 = np.where(a == 0)
count_0_autom=len(count_0[0])
count_1_autom=len(y)-count_0_autom
data1 = pd.read_csv('Classification_summer_data.csv')
y = data1.extreme_weather
a = y.to_numpy()
count_0 = np.where(a == 0)
print(len(count_0[0]))
count_0_summer=len(count_0[0])
    count_1_summer=len(y)-count_0_summer
data1 = | pd.read_csv('Classification_winter_data.csv') | pandas.read_csv |
import os
import cv2
import pandas as pd
import dataset_settings
from util import insert_into_df, write_info, resize_image
def prepare_subset_data(data_path, train_csv_path, test_csv_path, source_url):
count = {'normal': 0, 'pneumonia': 0, 'covid-19': 0}
train_csv = pd.read_csv(train_csv_path, nrows=None)
test_csv = | pd.read_csv(test_csv_path, nrows=None) | pandas.read_csv |
import plotly.graph_objs as go
import math
import pandas as pd
color_palette = ['#586BA4',
'#324376',
'#F5DD90',
'#F68E5F',
'#F76C5E']
def create_timedelta_graph(events_df):
if events_df.empty:
x_values = list()
y_values = list()
ticktext = list()
else:
events_df['delta_2'] = (
| pd.to_datetime(events_df['timestamp']) | pandas.to_datetime |
# coding: utf-8
# In[1]:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from sklearn import preprocessing
from keras.optimizers import SGD
import pandas as pd
# In[4]:
def xtrain_and_test(df_all):
'''
    Get the training data and the test data
'''
df_label = pd.read_csv('../data/public/train.csv')
df_test_label = | pd.read_csv('../data/public/evaluation_public.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.externals import joblib
from collections import OrderedDict
import json
import argparse
import os
# Add the observation (obs) information from 48 hours earlier
# Clean the RAIN values: drop values above 35
target_list=['t2m','rh2m','w10m']
from datetime import timedelta
from datetime import datetime
def datelist(beginDate, endDate):
date_l=[datetime.strftime(x,'%Y-%m-%d') for x in list(pd.date_range(start=beginDate, end=endDate))]
return date_l
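# e.g. datelist('2018-10-26', '2018-10-28') -> ['2018-10-26', '2018-10-27', '2018-10-28']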
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--log-level', dest='log_level', default='info', type=str,
help='Logging level.')
parser.add_argument('--model_dir', dest='model_dir',
default='../checkpoints/lgb_global',
type=str)
parser.add_argument('--data_dir',dest='data_dir',
default='../data/testb7/merge',type=str)
parser.add_argument('--dst_dir',dest='dst_dir',
default='../result/lgb_global')
parser.add_argument('--first_day',dest='first_day',
default='20181028',type=str)
parser.add_argument('--last_day',dest='last_day',
default='20181103',type=str)
opt = parser.parse_args()
feature_columns = ['t2m_obs', 'rh2m_obs', 'w10m_obs', 'psur_obs', 'q2m_obs', 'u10m_obs',
'v10m_obs', 'RAIN_obs',
't2m_prophet', 'rh2m_prophet', 'w10m_prophet',
't2m_M', 'rh2m_M', 'w10m_M', 'hour_sin', 'hour_cos', 'month_sin', 'month_cos',
'psfc_M', 'q2m_M', 'u10m_M', 'v10m_M',
'SWD_M', 'GLW_M', 'HFX_M', 'RAIN_M', 'PBLH_M', 'TC975_M', 'TC925_M',
'TC850_M', 'TC700_M', 'TC500_M', 'wspd925_M', 'wspd850_M', 'wspd700_M', 'wspd500_M',
'location_90001', 'location_90002', 'location_90003', 'location_90004',
'location_90005', 'location_90006', 'location_90007', 'location_90008',
'location_90009', 'location_90010']
if opt.model_dir.endswith('_q'):
feature_columns = feature_columns + ['Q975_M', 'Q925_M', 'Q850_M', 'Q700_M', 'Q500_M', 'LH_M']
history_num = 24
begin_dates = datelist(pd.to_datetime(opt.first_day)-timedelta(days=2),
| pd.to_datetime(opt.last_day) | pandas.to_datetime |
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
import numpy as np
import pandas as pd
from python.tools import (
clean_folder
)
# Formatters for LaTeX output
def f1(x):
return '%1.0f' % x
def f2(x):
return '%1.2f' % x
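# e.g. f1(3.7) -> '4' and f2(1.234) -> '1.23'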
################
## Parameters ##
################
output_folder = './regressions/output/correlate_epiestim/'
input_folder = './regressions/input/correlate_epiestim/'
min_T = 20 # Minimum number of time-series observations
###############
## Load data ##
###############
clean_folder(output_folder)
# Load data on our estimates of R
df = pd.read_csv('{}/estimated_R.csv'.format(input_folder))
df['Date'] = pd.to_datetime(df['Date'])
# Look at benchmark estimates
mask = df['days_infectious'] == 7
df = df.loc[mask, ]
# Remove World aggregate
mask = df['Country/Region'] == 'World'
df = df.loc[~mask, ]
# Load estimates of R using the Cori et al method
df_epi = pd.read_csv('{}/R_EpiEstim.csv'.format(input_folder))
## Clean up EpiEstim estimates
# Only look at country-level estimates
mask = (df_epi['resolution'] == 'country')
df_temp = df_epi.loc[mask, ].copy()
# Manually set missing codes to NaN & clean up
mask = (df_temp['Rt_plot'] == -888) | (df_temp['Rt_plot'] == -88)
df_temp.loc[mask, 'Rt_plot'] = np.nan
df_temp = df_temp[['dispID', 'date', 'Rt_plot']]
df_temp.rename(columns = {'dispID': 'Country/Region',
'date': 'Date',
'Rt_plot': 'R_EpiEstim'},
inplace = True)
df_temp['Date'] = pd.to_datetime(df_temp['Date'])
# Replace country names for consistency
# with our naming conventions
mask = (df_temp['Country/Region'] == 'Korea, South')
df_temp.loc[mask, 'Country/Region'] = 'South Korea'
mask = (df_temp['Country/Region'] == 'Taiwan*')
df_temp.loc[mask, 'Country/Region'] = 'Taiwan'
# Merge in to main dataset
df = pd.merge(df, df_temp,
on = ['Country/Region', 'Date'], how = 'left')
############################
## Calculate correlations ##
############################
res = []
for country in df['Country/Region'].unique():
mask = df['Country/Region'] == country
df_temp = df.loc[mask, ].copy()
corr = df_temp[['R', 'R_EpiEstim']].corr().values[0, 1]
N = np.min([df_temp['R'].count(), df_temp['R_EpiEstim'].count()])
res.append({'Country/Region': country,
'corr': corr,
'T': N})
res = pd.DataFrame(res)
import pandas as pd
import matplotlib.pyplot as plt
import glob
import numpy
# open btc csv clean and get ready
btc = pd.read_csv('btc_dataset.csv')
btc['Date'] = pd.to_datetime(btc['Date'])
btc = btc.set_index('Date')
Btc = pd.DataFrame(btc.loc['2015-01-01':, "Avg_price"])
# coding=utf-8
import pandas as pd
from mock import MagicMock
from sparkmagic.livyclientlib.exceptions import BadUserDataException
from nose.tools import assert_raises, assert_equals
from sparkmagic.livyclientlib.command import Command
import sparkmagic.utils.constants as constants
from sparkmagic.livyclientlib.sendpandasdftosparkcommand import (
SendPandasDfToSparkCommand,
)
def test_send_to_scala():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
maxrows = 1
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, maxrows
)
sparkcommand._scala_command = MagicMock(return_value=MagicMock())
sparkcommand.to_command(
constants.SESSION_KIND_SPARK,
input_variable_name,
input_variable_value,
output_variable_name,
)
sparkcommand._scala_command.assert_called_with(
input_variable_name, input_variable_value, output_variable_name
)
def test_send_to_r():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
maxrows = 1
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, maxrows
)
sparkcommand._r_command = MagicMock(return_value=MagicMock())
sparkcommand.to_command(
constants.SESSION_KIND_SPARKR,
input_variable_name,
input_variable_value,
output_variable_name,
)
sparkcommand._r_command.assert_called_with(
input_variable_name, input_variable_value, output_variable_name
)
def test_send_to_python():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
import pandas as pd
# bookings_to_arr
#
# Accepts a pandas dataframe containing bookings data and returns a pandas
# dataframe containing changes in ARR with the following columns:
# - date - the date of the change
# - type - the type of the change (new, upsell, downsell, and churn)
# - customer_id - the id of the customer
# - prior_arr - the ARR for the customer prior to the change
# - next_arr - the ARR for the customer following the change
# - delta_arr - the change in ARR
#
# The bookings (input) dataframe should contain the following columns:
# - date - the date of the booking (as pandas Timestamps)
# - customer_id - the unique id of the customer
# - arr - the amount of ARR booked
# - start_date - the start date of the contract (as pandas Timestamps)
# - end_date - the end date of the contract (as pandas Timestamps)
def bookings_to_arr(bookings):
intervals = bookings_to_intervals(bookings)
print(intervals)
def bookings_to_intervals(bookings):
interval_list = []
for index, row in bookings.sort_values(by=["customer_id", "start_date"]).iterrows():
interval_list.append(row)
if index == 2:
interval_list.append(row)
# print(f"Index: {index}")
# print(bookings.loc[index])
# print("\n")
return pd.DataFrame(data=interval_list)
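# Illustrative sketch (not part of the original module): once consecutive contract
# intervals are lined up per customer, one plausible way to label each ARR change
# uses only the prior and next ARR values described in the docstring above. The
# helper name and the zero-ARR rules below are assumptions, not the author's final logic.
def _classify_arr_change(prior_arr, next_arr):
    # New business: the customer had no ARR before the change.
    if prior_arr == 0 and next_arr > 0:
        return "new"
    # Churn: the customer's ARR drops to zero.
    if prior_arr > 0 and next_arr == 0:
        return "churn"
    # Otherwise ARR stays positive and moves up (upsell) or down (downsell).
    return "upsell" if next_arr > prior_arr else "downsell"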
test_bookings = pd.DataFrame.from_records(
[
{
"date": pd.Timestamp(ts_input="9/25/2019", tz="UTC"),
"customer_id": 1234,
"arr": 200,
"start_date": pd.Timestamp(ts_input="10/1/2019", tz="UTC"),
"end_date": pd.Timestamp(ts_input="9/30/2020", tz="UTC"),
},
{
"date": pd.Timestamp(ts_input="9/25/2018", tz="UTC"),
"customer_id": 1234,
"arr": 125,
"start_date": pd.Timestamp(ts_input="10/1/2018", tz="UTC"),
"end_date": pd.Timestamp(ts_input="9/30/2019", tz="UTC"),
},
{
"date": pd.Timestamp(ts_input="9/25/2020", tz="UTC"),
"customer_id": 1234,
"arr": 150,
"start_date": pd.Timestamp(ts_input="10/1/2020", tz="UTC"),
"end_date": pd.Timestamp(ts_input="9/30/2021", tz="UTC"),
},
{
"date": pd.Timestamp(ts_input="9/25/2021", tz="UTC"),
"customer_id": 1234,
"arr": 150,
"start_date": pd.Timestamp(ts_input="10/1/2021", tz="UTC"),
"end_date": pd.Timestamp(ts_input="9/30/2022", tz="UTC"),
},
{
"date": pd.Timestamp(ts_input="9/25/2018", tz="UTC"),
"customer_id": 5000,
"arr": 100,
"start_date": pd.Timestamp(ts_input="10/1/2018", tz="UTC"),
"end_date": pd.Timestamp(ts_input="9/30/2019", tz="UTC"),
},
{
"date": pd.Timestamp(ts_input="9/25/2019", tz="UTC"),
"customer_id": 5000,
"arr": 100,
"start_date": pd.Timestamp(ts_input="10/1/2019", tz="UTC"),
"end_date": pd.Timestamp(ts_input="9/30/2020", tz="UTC"),
},
{
"date": | pd.Timestamp(ts_input="9/25/2020", tz="UTC") | pandas.Timestamp |
# coding: utf-8
import tweepy
import json
import os
from datetime import datetime
import pandas as pd
import credentials.credentials_twitter as cred
class Twitter_Analysis:
""" copyright© 2019 — <NAME> - License MIT """
__consumer_key = cred.CONSUMER_KEY
__token = cred.TOKEN
__api = None
def __init__(self, dico_file, maxTweets, filename, company_name, companies_CSV_file):
self.dico_file, self.dico_found = self.open_dico(dico_file)
self.max_tweets = maxTweets
self.tweetsPerQry = 100 # Can't change that
self.fName = filename
self.companies_list = self.open_csv_companies(companies_CSV_file)
self.company_name = company_name
self.__class__.__api, self.apifound = self.__class__.authentificator()
if self.dico_found:
# This is what we're searching for :
self.searchQuery = " OR ".join(['\"' + item + '\"' for item in self.dico_file["eco_responsable"]])
self.searchQuery += " -filter:retweets AND -filter:replies"
@staticmethod
def open_dico(dico_file):
try:
with open(dico_file) as dico:
return(eval(dico.read()), True)
except:
return(0, False)
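# The dictionary file is expected to hold a Python-literal dict with (at least) an
# 'eco_responsable' key mapping to a list of search terms, e.g. (illustrative values):
# {'eco_responsable': ['eco-responsable', 'developpement durable']}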
@staticmethod
def open_csv_companies(companies_CSV_file):
try:
if companies_CSV_file is not None:
df = pd.read_csv(companies_CSV_file, encoding='utf-8', delimiter=';')
companies_list = df["companies"].tolist()
#companies_with_twitter_account = [str(x) for x in companies_list if str(x)!='nan']
return(companies_list)
else:
return([])
except:
print('No dataset found')
return([])
@classmethod
def authentificator(cls):
try:
auth = tweepy.AppAuthHandler(cls.__consumer_key, cls.__token)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
print("Authentification success !")
return api, True
#cls.__api = api
#return("sucess")
except:
print("Impossible to Authentifiate !")
#return("fail")
return None, False
def search(self):
if self.dico_found:
try:
print('Company list: '+str(self.companies_list)+'\n')
except:
pass
if len(self.companies_list)>0:
for companie in self.companies_list:
print('\n'+repr(companie)+'\n')
parameter = " AND " + repr(companie)
print('Final query: '+self.searchQuery+parameter)
self.twitter_retrieval(addParam=parameter)
elif len(str(self.company_name))>0:
parameter = " AND " + repr(self.company_name)
print('Final query: '+self.searchQuery+parameter)
self.twitter_retrieval(addParam=parameter)
else:
print('Final query: '+self.searchQuery)
self.twitter_retrieval()
return ('JSON available!', True)
else:
return ('Dictionary file missing', False)
def twitter_retrieval(self, max_id=-1, sinceId=None, addParam=''):
# default to no upper limit, start from the most recent tweet matching the search query.
tweetCount = 0
##print(max_id)
##if (not sinceId): print(2)
print("Downloading max {0} tweets".format(self.max_tweets))
with open(str(self.fName + '.json'), 'a',encoding='utf-8') as f:
while tweetCount < self.max_tweets:
try:
if (max_id <= 0):
if (not sinceId):
new_tweets = self.__class__.__api.search(q=self.searchQuery+addParam, count=self.tweetsPerQry)
else:
new_tweets = self.__class__.__api.search(q=self.searchQuery+addParam, count=self.tweetsPerQry,
since_id=sinceId)
else:
if (not sinceId):
new_tweets = self.__class__.__api.search(q=self.searchQuery+addParam, count=self.tweetsPerQry,
max_id=str(max_id - 1))
else:
new_tweets = self.__class__.__api.search(q=self.searchQuery+addParam, count=self.tweetsPerQry,
max_id=str(max_id - 1),
since_id=sinceId)
if not new_tweets:
print("No more tweets found")
break
for tweet in new_tweets:
f.write(str({k:str(tweet._json.get(k, None)) for k in ('id_str', 'created_at', 'text', 'retweeted', 'user',
'entities', 'lang', 'retweet_count', 'geo')})+"\n")
tweetCount += len(new_tweets)
print("Downloaded {0} tweets".format(tweetCount))
max_id = new_tweets[-1].id
except tweepy.TweepError as e:
# Just exit if any error
print("some error : " + str(e))
break
print ("Downloaded {0} tweets, Saved to {1}".format(tweetCount, self.fName+'.json'))
def tweets_to_dataframe(self):
##### tweet retrieval ######
if self.dico_found:
json_found=True
try:
lines = [line.rstrip('\n') for line in open(self.fName+'.json', 'r', encoding='utf-8')]
new = [json.loads(json.dumps(eval(item))) for item in lines]
df = pd.DataFrame(new)
from copy import deepcopy
from sklearn.model_selection import KFold
import numpy as np
import pandas as pd
from .data_augmentation import DACombine
from core.models.metrics import avg_loss, mse, rejection_ratio, avg_win_loss, avg_loss_ratio, loss_sum, invariance
benchmark_functions = [avg_loss, mse, rejection_ratio, avg_win_loss, avg_loss_ratio, loss_sum, invariance]
def target_to_enforced_target_values(y, enforced_target_values):
y = y.ravel()
res = np.array(y)
for (lower, higher), value in enforced_target_values.items():
res[(lower<y) & (y<=higher)] = value
return res
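# Example (illustrative): with enforced_target_values={(0, 5): 0, (5, 10): 10},
# every target y with 0 < y <= 5 is replaced by 0 and every y with 5 < y <= 10 by 10;
# values outside all intervals are left unchanged.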
def process_model(model, xTrain, yTrain, xTest, yTest, fit_kwargs=None, predict_kwargs=None, metrics=None, enforced_target_values=None):
if metrics is None:
metrics = benchmark_functions
if enforced_target_values is not None:
yTrain = target_to_enforced_target_values(yTrain, enforced_target_values)
fit_kwargs = {} if fit_kwargs is None else fit_kwargs
predict_kwargs = {} if predict_kwargs is None else predict_kwargs
model.fit(xTrain, yTrain, **fit_kwargs)
yPredict = model.predict(xTest, **predict_kwargs)
results = {func.__name__: func(yTest, yPredict) for func in metrics}
return results
def process_benchmark_cv(model, X, y, cv=5, fit_kwargs=None, predict_kwargs=None, augment_data=None, metrics=None, enforced_target_values=None):
"""
:param model: model with fit/predict methods
:param X: features
:param y: target
:param cv: (int) cross validation splits
:param fit_kwargs: (dict) kwargs for fit
:param predict_kwargs: (dict) kwargs for predict
:param augment_data: (None|1|2) 1: random, 2: upsample
"""
# We make sure original values aren't modified, even by mistake
X = np.copy(X)
y = np.copy(y)
kf = KFold(n_splits=cv)
results = []
for train_index, test_index in kf.split(X):
xTrain, yTrain = X[train_index], y[train_index]
if augment_data:
upsample = augment_data==2
xTrain, yTrain = DACombine().fit_predict(xTrain, yTrain, upsample=upsample)
xTest, yTest = X[test_index], y[test_index]
benchmark_result = process_model(deepcopy(model), xTrain, yTrain, xTest, yTest, fit_kwargs, predict_kwargs, metrics, enforced_target_values)
results.append(benchmark_result)
return pd.DataFrame(results)
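# Usage sketch (the model choice is an assumption, not taken from this code base):
#   from sklearn.linear_model import Ridge
#   scores = process_benchmark_cv(Ridge(), X, y, cv=5, augment_data=2)
#   scores.mean()  # average of each benchmark metric over the 5 folds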
def process_benchmarks(models_dict, X, y, cv=5, fit_kwargs=None, predict_kwargs=None, augment_data=None, shuffle=False, metrics=None, enforced_target_values=None):
"""
Benchmark multiple models using the same augmented data to spare time from data augmentation
:param models_dict: {key:model} dict of models with fit/predict methods
:param X: features
:param y: target
:param cv: (int) cross validation splits
:param fit_kwargs: (dict) kwargs for fit
:param predict_kwargs: (dict) kwargs for predict
:param augment_data: (None|1|2|list) None: no data-augmentation, 1: random, 2: upsample
:param shuffle: if True, shuffle data
:returns: dict of dataframe results
"""
X = np.copy(X)
y = np.copy(y)
if shuffle:
mask = np.arange(0, X.shape[0])
np.random.shuffle(mask)
X = X[mask]
y = y[mask]
if not isinstance(augment_data, (list, tuple)):
augment_data = [augment_data]
benchmark_results = {key:[] for key in models_dict}
kf = KFold(n_splits=cv)
for train_index, test_index in kf.split(X):
xTrain, yTrain = X[train_index], y[train_index]
for augment_data_step in augment_data:
if augment_data_step:
upsample = augment_data_step==2
xTrain, yTrain = DACombine().fit_predict(xTrain, yTrain, upsample=upsample)
xTest, yTest = X[test_index], y[test_index]
for key, model in models_dict.items():
benchmark_result = process_model(deepcopy(model), xTrain, yTrain, xTest, yTest, fit_kwargs, predict_kwargs, metrics, enforced_target_values)
nKey = key
if augment_data_step:
nKey += "_da" + str(augment_data_step)
nKey_results = benchmark_results.get(nKey, [])
nKey_results.append(benchmark_result)
benchmark_results[nKey] = nKey_results
return {key: pd.DataFrame(results) for key, results in benchmark_results.items()}
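# Usage sketch (model names are assumptions): the returned keys mirror the input keys,
# with a '_da1'/'_da2' suffix appended for the data-augmented variants, e.g.
#   results = process_benchmarks({'ridge': Ridge(), 'gbr': GradientBoostingRegressor()},
#                                X, y, cv=5, augment_data=[None, 2])
#   results['gbr_da2'].mean()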
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 24 00:52:56 2016
@author: ARM
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report, accuracy_score
import time
from keras.layers.advanced_activations import ELU
from sklearn.metrics import *
import matplotlib.pyplot as plt
lim=13000
name1='OFF_Back-tilt.txt'
cf1=pd.read_csv(name1,' ')
vals1=(cf1.iloc[1:lim,:])
vals1_n=(vals1-vals1.min(axis=0))/(vals1.max(axis=0)-vals1.min(axis=0)) #normalize
name2='OFF_Front-tilt.txt'
cf2=pd.read_csv(name2,' ')
vals2=(cf2.iloc[1:lim,:])
vals2_n=(vals2-vals2.min(axis=0))/(vals2.max(axis=0)-vals2.min(axis=0)) #normalize
name3='OFF_Left-tilt.txt'
cf3=pd.read_csv(name3,' ')
vals3=(cf3.iloc[1:lim,:])
vals3_n=(vals3-vals3.min(axis=0))/(vals3.max(axis=0)-vals3.min(axis=0)) #normalize
name4='OFF_Right-tilt.txt'
cf4=pd.read_csv(name4,' ')
vals4=(cf4.iloc[1:lim,:])
vals4_n=(vals4-vals4.min(axis=0))/(vals4.max(axis=0)-vals4.min(axis=0)) #normalize
name5='OFF_Upright.txt'
cf5=pd.read_csv(name5,' ')
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve, auc, confusion_matrix, precision_score, recall_score, f1_score
from sklearn.metrics import average_precision_score, precision_recall_curve
from ._woe_binning import woe_binning, woe_binning_2, woe_binning_3
class Metrics:
def __init__(self, df, actual, prediction):
self.df = df
self.target = actual
self.actual = df[actual]
self.prediction = df[prediction]
self.gains = self.calculate_gains()
self.ks = self.ks()
self.gini = self.gini()
self.tn, self.fp, self.fn, self.tp, self.precision, self.recall, self.f1_score = self.precision_recall_f1_score()
def calculate_gains(self):
"""Returns a pandas dataframe with gains along with KS and Gini calculated"""
self.df['scaled_score'] = (self.df['positive_probability']*1000000).round(0)
gains = self.df.groupby('scaled_score')[self.target].agg(['count','sum'])
gains.columns = ['total','responders']
gains.reset_index(inplace=True)
gains.sort_values(by='scaled_score', ascending=False)
gains['non_responders'] = gains['total'] - gains['responders']
gains['cum_resp'] = gains['responders'].cumsum()
gains['cum_non_resp'] = gains['non_responders'].cumsum()
gains['total_resp'] = gains['responders'].sum()
gains['total_non_resp'] = gains['non_responders'].sum()
gains['perc_resp'] = (gains['responders']/gains['total_resp'])*100
gains['perc_non_resp'] = (gains['non_responders']/gains['total_non_resp'])*100
gains['perc_cum_resp'] = gains['perc_resp'].cumsum()
gains['perc_cum_non_resp'] = gains['perc_non_resp'].cumsum()
gains['k_s'] = gains['perc_cum_resp'] - gains['perc_cum_non_resp']
return gains
def get_threshold(self):
"""Returns a pandas dataframe with y_pred based on threshold from roc_curve."""
fpr, tpr, threshold = roc_curve(self.actual, self.prediction)
threshold_cutoff_df = pd.DataFrame({'fpr': fpr, 'tpr': tpr, 'threshold': threshold})
threshold_cutoff_df['distance'] = ((threshold_cutoff_df['fpr']-0)**2+(threshold_cutoff_df['tpr']-1)**2)**0.5
threshold_cutoff_df['distance_diff'] = abs(threshold_cutoff_df['distance'].diff(periods=1))
for index, rows in threshold_cutoff_df.iterrows():
if index != 0 and index != threshold_cutoff_df.shape[0]-1:
curr_val = threshold_cutoff_df.loc[index, 'distance_diff']
prev_val = threshold_cutoff_df.loc[index-1, 'distance_diff']
next_val = threshold_cutoff_df.loc[index+1, 'distance_diff']
if curr_val>prev_val and curr_val>next_val:
threshold_cutoff = threshold_cutoff_df.loc[index, 'threshold']
break
return threshold_cutoff
def gini(self):
fpr, tpr, threshold = roc_curve(self.actual, self.prediction)
auroc = auc(fpr, tpr)
gini = 2*auroc -1
return gini
def ks(self):
gains = self.gains
return gains['k_s'].max()
def precision_recall_f1_score(self):
threshold_cutoff = self.get_threshold()
self.y_pred = np.where(self.prediction>=threshold_cutoff,1,0)
self.df['y_pred'] = self.y_pred
tn, fp, fn, tp = confusion_matrix(self.actual, self.y_pred).ravel()
precision = precision_score(self.actual, self.y_pred)
recall = recall_score(self.actual, self.y_pred)
f1 = f1_score(self.actual, self.y_pred)
return tn, fp, fn, tp, precision, recall, f1
def to_dict(self):
return {'ks': self.ks, 'gini': self.gini, 'tn': self.tn, 'tp': self.tp, 'fn': self.fn, 'fp': self.fp, 'precision': self.precision, 'recall': self.recall, 'f1_score': self.f1_score}
def standard_metrics(df, target_col, prediction_col):
"""Returns a dict with all metrics - Gini, KS, Precision, Recall, F1 Score, True Negative, True Positive, False Positive, False Negative."""
metrics = Metrics(df, target_col, prediction_col)
return metrics.to_dict()
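# Usage sketch (column names are assumptions):
#   report = standard_metrics(scored_df, target_col='bad_flag', prediction_col='positive_probability')
#   report['gini'], report['ks']
# Note that calculate_gains() reads df['positive_probability'] directly, so the
# prediction column is expected to carry exactly that name.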
def quick_psi(dev, val):
"""Calculate PSI from 2 arrays - dev and val"""
return sum([(a-b)*np.log(a/b) for (a,b) in zip(dev,val)])
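# Worked example: for dev=[0.5, 0.5] and val=[0.6, 0.4],
# PSI = (0.5-0.6)*ln(0.5/0.6) + (0.5-0.4)*ln(0.5/0.4) ~= 0.018 + 0.022 ~= 0.04.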
def psi(dev, val, target='positive_probability', n_bins=10):
"""
Returns a pandas dataframe with psi column (Population Stability Index) after creating 10 deciles.
Code includes creating score calculation using round(500-30 x log(100 x (p/(1-p))), 0) where p is probability.
We need to pass both dev and val at same time to apply same bins created on dev dataframe.
"""
dev['score'] = dev[target].apply(lambda x: round(500-30*np.log2(100*(x/(1-x))), 0))
val['score'] = val[target].apply(lambda x: round(500-30*np.log2(100*(x/(1-x))), 0))
_, bins = pd.qcut(dev.score, n_bins, retbins=True, precision=0)
bins = [int(i) if abs(i)!=np.inf else i for i in bins]
dev['bins'] = pd.cut(dev.score, bins)
val['bins'] = pd.cut(val.score, bins)
dev_bins = dev.bins.value_counts(sort=False, normalize=True)
val_bins = val.bins.value_counts(sort=False, normalize=True)
psi_ = pd.concat([dev_bins, val_bins], axis=1)
psi_.columns = ['dev', 'val']
psi_['psi'] = (psi_.dev - psi_.val)*np.log(psi_.dev/psi_.val)
return psi_
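# Common rule of thumb for the summed PSI (not enforced by this code):
# below 0.1 the population is stable, 0.1-0.25 indicates a moderate shift,
# and above 0.25 a significant shift.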
def gsi(data, col='GENDER', col_val='F', target='positive_probability', n_bins=10):
"""
Returns a pandas dataframe with gsi columns (Group Stability Index) after creating n bins.
Args:
data: pandas dataframe
col: Columns on which GSI has to be calculated (ex: Gender column)
col_val: selected value will be compared with rest of the values (ex: F vs Rest)
target: score column
n_bins: number of bins to be created (Default=10)
"""
df = data.copy()
df['decile'] = pd.qcut(df[target], n_bins, labels=False)
df.loc[df[col]!=col_val, col] = 'Rest'
pivot_ = df.groupby(['decile', col])[target].count().unstack()
pivot = pivot_.div(pivot_.sum(axis=0), axis=1)
pivot['gsi'] = (pivot[col_val]-pivot['Rest'])*np.log(pivot[col_val]/pivot['Rest'])
return pivot
def iv(df, suffix='_dev'):
"""Returns a pandas dataframe with calculated fields - resp_rate, perc_dist, perc_non_resp, perc_resp, raw_odds, ln_odds, iv, exp_resp, exp_non_resp, chi_square."""
df['resp_rate'+suffix] = (df['responders'+suffix]*100)/df['total'+suffix]
df['perc_dist'+suffix] = (df['total'+suffix]*100)/df.groupby('var_name')['total'+suffix].transform('sum')
df['perc_non_resp'+suffix] = (df['non_responders'+suffix]*100)/df.groupby('var_name')['non_responders'+suffix].transform('sum')
df['perc_resp'+suffix] = (df['responders'+suffix]*100)/df.groupby('var_name')['responders'+suffix].transform('sum')
df['raw_odds'+suffix] = df.apply(lambda r: 0 if r['perc_resp'+suffix]==0 else r['perc_non_resp'+suffix]/r['perc_resp'+suffix], axis=1)
df['ln_odds'+suffix] = df['raw_odds'+suffix].apply(lambda x: 0 if abs(np.log(x))==np.inf else np.log(x))
df['iv'+suffix] = (df['perc_non_resp'+suffix]-df['perc_resp'+suffix])*df['ln_odds'+suffix]/100
df['exp_resp'+suffix] = df['total'+suffix]*df.groupby('var_name')['responders'+suffix].transform('sum')/df.groupby('var_name')['total'+suffix].transform('sum')
df['exp_non_resp'+suffix] = df['total'+suffix]*df.groupby('var_name')['non_responders'+suffix].transform('sum')/df.groupby('var_name')['total'+suffix].transform('sum')
df['chi_square'+suffix] = (((df['responders'+suffix]-df['exp_resp'+suffix])**2)/df['exp_resp'+suffix]) + (((df['non_responders'+suffix]-df['exp_non_resp'+suffix])**2)/df['exp_non_resp'+suffix])
return df
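# Common rule of thumb for the summed IV of a variable (not enforced by this code):
# below 0.02 not predictive, 0.02-0.1 weak, 0.1-0.3 medium, above 0.3 strong.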
def iv_var(df, var_name, resp_name, suffix='_dev', var_cuts=None):
"""Returns IV of a variable"""
summ_df, _ = woe_bins(df, var_name, resp_name, suffix, var_cuts)
iv_ = iv(summ_df, suffix)
return iv_, iv_['iv'+suffix].sum()
def woe_bins(df, var_name, resp_name, suffix='_dev', var_cuts=None):
"""
Returns a pandas dataframe, var_cuts after creating bins.
Returns:
df: pandas dataframe has var_cuts_string, total, responders, non_responders, var_name (with _dev or _val suffix)
var_cuts: list of Interval items to be used on val file.
"""
df1 = df[[resp_name, var_name]]
if (np.issubdtype(df1[var_name].dtype, np.number)):
n = df1[var_name].nunique()
if var_cuts is None:
suffix = '_dev'
var_cuts = woe_binning_3(df1, resp_name, var_name, 0.05, 0.00001, 0, 50, 'bad', 'good')
var_cuts = list(set(var_cuts))
var_cuts.sort()
df1.loc[:,'var_binned'] = pd.cut(df[var_name], var_cuts, right=True, labels=None, retbins=False, precision=10, include_lowest=False)
var_min = float(df1[var_name].min())
var_max = float(df1[var_name].max())
summ_df = df1.groupby('var_binned')[resp_name].agg(['count','sum']).reset_index()
summ_df['delta'] = summ_df['count'] - summ_df['sum']
summ_df['var_name'] = var_name
summ_df.columns = ['var_cuts', 'total'+suffix, 'responders'+suffix, 'non_responders'+suffix, 'var_name']
summ_df['var_cuts_string'+suffix] = summ_df.var_cuts.apply(lambda x: str(x.left if x.left!=-np.inf else var_min)+' To '+str(x.right if x.right!=np.inf else var_max))
else:
df1[var_name].fillna('Blank', inplace=True)
summ_df = df1.groupby(var_name)[resp_name].agg(['count','sum']).reset_index()
summ_df['delta'] = summ_df['count'] - summ_df['sum']
summ_df['var_name'] = var_name
summ_df.columns = ['var_cuts_string'+suffix, 'total'+suffix, 'responders'+suffix, 'non_responders'+suffix, 'var_name']
summ_df['var_cuts'] = summ_df['var_cuts_string'+suffix]
return summ_df[summ_df['total'+suffix]!=0], var_cuts
def csi(dev_df, val_df, var_list, resp_name):
"""Returns a pandas dataframe with csi, csi_var, perc_csi columns (Charecteristic Stability Index) calculated based on both dev and val dataframes."""
dev_df.fillna(0, inplace=True)
val_df.fillna(0, inplace=True)
dev_dfs = []
var_cuts = {}
for var_name in var_list:
summ_df, cut = woe_bins(dev_df, var_name, resp_name, '_dev')
dev_dfs.append(summ_df)
var_cuts[var_name] = cut
dev = pd.concat(dev_dfs, axis=0)
dev = iv(dev, '_dev')
val_dfs = []
val_cuts = {}
for var_name in var_list:
val_summ_df, val_cut = woe_bins(val_df, var_name, resp_name, '_val', var_cuts[var_name])
val_dfs.append(val_summ_df)
val_cuts[var_name] = val_cut
val = pd.concat(val_dfs, axis=0)
val = iv(val, '_val')
final = pd.merge(dev, val, how='left', on=['var_name', 'var_cuts'], suffixes=['_dev','_val'])
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0,0,-3400]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 99890.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; build ten subspaces around 10 of those points
# check that each subspace is a Space and lies within s, extract a point set with 32, and verify the count is correct
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# create a space, pick a point in it and a distance, and build a sub-space around that point
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
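        # CashPlan arithmetic, as exercised below: '+' merges two plans (or adds a constant
        # to every amount), 'plan * int' multiplies the amounts, 'int * plan' repeats the
        # plan in time, and multiplying by a float scales the amounts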
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
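        # fill the pool beyond its capacity of 5 and verify that cut() keeps the 5 items
        # with the largest perf values, while cut(keep_largest=False) keeps the 5 smallest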
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
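        # input_to_list() repeats a single string n times, or pads a list with the given
        # padder (None by default) up to length n; lists longer than n are returned unchanged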
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
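        # space_around_centre() builds a sub-space centred on a point, clipped to the
        # boundaries of the original space; enum axes are kept whole by default, and are
        # sliced by index position around the centre when ignore_enums=False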
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
        print('\ntest different radii per dimension:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_time_string_format(self):
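        # time_str_format() renders a duration in seconds as a readable string;
        # estimation=True keeps only the leading unit, short_form=True uses the
        # compact D/H/'/" notation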
        print('Testing time_str_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all stock market is "主板", and list date is on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if list dates, industries and areas match the given filters\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
    # the expected results below were calculated manually in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
        # build a test series of 500 data points to test the evaluation process
        # when there are more than 250 data points
self.long_data = pd.DataFrame([ 9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4 , 10.87 ,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19 , 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97 ,
12.178, 11.95 , 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64 ,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3 , 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82 , 12.67 , 12.876, 12.986, 13.271, 13.606, 13.82 ,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34 , 12.141, 11.687,
11.992, 12.458, 12.131, 11.75 , 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56 , 12.879, 12.861,
12.973, 13.235, 13.53 , 13.531, 13.137, 13.166, 13.31 , 13.103,
13.007, 12.643, 12.69 , 12.216, 12.385, 12.046, 12.321, 11.9 ,
11.772, 11.816, 11.871, 11.59 , 11.518, 11.94 , 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16 , 11.741, 11.26 , 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62 , 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89 ,
10.728, 11.191, 11.646, 11.62 , 11.195, 11.178, 11.18 , 10.956,
11.205, 10.87 , 11.098, 10.639, 10.487, 10.507, 10.92 , 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77 , 11.225, 10.92 , 10.824, 11.096, 11.542,
11.06 , 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55 , 9.008,
9.138, 9.088, 9.434, 9.156, 9.65 , 9.431, 9.654, 10.079,
10.411, 10.865, 10.51 , 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72 , 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11 , 13.53 ,
13.123, 13.138, 13.57 , 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86 , 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11 , 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32 , 16.59 , 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06 , 17.36 , 17.108,
17.348, 17.596, 17.46 , 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64 ,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67 , 15.911,
16.077, 16.17 , 15.722, 15.258, 14.877, 15.138, 15. , 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71 , 16.327, 16.605, 16.486, 16.846,
16.935, 17.21 , 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43 , 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([ 9.7 , 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59 , 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55 ,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91 ,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97 , 14.228,
13.84 , 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41 , 14.74 , 15.03 , 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86 , 15.097, 15.178, 15.293, 15.238, 15. , 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81 , 17.192, 16.86 , 16.745, 16.707,
16.552, 16.133, 16.301, 16.08 , 15.81 , 15.75 , 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57 , 16.778, 16.928, 16.932, 17.22 , 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95 ,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36 , 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79 , 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72 , 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12 , 15.442, 15.476, 15.789,
15.36 , 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2 , 15.994, 15.86 , 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49 , 17.768, 17.509,
17.795, 18.147, 18.63 , 18.945, 19.021, 19.518, 19.6 , 19.744,
19.63 , 19.32 , 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3 ,
17.894, 17.744, 17.5 , 17.083, 17.092, 16.864, 16.453, 16.31 ,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93 , 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67 , 14.797, 14.42 , 14.681, 15.16 , 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32 ,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71 , 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39 , 11.723, 12.084, 11.8 , 11.471,
11.33 , 11.504, 11.295, 11.3 , 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94 ,
10.521, 10.36 , 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72 , 10.54 , 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54 , 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39 , 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4 ,
9.332, 9.34 , 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63 , 8.831, 8.957, 9.18 , 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85 , 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06 , 10.188, 10.095, 9.739, 9.881,
9.7 , 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
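        # eval_fv() returns the final value (the last 'value' entry) of the series,
        # and -np.inf for an empty DataFrame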
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
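        # eval_max_drawdown() is expected to return a 4-element tuple:
        # (max_drawdown_ratio, peak_index, valley_index, recovery_index), where
        # recovery_index is NaN if the value never climbs back to the previous peak.
        # A minimal cross-check sketch (not executed here), assuming the standard
        # peak-to-trough definition of max drawdown:
        #     values = self.test_data1['value'].values
        #     peaks = np.maximum.accumulate(values)
        #     max_dd = np.max(1 - values / peaks)   # ~0.2643 for test_data1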
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
        self.assertRaises(AssertionError, eval_max_drawdown, 15)
        self.assertRaises(KeyError,
                          eval_max_drawdown,
                          pd.DataFrame([1, 2, 3], columns=['non_value']))
        # test max drawdown when the value series crosses zero:
        # TODO: investigate how division by a near-zero peak value affects the drawdown ratio
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
        # test volatility calculation on long data (more than 250 data points)
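        # the first 250 entries below are NaN because the rolling window is 250 points wide.
        # A minimal sketch of the assumed rolling-volatility definition (annualized standard
        # deviation of log returns over a 250-point window, not executed here):
        #     ret = np.log(self.long_data['value'] / self.long_data['value'].shift(1))
        #     rolling_vol = ret.rolling(250).std() * np.sqrt(250)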
expected_volatility = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514 ,
0.40710639, 0.40708157, 0.40609006, 0.4073625 , 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593 , 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768 , 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592 , 0.42615335, 0.42526286,
0.4248906 , 0.42368986, 0.4232565 , 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645 , 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991 , 0.405011 , 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969 , 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559 , 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634 , 0.36539259, 0.36428672, 0.36502487,
0.3647148 , 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685 , 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
        # test sharpe ratio calculation on long data
expected_sharp = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281 , -0.02416067, -0.02763238,
-0.027579 , -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633 , -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756 , -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062 ,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977 ,
0.0474047 , 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686 , 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441 , 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094 ,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544 , 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123 , 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174 , 0.05051288, 0.0564852 , 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782 , 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908 , 0.08562706,
0.0839014 , 0.0849072 , 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
        # test beta calculation on long data
expected_beta = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347 , -0.0460858 , -0.0416761 , -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583 ,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841 , -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915 , -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592 , -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058 , -0.04533641, -0.0461183 , -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414 ,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265 , -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383 , -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499 , -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632 , -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571 ,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486 , -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195 , -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
        # test alpha calculation on long data
expected_alpha = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678 ,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565 , -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743 ,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428 ,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789 , -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945 , -0.04672356, -0.03581408, -0.0439215 ,
-0.03429495, -0.0260362 , -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908 , 0.11302115,
0.0909566 , 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445 , 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807 , 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069 , 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612 , 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943 ,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336 , 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809 , 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061 , 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356 , 0.70912003,
0.60328917, 0.6395092 , 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216 , 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253 , 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
self.op_signals = np.array([[0, 0, 0, 0, 0.25, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.1, 0.15],
[0.2, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0.1, 0, 0, 0, 0],
[0, 0, 0, 0, -0.75, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.333, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, -0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
[0, 0, 0, 0, 0.2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.5, 0, 0, 0.15, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.2, 0, -1, 0.2, 0],
[0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -0.5, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.15, 0, 0],
[-1, 0, 0.25, 0.25, 0, 0.25, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.25, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, -1, 0, 0, 0, 0, 0],
[-1, 0, 0.15, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
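        # signal convention in op_signals: a positive number buys with that fraction of the available
        # cash, a negative number sells that fraction of the current holding (see test_loop_step below)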
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
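        # rate2 is identical to rate except for its non-zero minimum fees (buy_min=10, sell_min=5);
        # it is used in test_loop to check looping with a non-zero moq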
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
self.op_signal_df = pd.DataFrame(self.op_signals, index=self.dates, columns=self.shares)
self.history_list = pd.DataFrame(self.prices, index=self.dates, columns=self.shares)
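        # expected results of apply_loop: each row lists the holdings of the seven shares followed by
        # the remaining cash, the fee and the total value (cross-checked against test_loop_step below)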
self.res = np.array([[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0, 33323.836],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 33174.614],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35179.466],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34465.195],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34712.354],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35755.550],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37895.223],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37854.284],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37198.374],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35916.711],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35806.937],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36317.592],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37103.973],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35457.883],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36717.685],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37641.463],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36794.298],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37073.817],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35244.299],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37062.382],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37420.067],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 38089.058],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 39260.542],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42609.684],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 43109.309],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42283.408],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43622.444],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42830.254],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41266.463],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41164.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41797.937],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42440.861],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42113.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43853.588],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 46216.760],
[0.000, 0.000, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 5140.743, 0.000, 45408.737],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 47413.401],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44603.718],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44381.544]])
def test_loop_step(self):
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.zeros(7, dtype='float'),
op=self.op_signals[0],
prices=self.prices[0],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
self.assertAlmostEqual(value, 10000.00)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=5059.722222,
pre_amounts=np.array([0, 0, 0, 0, 555.5555556,
205.0653595, 321.0891813]),
op=self.op_signals[3],
prices=self.prices[3],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 1201.2775195, 5)
self.assertTrue(np.allclose(amounts, np.array([346.9824373, 416.6786936, 0, 0,
555.5555556, 205.0653595, 321.0891813])))
self.assertAlmostEqual(value, 9646.111756, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=6179.77423,
pre_amounts=np.array([115.7186428, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0]),
op=self.op_signals[31],
prices=self.prices[31],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0])))
self.assertAlmostEqual(value, 21133.50798, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 938.6967231, 1339.207325]),
op=self.op_signals[60],
prices=self.prices[60],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5001.424618, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811, 269.8495646,
1785.205494, 938.6967231, 1339.207325])))
self.assertAlmostEqual(value, 33323.83588, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[61],
prices=self.prices[61],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 416.6786936, 1290.69215, 719.9239224,
1785.205494, 2701.487958, 1339.207325])))
self.assertAlmostEqual(value, 32820.29007, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=915.6208259,
pre_amounts=np.array([0, 416.6786936, 1290.69215, 719.9239224,
0, 2701.487958, 4379.098907]),
op=self.op_signals[96],
prices=self.prices[96],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5140.742779, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 1290.69215, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 45408.73655, 4)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[97],
prices=self.prices[97],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 2027.18825, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 47413.40131, 4)
def test_loop(self):
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res.values, self.res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestOperatorSubFuncs(unittest.TestCase):
def setUp(self):
mask_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.5, 0.0, 0.3, 1.0],
[0.5, 0.0, 0.3, 0.5],
[0.5, 0.5, 0.3, 0.5],
[0.5, 0.5, 0.3, 1.0],
[0.3, 0.5, 0.0, 1.0],
[0.3, 1.0, 0.0, 1.0]]
signal_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.3, 0.0],
[0.0, 0.0, 0.0, -0.5],
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.5],
[-0.4, 0.0, -1.0, 0.0],
[0.0, 0.5, 0.0, 0.0]]
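        # signal_list is the expected output of mask_to_signal for mask_list: position increases show
        # up as absolute changes of the target ratio, while decreases show up as the fraction of the
        # current holding to sell (e.g. a drop from 0.5 to 0.3 yields -0.4)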
mask_multi = [[[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[0, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 1]],
[[0, 0, 1, 0, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 1, 0],
[0, 1, 0, 1, 0]],
[[0, 0, 0., 0, 1],
[0, 0, 1., 0, 1],
[0, 0, 1., 0, 1],
[1, 0, 1., 0, 1],
[1, 1, .5, 1, 1],
[1, 0, .5, 1, 0],
[1, 1, .5, 1, 0],
[0, 1, 0., 0, 0],
[1, 0, 0., 0, 0],
[0, 1, 0., 0, 0]]]
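        # mask_multi stacks three 10-day x 5-share masks to test mask_to_signal on 3-D input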
signal_multi = [[[0., 0., 1., 1., 0.],
[0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., -1., 0., 0.],
[-1., 0., 0., -1., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., -1., 0., 0., 0.]],
[[0., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., -1., 0., 0.],
[0., 0., 1., -1., -1.],
[0., 0., -1., 0., 0.],
[0., -1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[-1., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., -0.5, 1., 0.],
[0., -1., 0., 0., -1.],
[0., 1., 0., 0., 0.],
[-1., 0., -1., -1., 0.],
[1., -1., 0., 0., 0.],
[-1., 1., 0., 0., 0.]]]
self.mask = np.array(mask_list)
self.multi_mask = np.array(mask_multi)
self.correct_signal = np.array(signal_list)
self.correct_multi_signal = np.array(signal_multi)
self.op = qt.Operator()
def test_ls_blend(self):
"""测试多空蒙板的混合器,三种混合方式均需要测试"""
ls_mask1 = [[0.0, 0.0, 0.0, -0.0],
[1.0, 0.0, 0.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[0.0, 1.0, 0.0, -1.0],
[0.0, 1.0, 0.0, -1.0]]
ls_mask2 = [[0.0, 0.0, 0.5, -0.5],
[0.0, 0.0, 0.5, -0.3],
[0.0, 0.5, 0.5, -0.0],
[0.5, 0.5, 0.3, -0.0],
[0.5, 0.5, 0.3, -0.3],
[0.5, 0.5, 0.0, -0.5],
[0.3, 0.5, 0.0, -1.0],
[0.3, 1.0, 0.0, -1.0]]
ls_mask3 = [[0.5, 0.0, 1.0, -0.4],
[0.4, 0.0, 1.0, -0.3],
[0.3, 0.0, 0.8, -0.2],
[0.2, 0.0, 0.6, -0.1],
[0.1, 0.2, 0.4, -0.2],
[0.1, 0.3, 0.2, -0.5],
[0.1, 0.4, 0.0, -0.5],
[0.1, 0.5, 0.0, -1.0]]
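        # naming of the blenders tested below (as implied by the expected results): 'avg' averages the
        # masks, 'combo' sums them, 'str-T' applies threshold T to the summed mask (keeping its sign),
        # 'pos-N-T' outputs +/-1 where at least N strategies hold a position larger than T in absolute
        # value, and 'avg_pos-N-T' outputs the averaged value under the same condition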
# result with blender 'avg'
ls_blnd_avg = [[0.16666667, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.16666667, 0.76666667, -0.4],
[0.56666667, 0.16666667, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'str-1.5'
ls_blnd_str_15 = [[0, 0, 1, 0],
[0, 0, 1, -1],
[0, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'pos-2' == 'pos-2-0'
ls_blnd_pos_2 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 0, -1],
[1, 1, 0, -1]]
# result with blender 'pos-2-0.25'
ls_blnd_pos_2_25 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'avg_pos-2' == 'pos-2-0'
ls_blnd_avg_pos_2 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, -0.4],
[0.56666667, 0.00000000, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'avg_pos-2-0.25'
ls_blnd_avg_pos_2_25 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, 0.00000000],
[0.56666667, 0.00000000, 0.63333333, 0.00000000],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.00000000, -0.66666667],
[0.00000000, 0.63333333, 0.00000000, -0.83333333],
[0.00000000, 0.83333333, 0.00000000, -1.]]
# result with blender 'combo'
ls_blnd_combo = [[0.5, 0., 1.5, -0.9],
[1.4, 0., 1.5, -1.6],
[1.3, 0.5, 2.3, -1.2],
[1.7, 0.5, 1.9, -1.1],
[1.6, 1.7, 1.7, -1.5],
[1.6, 1.8, 1.2, -2.],
[0.4, 1.9, 0., -2.5],
[0.4, 2.5, 0., -3.]]
ls_masks = np.array([np.array(ls_mask1), np.array(ls_mask2), np.array(ls_mask3)])
# test A: the ls_blender 'str-T'
self.op.set_blender('ls', 'str-1.5')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'test A: result of ls_blender: str-1.5: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_str_15))
# test B: the ls_blender 'pos-N-T'
self.op.set_blender('ls', 'pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-1: result of ls_blender: pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2))
self.op.set_blender('ls', 'pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-2: result of ls_blender: pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25))
# test C: the ls_blender 'avg_pos-N-T'
self.op.set_blender('ls', 'avg_pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-1: result of ls_blender: avg_pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2, 5))
self.op.set_blender('ls', 'avg_pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-2: result of ls_blender: avg_pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25, 5))
# test D: the ls_blender 'avg'
self.op.set_blender('ls', 'avg')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test D: result of ls_blender: avg: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg))
# test E: the ls_blender 'combo'
self.op.set_blender('ls', 'combo')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test E: result of ls_blender: combo: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_combo))
def test_sel_blend(self):
"""测试选股蒙板的混合器,包括所有的混合模式"""
# step2, test blending of sel masks
pass
def test_bs_blend(self):
"""测试买卖信号混合模式"""
# step3, test blending of op signals
pass
def test_unify(self):
print('Testing Unify functions\n')
l1 = np.array([[3, 2, 5], [5, 3, 2]])
res = qt.unify(l1)
target = np.array([[0.3, 0.2, 0.5], [0.5, 0.3, 0.2]])
        self.assertIs(np.allclose(res, target), True, 'each row should sum to 1')
l1 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
res = qt.unify(l1)
target = np.array([[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]])
        self.assertIs(np.allclose(res, target), True, 'each row should sum to 1')
def test_mask_to_signal(self):
signal = qt.mask_to_signal(self.mask)
print(f'Test A: single mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_signal))
signal = qt.mask_to_signal(self.multi_mask)
        print(f'Test B: multiple masks to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_multi_signal))
class TestLSStrategy(qt.RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(qt.SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
        # build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values
        # on some days for some shares in the share pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
# self.op.info()
def test_operator_ready(self):
"""test the method ready of Operator"""
pass
# print(f'operator is ready? "{self.op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
pass
# self.assertIsInstance(self.op, qt.Operator)
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
# self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 3)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 1)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# print(f'test adding strategies into existing op')
# print('test adding strategy by string')
# self.op.add_strategy('macd', 'timing')
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.timing[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 4)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# self.op.add_strategy('random', 'selecting')
# self.assertIsInstance(self.op.selecting[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 5)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.selecting_blender, '0 or 1')
# self.op.add_strategy('none', 'ricon')
# self.assertIsInstance(self.op.ricon[0], qt.TimingDMA)
# self.assertIsInstance(self.op.ricon[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 6)
# self.assertEqual(self.op.ricon_count, 2)
# self.assertEqual(self.op.timing_count, 2)
# print('test adding strategy by list')
# self.op.add_strategy(['dma', 'macd'], 'timing')
# print('test adding strategy by object')
# test_ls = TestLSStrategy()
# self.op.add_strategy(test_ls, 'timing')
def test_operator_remove_strategy(self):
"""test removing strategies from Operator"""
pass
# self.op.remove_strategy(stg='macd')
def test_property_get(self):
self.assertIsInstance(self.op, qt.Operator)
self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
self.assertEqual(self.op.selecting_count, 1)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.ricon_count, 1)
self.assertEqual(self.op.timing_count, 1)
print(self.op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy: \n{self.op.strategies[0].info()}')
self.assertEqual(len(self.op.strategies), 3)
self.assertIsInstance(self.op.strategies[0], qt.TimingDMA)
self.assertIsInstance(self.op.strategies[1], qt.SelectingAll)
self.assertIsInstance(self.op.strategies[2], qt.RiconUrgent)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close'])
self.assertEqual(self.op.opt_space_par, ([], []))
self.assertEqual(self.op.max_window_length, 270)
self.assertEqual(self.op.ls_blender, 'pos-1')
self.assertEqual(self.op.selecting_blender, '0')
self.assertEqual(self.op.ricon_blender, 'add')
self.assertEqual(self.op.opt_types, [0, 0, 0])
def test_prepare_data(self):
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
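        # the CashPlan instances above cover on-spot, too-early, too-late, no-trade-day and multiple
        # investment dates; they are used below to exercise the validation inside prepare_data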
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._selecting_history_data, list)
self.assertIsInstance(self.op._timing_history_data, list)
self.assertIsInstance(self.op._ricon_history_data, list)
self.assertEqual(len(self.op._selecting_history_data), 1)
self.assertEqual(len(self.op._timing_history_data), 1)
self.assertEqual(len(self.op._ricon_history_data), 1)
sel_hist_data = self.op._selecting_history_data[0]
tim_hist_data = self.op._timing_history_data[0]
ric_hist_data = self.op._ricon_history_data[0]
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
"""
:return:
"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
self.assertIsInstance(self.op, qt.Operator, 'Operator Creation Error')
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
        # calling prepare_data before the parameters of all strategies are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.info()
op_list = self.op.create_signal(hist_data=self.hp1)
print(f'operation list is created: as following:\n {op_list}')
self.assertTrue(isinstance(op_list, pd.DataFrame))
self.assertEqual(op_list.shape, (26, 3))
        # after the de-duplication code was removed, the signal count grew from 23 to 26 and now
        # contains three duplicated signals; dropping duplicates may remove signals that should be
        # kept, see the comment around line 836 of create_signal() in operator.py
target_op_dates = ['2016/07/08', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/18', '2016/07/20', '2016/07/22', '2016/07/26',
'2016/07/27', '2016/07/28', '2016/08/02', '2016/08/03',
'2016/08/04', '2016/08/05', '2016/08/08', '2016/08/10',
'2016/08/16', '2016/08/18', '2016/08/24', '2016/08/26',
'2016/08/29', '2016/08/30', '2016/08/31', '2016/09/05',
'2016/09/06', '2016/09/08']
target_op_values = np.array([[0.0, 1.0, 0.0],
[0.5, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
target_op = pd.DataFrame(data=target_op_values, index=target_op_dates, columns=['000010', '000030', '000039'])
target_op = target_op.rename(index=pd.Timestamp)
print(f'target operation list is as following:\n {target_op}')
dates_pairs = [[date1, date2, date1 == date2]
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]
signal_pairs = [[list(sig1), list(sig2), all(sig1 == sig2)]
for sig1, sig2
in zip(list(target_op.values), list(op_list.values))]
print(f'dates side by side:\n '
f'{dates_pairs}')
print(f'signals side by side:\n'
f'{signal_pairs}')
print([item[2] for item in dates_pairs])
print([item[2] for item in signal_pairs])
self.assertTrue(np.allclose(target_op.values, op_list.values, equal_nan=True))
self.assertTrue(all([date1 == date2
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]))
def test_operator_parameter_setting(self):
"""
:return:
"""
new_op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
print(new_op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{new_op.strategies[0].info()}')
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=None,
opt_tag=1,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=None,
opt_tag=0,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.timing[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.max_window_length, 20)
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id=32, pars=(1, 2))
self.op.set_blender('selecting', '0 and 1 or 2')
self.op.set_blender('ls', 'str-1.2')
self.assertEqual(self.op.ls_blender, 'str-1.2')
self.assertEqual(self.op.selecting_blender, '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.assertEqual(self.op.ricon_blender, 'add')
self.assertRaises(ValueError, self.op.set_blender, 'select', '0and1')
self.assertRaises(TypeError, self.op.set_blender, 35, '0 and 1')
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.opt_types, [1, 1, 0])
def test_exp_to_blender(self):
self.op.set_blender('selecting', '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
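        # blender expressions are stored in prefix (Polish) notation, hence ['or', 'and', '0', '1', '2']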
self.op.set_blender('selecting', '0 and ( 1 or 2 )')
self.assertEqual(self.op.selecting_blender_expr, ['and', '0', 'or', '1', '2'])
self.assertRaises(ValueError, self.op.set_blender, 'selecting', '0 and (1 or 2)')
def test_set_opt_par(self):
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=(0.5,),
opt_tag=0,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=(9, -0.23),
opt_tag=1,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (9, -0.23))
self.assertEqual(self.op.opt_types, [1, 0, 1])
self.op.set_opt_par((5, 12, 9, 8, -0.1))
self.assertEqual(self.op.timing[0].pars, (5, 12, 9))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (8, -0.1))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
self.assertRaises(ValueError, self.op.set_opt_par, (5, 12, 9, 8))
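# NOTE: set_opt_par distributes a single flat tuple over the strategies whose opt_tag is 1,
# in strategy order, consuming par_count items per strategy.  With opt_types == [1, 0, 1]
# and parameter counts (3, 1, 2) as asserted above, the tuple (5, 12, 9, 8, -0.1) is split
# into (5, 12, 9) for the timing strategy and (8, -0.1) for the ricon strategy, while the
# selecting strategy keeps its own parameters.  A minimal sketch of this splitting logic
# (an assumption for illustration, not the library's actual implementation):
#     k = 0
#     for stg in strategies:
#         if stg.opt_tag == 1:
#             stg.pars = opt_par[k:k + stg.par_count]
#             k += stg.par_count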
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'TIMING'
self.stg_name = "CROSSLINE STRATEGY"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have unexpected consequences
# TODO: the handling of nan values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
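# NOTE: _seg_periods splits the hdates into sampling segments according to stg.sample_freq:
# seg_pos holds the starting index of each segment (plus the final boundary), seg_length the
# number of hdates in each segment, and seg_count the number of segments, so that
# sum(seg_length) == seg_pos[-1] == 49 and the selection mask is re-computed once per segment.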
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in proportion weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor with even weight and threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
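# NOTE: the parameter sets above exercise the different weighting schemes of the selecting
# strategy as reflected in the expected masks: 'even' splits the position equally among the
# selected shares (0.5 / 0.5), 'linear' weights them by rank (0.33333 / 0.66667), and
# 'proportion' weights them by the relative size of the indicator value (e.g. 0.08333 /
# 0.91667); raising lbound/ubound to 0.2 in the last case additionally filters out shares
# whose indicator does not pass the threshold condition.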
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""Test setting and getting values of the Config object and the QT_CONFIG variable"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""Test empty HistoryPanels and special HistoryPanels whose dimension labels are pure numbers"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not an np.ndarray
self.assertRaises(AssertionError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label values are not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""Test the various slicing methods of HistoryPanel,
including slicing by label name, by integer position, by comma-separated label strings and by colon-separated label ranges"""
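# Illustrative summary of the convention exercised below (derived from the assertions
# themselves rather than separate documentation): the first indexer selects htypes
# (columns, the last axis of .values) and the second selects shares (levels, the first
# axis); each indexer accepts a single label, a comma-separated string, a list of labels,
# a 'start:stop' label range, or ordinary integers and slices, e.g.
#     self.hp['close,open']         -> self.data[:, :, 0:2]
#     self.hp[:, '000100:000102']   -> self.data[0:3, :, :]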
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\noutput all historical data of the close type\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\noutput all historical data of the close and open types\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\noutput historical data of all types for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\noutput all historical data of all shares for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all historical data of the close and high types\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all historical data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput all historical data of the close and high types\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all historical data of the three types from close through high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all historical data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\noutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput historical data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\noutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput historical data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\noutput historical data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test error raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
# TODO: this test needs to be strengthened: concrete examples should be used to confirm that hp_join produces correct results,
# TODO: especially for different shares, htypes and hdates, and whether they can be
# TODO: combined correctly when they are given in different orders
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=qt.str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=qt.str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
pd.Timestamp('1970-01-01 00:00:00.000000008'),
pd.Timestamp('1970-01-01 00:00:00.000000009')])
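# NOTE: df1 carries a default integer RangeIndex, so dataframe_to_hp interprets the index
# values as epoch timestamps, which is why the expected hdates above are 1970-01-01 plus
# 0..9 nanoseconds; presumably a real DatetimeIndex would be passed through unchanged.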
hp = qt.dataframe_to_hp(df2, shares=self.shares, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, qt.str_to_list(self.shares))
self.assertEqual(hp.htypes, ['close'])
hp = qt.dataframe_to_hp(df3, shares='000100', column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, [0, 1, 2, 3])
hp = qt.dataframe_to_hp(df4, shares='000100', htypes=self.htypes, column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, qt.str_to_list(self.htypes))
hp.info()
self.assertRaises(KeyError, qt.dataframe_to_hp, df1)
def test_to_dataframe(self):
""" Test the to_dataframe method of the HistoryPanel object
"""
print(f'START TEST == test_to_dataframe')
print(f'test converting test hp to dataframe with share == "000102":')
df_test = self.hp.to_dataframe(share='000102')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000102'], values))
print(f'test DataFrame conversion with share == "000100"')
df_test = self.hp.to_dataframe(share='000100')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000100'], values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, share=3.0)
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, share='000300')
print(f'test DataFrame conversion with htype == "close"')
df_test = self.hp.to_dataframe(htype='close')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['close'].T, values))
print(f'test DataFrame conversion with htype == "high"')
df_test = self.hp.to_dataframe(htype='high')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['high'].T, values))
print(f'test DataFrame conversion with htype == "high" and dropna')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[4:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values))].reshape(9, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion with htype == "high", dropna and treat infs as na')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True, inf_as_na=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[5:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values) & ~np.isinf(target_values))].reshape(8, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, htype=pd.DataFrame())
print(f'test DataFrame error raising with htype not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, htype='non_type')
print(f'test KeyError raised when both or neither of the parameters is given')
self.assertRaises(KeyError, self.hp.to_dataframe)
self.assertRaises(KeyError, self.hp.to_dataframe, share='000100', htype='close')
def test_to_df_dict(self):
"""Test the public method to_df_dict of HistoryPanel"""
print('test convert history panel slice by share')
df_dict = self.hp.to_df_dict('share')
self.assertEqual(self.hp.shares, list(df_dict.keys()))
df_dict = self.hp.to_df_dict()
self.assertEqual(self.hp.shares, list(df_dict.keys()))
print('test convert historypanel slice by htype ')
df_dict = self.hp.to_df_dict('htype')
self.assertEqual(self.hp.htypes, list(df_dict.keys()))
print('test raise assertion error')
self.assertRaises(AssertionError, self.hp.to_df_dict, by='random text')
self.assertRaises(AssertionError, self.hp.to_df_dict, by=3)
print('test empty hp')
df_dict = qt.HistoryPanel().to_df_dict('share')
self.assertEqual(df_dict, {})
def test_stack_dataframes(self):
print('test stack dataframes in a list')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares=['000100', '000200', '000300'])
hp2 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000100', '000200', '000300'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes=['close', 'high', 'low'])
hp4 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
print('test stack dataframes in a dict')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares')
hp2 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000001.SZ', '000002.SZ', '000003.SZ'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes(dfs={'close': df1, 'high': df2, 'low': df3},
stack_along='htypes')
hp4 = stack_dataframes(dfs={'close': df1, 'low': df2, 'high': df3},
stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
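# NOTE: as the expected value arrays above show, stack_dataframes aligns the input frames
# on the union of their indexes and columns before stacking, filling positions that a
# frame does not cover with np.nan.  A minimal sketch of that alignment step (an
# assumption for illustration, not the actual implementation):
#     all_dates = sorted(set().union(*(df.index for df in dfs)))
#     all_cols = sorted(set().union(*(df.columns for df in dfs)))
#     aligned = [df.reindex(index=all_dates, columns=all_cols) for df in dfs]
#     values = np.stack([df.values for df in aligned], axis=0)    # stack_along='shares'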
def test_to_csv(self):
pass
def test_to_hdf(self):
pass
def test_fill_na(self):
print(self.hp)
new_values = self.hp.values.astype(float)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = np.nan
print(new_values)
temp_hp = qt.HistoryPanel(values=new_values, levels=self.hp.levels, rows=self.hp.rows, columns=self.hp.columns)
self.assertTrue(np.allclose(temp_hp.values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]], np.nan, equal_nan=True))
temp_hp.fillna(2.3)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = 2.3
self.assertTrue(np.allclose(temp_hp.values,
new_values, equal_nan=True))
def test_get_history_panel(self):
# TODO: implement this test case
# test get only one line of data
pass
def test_get_price_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20200101'
end = '20200131'
htypes = 'open, high, low, close'
target_price_000039 = [[9.45, 9.49, 9.12, 9.17],
[9.46, 9.56, 9.4, 9.5],
[9.7, 9.76, 9.5, 9.51],
[9.7, 9.75, 9.7, 9.72],
[9.73, 9.77, 9.7, 9.73],
[9.83, 9.85, 9.71, 9.72],
[9.85, 9.85, 9.75, 9.79],
[9.96, 9.96, 9.83, 9.86],
[9.87, 9.94, 9.77, 9.93],
[9.82, 9.9, 9.76, 9.87],
[9.8, 9.85, 9.77, 9.82],
[9.84, 9.86, 9.71, 9.72],
[9.83, 9.93, 9.81, 9.86],
[9.7, 9.87, 9.7, 9.82],
[9.83, 9.86, 9.69, 9.79],
[9.8, 9.94, 9.8, 9.86]]
target_price_600748 = [[5.68, 5.68, 5.32, 5.37],
[5.62, 5.68, 5.46, 5.65],
[5.72, 5.72, 5.61, 5.62],
[5.76, 5.77, 5.6, 5.73],
[5.78, 5.84, 5.73, 5.75],
[5.89, 5.91, 5.76, 5.77],
[6.03, 6.04, 5.87, 5.89],
[5.94, 6.07, 5.94, 6.02],
[5.96, 5.98, 5.88, 5.97],
[6.04, 6.06, 5.95, 5.96],
[5.98, 6.04, 5.96, 6.03],
[6.1, 6.11, 5.89, 5.94],
[6.02, 6.12, 6., 6.1],
[5.96, 6.05, 5.88, 6.01],
[6.03, 6.03, 5.95, 5.99],
[6.02, 6.12, 5.99, 5.99]]
target_price_000040 = [[3.63, 3.83, 3.63, 3.65],
[3.99, 4.07, 3.97, 4.03],
[4.1, 4.11, 3.93, 3.95],
[4.12, 4.13, 4.06, 4.11],
[4.13, 4.19, 4.07, 4.13],
[4.27, 4.28, 4.11, 4.12],
[4.37, 4.38, 4.25, 4.29],
[4.34, 4.5, 4.32, 4.41],
[4.28, 4.35, 4.2, 4.34],
[4.41, 4.43, 4.29, 4.31],
[4.42, 4.45, 4.36, 4.41],
[4.51, 4.56, 4.33, 4.35],
[4.35, 4.55, 4.31, 4.55],
[4.3, 4.41, 4.22, 4.36],
[4.27, 4.44, 4.23, 4.34],
[4.23, 4.27, 4.18, 4.25]]
print(f'test get price type raw data with single thread')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d')
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
print(f'test get price type raw data with multiple threads')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d', parallel=10)
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
def test_get_financial_report_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20160101'
end = '20201231'
htypes = 'eps,basic_eps,diluted_eps,total_revenue,revenue,total_share,' \
'cap_rese,undistr_porfit,surplus_rese,net_profit'
target_eps_000039 = [[1.41],
[0.1398],
[-0.0841],
[-0.1929],
[0.37],
[0.1357],
[0.1618],
[0.1191],
[1.11],
[0.759],
[0.3061],
[0.1409],
[0.81],
[0.4187],
[0.2554],
[0.1624],
[0.14],
[-0.0898],
[-0.1444],
[0.1291]]
target_eps_600748 = [[0.41],
[0.22],
[0.22],
[0.09],
[0.42],
[0.23],
[0.22],
[0.09],
[0.36],
[0.16],
[0.15],
[0.07],
[0.47],
[0.19],
[0.12],
[0.07],
[0.32],
[0.22],
[0.14],
[0.07]]
target_eps_000040 = [[-0.6866],
[-0.134],
[-0.189],
[-0.036],
[-0.6435],
[0.05],
[0.062],
[0.0125],
[0.8282],
[1.05],
[0.985],
[0.811],
[0.41],
[0.242],
[0.113],
[0.027],
[0.19],
[0.17],
[0.17],
[0.064]]
target_basic_eps_000039 = [[1.3980000e-01, 1.3980000e-01, 6.3591954e+10, 6.3591954e+10],
[-8.4100000e-02, -8.4100000e-02, 3.9431807e+10, 3.9431807e+10],
[-1.9290000e-01, -1.9290000e-01, 1.5852177e+10, 1.5852177e+10],
[3.7000000e-01, 3.7000000e-01, 8.5815341e+10, 8.5815341e+10],
[1.3570000e-01, 1.3430000e-01, 6.1660271e+10, 6.1660271e+10],
[1.6180000e-01, 1.6040000e-01, 4.2717729e+10, 4.2717729e+10],
[1.1910000e-01, 1.1900000e-01, 1.9099547e+10, 1.9099547e+10],
[1.1100000e+00, 1.1000000e+00, 9.3497622e+10, 9.3497622e+10],
[7.5900000e-01, 7.5610000e-01, 6.6906147e+10, 6.6906147e+10],
[3.0610000e-01, 3.0380000e-01, 4.3560398e+10, 4.3560398e+10],
[1.4090000e-01, 1.4050000e-01, 1.9253639e+10, 1.9253639e+10],
[8.1000000e-01, 8.1000000e-01, 7.6299930e+10, 7.6299930e+10],
[4.1870000e-01, 4.1710000e-01, 5.3962706e+10, 5.3962706e+10],
[2.5540000e-01, 2.5440000e-01, 3.3387152e+10, 3.3387152e+10],
[1.6240000e-01, 1.6200000e-01, 1.4675987e+10, 1.4675987e+10],
[1.4000000e-01, 1.4000000e-01, 5.1111652e+10, 5.1111652e+10],
[-8.9800000e-02, -8.9800000e-02, 3.4982614e+10, 3.4982614e+10],
[-1.4440000e-01, -1.4440000e-01, 2.3542843e+10, 2.3542843e+10],
[1.2910000e-01, 1.2860000e-01, 1.0412416e+10, 1.0412416e+10],
[7.2000000e-01, 7.1000000e-01, 5.8685804e+10, 5.8685804e+10]]
target_basic_eps_600748 = [[2.20000000e-01, 2.20000000e-01, 5.29423397e+09, 5.29423397e+09],
[2.20000000e-01, 2.20000000e-01, 4.49275653e+09, 4.49275653e+09],
[9.00000000e-02, 9.00000000e-02, 1.59067065e+09, 1.59067065e+09],
[4.20000000e-01, 4.20000000e-01, 8.86555586e+09, 8.86555586e+09],
[2.30000000e-01, 2.30000000e-01, 5.44850143e+09, 5.44850143e+09],
[2.20000000e-01, 2.20000000e-01, 4.34978927e+09, 4.34978927e+09],
[9.00000000e-02, 9.00000000e-02, 1.73793793e+09, 1.73793793e+09],
[3.60000000e-01, 3.60000000e-01, 8.66375241e+09, 8.66375241e+09],
[1.60000000e-01, 1.60000000e-01, 4.72875116e+09, 4.72875116e+09],
[1.50000000e-01, 1.50000000e-01, 3.76879016e+09, 3.76879016e+09],
[7.00000000e-02, 7.00000000e-02, 1.31785454e+09, 1.31785454e+09],
[4.70000000e-01, 4.70000000e-01, 7.23391685e+09, 7.23391685e+09],
[1.90000000e-01, 1.90000000e-01, 3.76072215e+09, 3.76072215e+09],
[1.20000000e-01, 1.20000000e-01, 2.35845364e+09, 2.35845364e+09],
[7.00000000e-02, 7.00000000e-02, 1.03831865e+09, 1.03831865e+09],
[3.20000000e-01, 3.20000000e-01, 6.48880919e+09, 6.48880919e+09],
[2.20000000e-01, 2.20000000e-01, 3.72209142e+09, 3.72209142e+09],
[1.40000000e-01, 1.40000000e-01, 2.22563924e+09, 2.22563924e+09],
[7.00000000e-02, 7.00000000e-02, 8.96647052e+08, 8.96647052e+08],
[4.80000000e-01, 4.80000000e-01, 6.61917508e+09, 6.61917508e+09]]
target_basic_eps_000040 = [[-1.34000000e-01, -1.34000000e-01, 2.50438755e+09, 2.50438755e+09],
[-1.89000000e-01, -1.89000000e-01, 1.32692347e+09, 1.32692347e+09],
[-3.60000000e-02, -3.60000000e-02, 5.59073338e+08, 5.59073338e+08],
[-6.43700000e-01, -6.43700000e-01, 6.80576162e+09, 6.80576162e+09],
[5.00000000e-02, 5.00000000e-02, 6.38891620e+09, 6.38891620e+09],
[6.20000000e-02, 6.20000000e-02, 5.23267082e+09, 5.23267082e+09],
[1.25000000e-02, 1.25000000e-02, 2.22420874e+09, 2.22420874e+09],
[8.30000000e-01, 8.30000000e-01, 8.67628947e+09, 8.67628947e+09],
[1.05000000e+00, 1.05000000e+00, 5.29431716e+09, 5.29431716e+09],
[9.85000000e-01, 9.85000000e-01, 3.56822382e+09, 3.56822382e+09],
[8.11000000e-01, 8.11000000e-01, 1.06613439e+09, 1.06613439e+09],
[4.10000000e-01, 4.10000000e-01, 8.13102532e+09, 8.13102532e+09],
[2.42000000e-01, 2.42000000e-01, 5.17971521e+09, 5.17971521e+09],
[1.13000000e-01, 1.13000000e-01, 3.21704120e+09, 3.21704120e+09],
[2.70000000e-02, 2.70000000e-02, 8.41966738e+08, 8.24272235e+08],
[1.90000000e-01, 1.90000000e-01, 3.77350171e+09, 3.77350171e+09],
[1.70000000e-01, 1.70000000e-01, 2.38643892e+09, 2.38643892e+09],
[1.70000000e-01, 1.70000000e-01, 1.29127117e+09, 1.29127117e+09],
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# check that all data items are of the correct type
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# check whether any of the returned data is empty
print(all(item.empty for subdict in df_list for item in subdict.values()))
# check that each group of data is correct and that all data share the same order; skip the check if empty data was returned
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
        # Check and confirm that all returned data types are correct
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
        # Check whether any data is empty; empty data may be returned due to network issues
self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
        # Check that each group of data is correct and consistently ordered; empty data is ignored
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
def test_get_composite_type_raw_data(self):
pass
class TestUtilityFuncs(unittest.TestCase):
def setUp(self):
pass
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
def test_list_or_slice(self):
str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
self.assertEqual(qt.list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
self.assertEqual(qt.list_or_slice('open', str_dict), [1])
self.assertEqual(list(qt.list_or_slice('close, high, low', str_dict)), [0, 2, 3])
self.assertEqual(list(qt.list_or_slice('close:high', str_dict)), [0, 1, 2])
self.assertEqual(list(qt.list_or_slice(['open'], str_dict)), [1])
self.assertEqual(list(qt.list_or_slice(['open', 'high'], str_dict)), [1, 2])
self.assertEqual(list(qt.list_or_slice(0, str_dict)), [0])
self.assertEqual(list(qt.list_or_slice([0, 2], str_dict)), [0, 2])
self.assertEqual(list(qt.list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_label_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
self.assertEqual(qt.labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(qt.labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(qt.labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(qt.labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
def test_regulate_date_format(self):
self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
self.assertEqual(regulate_date_format('20191106'), '20191106')
self.assertEqual(regulate_date_format('191106'), '20061119')
self.assertEqual(regulate_date_format('830522'), '19830522')
self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
self.assertRaises(ValueError, regulate_date_format, 'abc')
self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
def test_list_to_str_format(self):
self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
'close,open,high,low')
self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
'letters,,1234,kkl')
self.assertEqual(list_to_str_format('a string input'),
'a,string,input')
self.assertEqual(list_to_str_format('already,a,good,string'),
'already,a,good,string')
self.assertRaises(AssertionError, list_to_str_format, 123)
def test_is_trade_day(self):
"""test if the funcion maybe_trade_day() and is_market_trade_day() works properly
"""
date_trade = '20210401'
date_holiday = '20210102'
date_weekend = '20210424'
date_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
self.assertTrue(maybe_trade_day(date_seems_trade_day))
self.assertTrue(maybe_trade_day(date_too_early))
self.assertTrue(maybe_trade_day(date_too_late))
self.assertTrue(maybe_trade_day(date_christmas))
self.assertTrue(is_market_trade_day(date_trade))
self.assertFalse(is_market_trade_day(date_holiday))
self.assertFalse(is_market_trade_day(date_weekend))
self.assertFalse(is_market_trade_day(date_seems_trade_day))
self.assertFalse(is_market_trade_day(date_too_early))
self.assertFalse(is_market_trade_day(date_too_late))
self.assertTrue(is_market_trade_day(date_christmas))
self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
date_trade = pd.to_datetime('20210401')
date_holiday = pd.to_datetime('20210102')
date_weekend = pd.to_datetime('20210424')
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
def test_prev_trade_day(self):
"""test the function prev_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(prev_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_trade_day(date_seems_trade_day)),
                         pd.to_datetime(prev_seems_trade_day))
#!/usr/bin/env python3
# requirement: a unique train path for each given bitmap day and UID
import app.solr as solr
import json
import pandas as pd
DAY = pd.offsets.Day()
MONDAY = pd.offsets.Week(weekday=0)
WEEK = 7 * DAY
N = 0
def days_str(n):
return '{:b}'.format(n).zfill(7)
def day_int(bitmap):
return int(bitmap, 2)
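# Quick illustration (not part of the original pipeline) of the bitmap day
# encoding used below: a Monday-to-Sunday running pattern round-trips through
# the two helpers, e.g. days_str(day_int('1111100')) == '1111100'.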
DEBUG = True
if __name__ == '__main__':
DEBUG = False
if DEBUG:
pd.set_option('display.max_columns', None)
df1 = pd.DataFrame(solr.get_query('PA', fl='UUID, UID, Date_From, Date_To, Days, STP, Transaction, id, Origin, Terminus'))
df1['ID'] = 'PT'
for KEY in ['Date_From', 'Date_To']:
df1[KEY] = pd.to_datetime(df1[KEY])
df2 = df1[['Date_From', 'Date_To']].rename(columns={'Date_From': 'Start_Date', 'Date_To': 'End_Date'})
idx2 = df2['End_Date'].isnull()
df2.loc[idx2, 'End_Date'] = df2.loc[idx2, 'Start_Date']
idx_monday = df2['Start_Date'].dt.dayofweek == 0
df2.loc[~idx_monday, 'Start_Date'] = df2.loc[~idx_monday, 'Start_Date'] - MONDAY
df2['End_Date'] = df2['End_Date'] + MONDAY
df1 = df1.join(df2)
idx1 = df1['Days'].isna()
df1.loc[idx1, 'Days'] = '0000000'
df1 = df1.drop(['Date_From', 'Date_To'], axis=1)
df1['Actual'] = df1['Days']
def output_schedule(this_schedule):
this_schedule['Active'] = this_schedule['Start_Date'].dt.strftime('%Y-%m-%d') + '.' + this_schedule['End_Date'].dt.strftime('%Y-%m-%d') + '.' + this_schedule['Actual']
this_schedule['id'] = this_schedule['id'] + '.' + this_schedule.groupby('id').cumcount().apply(str)
for KEY in ['Start_Date', 'End_Date']:
this_schedule[KEY] = this_schedule[KEY].dt.strftime('%Y-%m-%dT%H:%M:%SZ')
df1 = this_schedule['Actual'].apply(lambda s: {'{}'.format(str(k)): v for k, v in enumerate(list(s))})
df2 = pd.DataFrame(df1.to_list(), index=df1.index)
this_schedule = this_schedule.join(df2)
this_data = [json.dumps({k: v for k, v in path.to_dict().items() if v}) for _, path in this_schedule.iterrows()]
print('\n'.join(this_data))
#Identify all unique UIDs in timetable
idx1 = df1['UID'].duplicated(keep=False)
SCHEDULE = pd.DataFrame(df1[~idx1]).reset_index(drop=True)
output_schedule(SCHEDULE)
DUPLICATES = df1[idx1]
# Identify all UIDs without date overlap in timetable
df2 = DUPLICATES[['UID', 'Start_Date', 'End_Date']].sort_values(['UID', 'Start_Date']).reset_index(drop=True)
df3 = df2[['UID', 'Start_Date']].rename({'UID': 'UID2', 'Start_Date': 'overlap'}, axis=1).shift(-1)
df2 = df2.join(df3)
df3 = df2[df2['UID'] == df2['UID2']].drop('UID2', axis=1).set_index('UID')
df2 = DUPLICATES.set_index('UID', drop=False)
idx3 = df3[df3['End_Date'] > df3['overlap']].index.unique()
SCHEDULE = df2.drop(idx3).reset_index(drop=True)
output_schedule(SCHEDULE)
# Identify all UIDs with date overlap and interleave
df2 = df2.loc[idx3]
def xor_bitmap(a, b):
return b & (a ^ b)
def overlay_bits(b):
v = list(b[::-1])
for n in range(1, len(v)):
v = v[:n] + [(xor_bitmap(v[n - 1], i)) for i in v[n:]]
return tuple(v[::-1])
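# Sketch of the precedence rule overlay_bits() implements: within an overlapping
# date window, later entries take precedence and earlier entries are masked to
# the running days not already claimed, e.g.
#   overlay_bits((0b1111100, 0b0011111)) == (0b1100000, 0b0011111)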
def interleave(these_objects):
this_interval = [(j['Start_Date'], j['End_Date'], day_int(j['Days']), (j),) for j in these_objects]
idx = sorted(set([j for i in this_interval for j in (i[0], i[1])]))
all_paths = {}
for i in this_interval:
(m, n, bit, k) = i
for j in range(idx.index(m), idx.index(n)):
(k1, k2) = (idx[j], idx[j+1])
try:
all_paths[(k1, k2)] += ((bit, k),)
except KeyError:
all_paths[(k1, k2)] = ((bit, k),)
this_schedule = []
for (k1, k2), v in all_paths.items():
(bits, paths) = zip(*v)
bits = overlay_bits((bits))
for bit, path in zip(bits, paths):
if bit > 0:
path = path.copy()
path['Start_Date'] = k1
path['End_Date'] = k2
path['Actual'] = days_str(bit)
this_schedule.append(path)
return this_schedule
UPDATE = []
for UID in df2.index.unique():
this_schedule = [i.to_dict() for _, i in df2.loc[UID].iterrows()]
UPDATE += interleave(this_schedule)
SCHEDULE = pd.DataFrame(UPDATE)
# import libraries
import requests
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
import time
import random
import re
import os
# Important Note ---
# change the value for which you want to scrape the data defaults to 2008-2019
year_list = [year for year in range(2019, 2007, -1)]
# project paths
project_root_dir = os.path.normpath(os.getcwd() + os.sep + os.pardir)
file_path = os.path.join(project_root_dir, "data")
os.makedirs(file_path, exist_ok=True)
# function for loading data
def load_data(filename, file_path=file_path):
csv_path = os.path.join(file_path, filename)
return pd.read_csv(csv_path)
# function for saving data as csv file
def save_dataframe(df, filename, file_path=file_path):
"""
    This function takes a dataframe and saves it as a csv file.
df: dataframe to save
filename: Name to use for the csv file eg: 'my_file.csv'
file_path = where to save the file
"""
path = os.path.join(file_path, filename)
df.to_csv(path, index=False)
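# Illustrative usage of the two helpers above (the filename is an assumption,
# not something this script writes by default):
#   save_dataframe(batting_df, "batting_2008_2019.csv")
#   batting_df = load_data("batting_2008_2019.csv")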
def get_batting_data(year):
"""This function gets the data from ipl official website,
extract all the table data and return it as a pandas dataframe.
"""
try:
# get the html from the website
url = "https://www.iplt20.com/stats/{}/most-runs".format(year)
response = requests.get(url)
batting_html = response.text
# parse the html
batting_soup = bs(batting_html, features="lxml")
# get the table data
batting_table_data = batting_soup.find(class_="js-table")
# get the column names
col_names = []
for header in batting_table_data.find_all("th"):
col_names.append(header.text.strip())
# create the dataframe
a_list = []
for data in batting_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 14
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# Add the nationality of each player in the dataframe
nationality_list = []
for index, data in enumerate(batting_table_data.find_all("tr")[1:]):
try:
nationality_list.append(data["data-nationality"])
except Exception as e:
print(e)
print(index)
# add none
nationality_list.append(None)
df["Nationality"] = nationality_list
# Add the player link for more info in the dataframe
base_url = "https://www.iplt20.com"
player_link_list = []
try:
# get all the links and add it to the list
for data in batting_table_data.find_all("a"):
player_link_list.append(base_url + data["href"])
# create a column with None value
df[15] = None
# iterate through each row and create a player name pattern
for index, row in df.iterrows():
player_name = row["PLAYER"].replace(" ", "-")
player_regex = re.compile(r"{}".format(player_name), re.IGNORECASE)
for item in player_link_list:
# if the pattern matches any links
if player_regex.search(item) != None:
# then append it to that row of the df
df.iloc[index, 15] = item
# rename the column
df.rename(columns={15: "Player Link"}, inplace=True)
# extract the player team name from the link and add to the df
team_regex = r"teams/(\w+-\w+-?\w+)"
df["Team"] = df["Player Link"].str.extract(team_regex, flags=re.IGNORECASE)
df["Team"] = df["Team"].apply(lambda x: str(x).title().replace("-", " "))
# convert data types from string to numeric
df["POS"] = pd.to_numeric(df["POS"], errors="coerce").fillna(0)
df["Mat"] = pd.to_numeric(df["Mat"], errors="coerce").fillna(0)
df["Inns"] = pd.to_numeric(df["Inns"], errors="coerce").fillna(0)
df["NO"] = pd.to_numeric(df["NO"], errors="coerce").fillna(0)
df["Runs"] = pd.to_numeric(df["Runs"], errors="coerce").fillna(0)
df["HS"] = pd.to_numeric(
df["HS"].str.replace("*", ""), errors="coerce"
).fillna(0)
df["Avg"] = pd.to_numeric(df["Avg"], errors="coerce").fillna(0)
df["BF"] = pd.to_numeric(df["BF"], errors="coerce").fillna(0)
df["SR"] = pd.to_numeric(df["SR"], errors="coerce").fillna(0)
df["100"] = pd.to_numeric(df["100"], errors="coerce").fillna(0)
df["50"] = pd.to_numeric(df["50"], errors="coerce").fillna(0)
df["4s"] = pd.to_numeric(df["4s"], errors="coerce").fillna(0)
df["6s"] = pd.to_numeric(df["6s"], errors="coerce").fillna(0)
# Add season year
df["Season"] = year
except Exception as e:
print(e)
print(year)
except Exception as e:
print(e)
print(year)
# return the dataframe
return df
def combine_all_years_data(function, year_list):
"""
Common function for combining data for all the years for a
given table from ipl website or any other. All table have
different functions to get the data from the websites.
"""
try:
# create an empty list to hold all the dataframes
df_list = []
# loop through each year and extract the data
for year in year_list:
# call the function to get the data for that year
df = function(year)
# append the data to the df list
df_list.append(df)
# add some random pause
time.sleep(1 + 2 * random.random())
# concat all the dataframes
df = pd.concat(df_list, ignore_index=True)
except Exception as e:
print(e)
print(year)
# return the dataframe
return df
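# Illustrative call (an assumption about how the scraper is meant to be driven;
# requires network access and uses the year_list defined at the top):
#   batting = combine_all_years_data(get_batting_data, year_list)
#   save_dataframe(batting, "batting.csv")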
def get_points_table_data(year):
"""This Function takes the year value and extract the points table data
from HowStat and return it as a Pandas Dataframe.
"""
try:
url = "http://www.howstat.com/cricket/Statistics/IPL/PointsTable.asp?s={}".format(
year
)
response = requests.get(url)
except Exception as e:
print(e)
print(year)
try:
# get the html text
points_html_str = response.text
# parse it using BeautifulSoup
points_soup = bs(points_html_str, features="lxml")
# Get all the Table data
table_data = points_soup.find(class_="TableLined")
# create an empty list
a_list = []
# loop through all the table data and extract the desired value and append
# it to the empty list
for data in table_data.find_all("td"):
a_list.append(data.text.strip())
# total item to put in a list as we have 10 columns
n = 10
# create a list of list each contains 10 items
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
# create a dataframe from the list of list
df = pd.DataFrame(final)
# set the column names which is in the 0th index
df.columns = df.iloc[0]
# drop the column names from the 0th index
df = df.drop(df.index[0])
# convert the data types of all the following columns
col_to_convert = ["Mat", "Won", "Lost", "Tied", "N/R", "Points", "Net R/R"]
# function for converting string to numerical values
def convert_to_float(val):
return float(val)
# do the conversion for each column
for col in col_to_convert:
df[col] = df[col].apply(convert_to_float)
# add season year
df["Season"] = year
except Exception as e:
print(e)
print("year:", year)
print("Status Code:", response.status_code)
# return the dataframe
return df
def get_series_matches_data(year):
"""This function takes the year value and returns the series match
data.
"""
try:
url = "http://howstat.com/cricket/Statistics/IPL/SeriesMatches.asp?s={}".format(
year
)
response = requests.get(url)
except Exception as e:
print(e)
print(year)
try:
# get the html text
series_match_html = response.text
# parse the html text
series_soup = bs(series_match_html, features="lxml")
# get the table data
series_table_data = series_soup.find(class_="TableLined")
# an empty list and append all the data to it
a_list = []
for data in series_table_data.find_all("td"):
a_list.append(data.text.strip())
n = 4
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = df.iloc[0]
df = df.drop(df.index[0])
# convert to datetime object
df["Date"] = pd.to_datetime(df["Date"])
# split the match number and teams names
df[["Match Number", "Teams"]] = df["Match"].str.split(":", expand=True)
# get the team A and team B names
df[["Team A", "Team B"]] = df["Teams"].str.split("v", expand=True)
# matching pattern for team names
team_regex = r"""
(Rajasthan\sRoyals|Kings\sXI\sPunjab|Chennai\sSuper\sKings|Delhi\sCapitals|Mumbai\sIndians|
Kolkata\sKnight\sRiders|Royal\sChallengers\sBangalore|Deccan\sChargers|Kochi\sTuskers\sKerala|
Pune\sWarriors|Sunrisers\sHyderabad|Gujarat\sLions|Rising\sPune\sSupergiant|No\sresult|Match\sabandoned)
"""
# Extract the data
df["winner"] = df["Result"].str.extract(
team_regex, flags=re.VERBOSE | re.IGNORECASE
)
df["Wins By Runs"] = (
df["Result"]
.str.extract(r"(\d{1,3})\s(Runs|Run)", flags=re.IGNORECASE)
.fillna(0)
.iloc[:, 0]
)
df["Wins By Wickets"] = (
df["Result"]
.str.extract(r"(\d{1,2})\s(Wickets|Wicket)", flags=re.IGNORECASE)
.fillna(0)
.iloc[:, 0]
)
df["Season"] = df["Date"].dt.year
# columns to drop
cols_to_drop = ["Match", "Teams", "Result"]
df = df.drop(cols_to_drop, axis=1)
# convert strings to int
df["Wins By Runs"] = df["Wins By Runs"].astype("int")
df["Wins By Wickets"] = df["Wins By Wickets"].astype("int")
except Exception as e:
print(e)
print(year)
print(response.status_code)
# return the dataframe
return df
def get_fastest_fifties_data(year):
"""
Get the fastest fifties data.
"""
try:
url = "https://www.iplt20.com/stats/{}/fastest-fifties".format(year)
response = requests.get(url)
fifties_html = response.text
fifties_soup = bs(fifties_html, features="lxml")
# get the table data
fifties_table_data = fifties_soup.find(class_="js-table")
# get the column names
col_names = []
for header in fifties_table_data.find_all("th"):
col_names.append(header.text.strip())
a_list = []
for data in fifties_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 9
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# convert to datetime object
df["Match Date"] = pd.to_datetime(df["Match Date"])
# convert data types
df["POS"] = pd.to_numeric(df["POS"], errors="coerce").fillna(0)
df["BF"] = pd.to_numeric(df["BF"], errors="coerce").fillna(0)
df["6s"] = pd.to_numeric(df["6s"], errors="coerce").fillna(0)
df["4s"] = pd.to_numeric(df["4s"], errors="coerce").fillna(0)
df["Runs"] = pd.to_numeric(df["Runs"], errors="coerce").fillna(0)
# Add season year
df["Season"] = year
except Exception as e:
print(e)
print(year)
return df
def get_fastest_centuries_data(year):
"""
Extract fastest centuries data for this year.
"""
try:
url = "https://www.iplt20.com/stats/{}/fastest-centuries".format(year)
response = requests.get(url)
centuries_html = response.text
centuries_soup = bs(centuries_html, features="lxml")
# get the table data
centuries_table_data = centuries_soup.find(class_="js-table")
# get the column names
col_names = []
for header in centuries_table_data.find_all("th"):
col_names.append(header.text.strip())
a_list = []
for data in centuries_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 9
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# convert to datetime object
df["Match Date"] = pd.to_datetime(df["Match Date"])
# convert data from string to numeric
df["POS"] = pd.to_numeric(df["POS"], errors="coerce").fillna(0)
df["BF"] = pd.to_numeric(df["BF"], errors="coerce").fillna(0)
df["6s"] = pd.to_numeric(df["6s"], errors="coerce").fillna(0)
df["4s"] = pd.to_numeric(df["4s"], errors="coerce").fillna(0)
df["Runs"] = pd.to_numeric(df["Runs"], errors="coerce").fillna(0)
# add season year
df["Season"] = year
except Exception as e:
print(e)
print(year)
return df
def get_dot_balls_data(year):
"""This function gets the dot balls data for a particular year."""
url = "https://www.iplt20.com/stats/{}/most-dot-balls".format(year)
response = requests.get(url)
dots_html = response.text
dots_soup = bs(dots_html, features="lxml")
dots_table_data = dots_soup.find(class_="js-table")
# get the column names
col_names = []
for header in dots_table_data.find_all("th"):
col_names.append(header.text.strip())
a_list = []
for data in dots_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 13
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# select only player name and Dots data
df = df[["PLAYER", "Dots"]]
# convert data type
df["Dots"] = pd.to_numeric(df["Dots"], errors="coerce").fillna(0)
return df
def get_maidens_data(year):
"""This function gets the player name and maidens
data for a particular year.
"""
try:
url = "https://www.iplt20.com/stats/{}/most-maidens".format(year)
response = requests.get(url)
maidens_html = response.text
maidens_soup = bs(maidens_html, features="lxml")
maidens_table_data = maidens_soup.find(class_="js-table")
# get the column names
col_names = []
for header in maidens_table_data.find_all("th"):
col_names.append(header.text.strip())
a_list = []
for data in maidens_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 13
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# select only player name and maid column
df = df[["PLAYER", "Maid"]]
# change data type
df["Maid"] = pd.to_numeric(df["Maid"], errors="coerce").fillna(0)
except Exception as e:
print(e)
print(year)
return df
def get_dots_maidens(year):
"""
    Combine the dots and maidens data into a single df.
"""
try:
dots_df = get_dot_balls_data(year)
maidens_df = get_maidens_data(year)
# hats_df = get_hat_tricks_data(year)
df = pd.merge(left=dots_df, right=maidens_df, how="left", on=["PLAYER"])
# df = pd.merge(left=df, right=hats_df,how='left',on=['PLAYER'])
# fill missing values
df.fillna(0, inplace=True)
except Exception as e:
print(e)
print(year)
return df
def get_bowling_data(year):
try:
url = "https://www.iplt20.com/stats/{}/most-wickets".format(year)
response = requests.get(url)
bowling_html = response.text
bowling_soup = bs(bowling_html, features="lxml")
# get the table data
bowling_table_data = bowling_soup.find(class_="js-table")
# get the column names
col_names = []
for header in bowling_table_data.find_all("th"):
col_names.append(header.text.strip())
a_list = []
for data in bowling_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 13
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# Add the nationality of each player in the dataframe
nationality_list = []
for index, data in enumerate(bowling_table_data.find_all("tr")[1:]):
try:
nationality_list.append(data["data-nationality"])
except Exception as e:
print(e)
print(index)
# add none
nationality_list.append(None)
df["Nationality"] = nationality_list
# Add the player link for more info in the dataframe
base_url = "https://www.iplt20.com"
player_link_list = []
# get all the links and add it to the list
for data in bowling_table_data.find_all("a"):
player_link_list.append(base_url + data["href"])
# create a column with None value
df[14] = None
# iterate through each row and create a player name pattern
for index, row in df.iterrows():
player_name = row["PLAYER"].replace(" ", "-")
player_regex = re.compile(r"{}".format(player_name), re.IGNORECASE)
for item in player_link_list:
# if the pattern matches any links
if player_regex.search(item) != None:
# then append it to that row of the df
df.iloc[index, 14] = item
# rename the column
df.rename(columns={14: "Player Link"}, inplace=True)
# extract the player team name from the link and add to the df
team_regex = r"teams/(\w+-\w+-?\w+)"
df["Team"] = df["Player Link"].str.extract(team_regex, flags=re.IGNORECASE)
df["Team"] = df["Team"].apply(lambda x: str(x).title().replace("-", " "))
# convert data types from string to numeric
df["POS"] = pd.to_numeric(df["POS"], errors="coerce").fillna(0)
df["Mat"] = pd.to_numeric(df["Mat"], errors="coerce").fillna(0)
df["Inns"] = pd.to_numeric(df["Inns"], errors="coerce").fillna(0)
df["Ov"] = pd.to_numeric(df["Ov"], errors="coerce").fillna(0)
df["Runs"] = pd.to_numeric(df["Runs"], errors="coerce").fillna(0)
df["Wkts"] = pd.to_numeric(df["Wkts"], errors="coerce").fillna(0)
df["BBI"] = pd.to_numeric(df["BBI"], errors="coerce").fillna(0)
df["Avg"] = pd.to_numeric(df["Avg"], errors="coerce").fillna(0)
df["Econ"] = pd.to_numeric(df["Econ"], errors="coerce").fillna(0)
df["SR"] = pd.to_numeric(df["SR"], errors="coerce").fillna(0)
df["4w"] = pd.to_numeric(df["4w"], errors="coerce").fillna(0)
df["5w"] = pd.to_numeric(df["5w"], errors="coerce").fillna(0)
# extract the dots balls and maidens data
df2 = get_dots_maidens(year)
# combine both the dataframes
df = pd.merge(left=df, right=df2, how="left", on=["PLAYER"])
# fill missing values
df.fillna(0, inplace=True)
# add season year
df["Season"] = year
except Exception as e:
print(e)
print(year)
# return dataframe
return df
def get_wins_losses_data():
win_losses = pd.read_html(
"https://en.wikipedia.org/wiki/List_of_Indian_Premier_League_records_and_statistics"
)
# select the win losses table
win_losses_df = win_losses[3]
# drop the last
win_losses_df.drop(win_losses_df.index[-1], inplace=True)
# change names of the teams
val_dict = {
"CSK": "Chennai Super Kings",
"DC": "Delhi Capitals",
"KXIP": "Kings XI Punjab",
"KKR": "Kolkata Knight Riders",
"MI": "Mumbai Indians",
"RR": "Rajasthan Royals",
"RCB": "Royal Challengers Banglore",
"SRH": "Sunrisers Hyderabad",
}
win_losses_df["Team"] = win_losses_df["Team"].map(val_dict)
# rename the column
win_losses_df.rename(columns={"Win\xa0%": "Win %"}, inplace=True)
# columns list
cols_list = [
"Matches",
"Won",
"Lost",
"No Result",
"Tied and won",
"Tied and lost",
"Win %",
"Titles",
]
# convert data types
for col in cols_list:
win_losses_df[col] = pd.to_numeric(win_losses_df[col], errors="coerce").fillna(
0
)
return win_losses_df
def batting_all_time_record(df):
"""This Function create the aggregated all the season data
into a single dataframe.
"""
agg_dict = {
"Mat": "sum",
"Inns": "sum",
"NO": "sum",
"Runs": "sum",
"HS": "max",
"Avg": "mean",
"BF": "sum",
"SR": "mean",
"100": "sum",
"50": "sum",
"4s": "sum",
"6s": "sum",
}
batting_all_time = (
batting.groupby("PLAYER")
.aggregate(agg_dict)
.reset_index()
.sort_values(by="Runs", ascending=False)
)
batting_all_time = batting_all_time.round(2)
batting_all_time.index = np.arange(0, len(batting_all_time))
return batting_all_time
def get_bowling_data_all_time():
try:
url = "https://www.iplt20.com/stats/all-time/most-wickets"
response = requests.get(url)
bowling_html = response.text
bowling_soup = bs(bowling_html, "lxml")
# get the table data
bowling_table_data = bowling_soup.find(class_="js-table")
# get the column names
col_names = []
for header in bowling_table_data.find_all("th"):
col_names.append(header.text.strip())
a_list = []
for data in bowling_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 13
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# Add the nationality of each player in the dataframe
nationality_list = []
for index, data in enumerate(bowling_table_data.find_all("tr")[1:]):
try:
nationality_list.append(data["data-nationality"])
except Exception as e:
print(e)
print(index)
# add none
nationality_list.append(None)
df["Nationality"] = nationality_list
# Add the player link for more info in the dataframe
base_url = "https://www.iplt20.com"
player_link_list = []
# get all the links and add it to the list
for data in bowling_table_data.find_all("a"):
player_link_list.append(base_url + data["href"])
# create a column with None value
df[14] = None
# iterate through each row and create a player name pattern
for index, row in df.iterrows():
player_name = row["PLAYER"].replace(" ", "-")
player_regex = re.compile(r"{}".format(player_name), re.IGNORECASE)
for item in player_link_list:
# if the pattern matches any links
if player_regex.search(item) != None:
# then append it to that row of the df
df.iloc[index, 14] = item
# rename the column
df.rename(columns={14: "Player Link"}, inplace=True)
# extract the player team name from the link and add to the df
team_regex = r"teams/(\w+-\w+-?\w+)"
df["Team"] = df["Player Link"].str.extract(team_regex, flags=re.IGNORECASE)
df["Team"] = df["Team"].apply(lambda x: str(x).title().replace("-", " "))
# convert data types from string to numeric
df["POS"] = pd.to_numeric(df["POS"], errors="coerce").fillna(0)
df["Mat"] = | pd.to_numeric(df["Mat"], errors="coerce") | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""Provides programs to process and analyze GOES X-ray data."""
from __future__ import absolute_import
import datetime
import matplotlib.dates
from matplotlib import pyplot as plt
from astropy.io import fits as pyfits
from numpy import nan
from numpy import floor
from pandas import DataFrame
from sunpy.lightcurve import LightCurve
from sunpy.time import parse_time, TimeRange, is_time_in_given_format
from sunpy.util import net
__all__ = ['GOESLightCurve']
class GOESLightCurve(LightCurve):
"""
GOES XRS LightCurve
    On each GOES satellite there are two X-ray Sensors (XRS) which provide solar
    X-ray fluxes for the wavelength bands of 0.5 to 4 Å (short channel)
    and 1 to 8 Å (long channel). The most recent data is usually available one or two days late.
Data is available starting on 1981/01/01.
Examples
--------
>>> from sunpy import lightcurve as lc
>>> from sunpy.time import TimeRange
>>> goes = lc.GOESLightCurve.create(TimeRange('2012/06/01', '2012/06/05'))
>>> goes.peek() # doctest: +SKIP
References
----------
* `GOES Mission Homepage <http://www.goes.noaa.gov>`_
* `GOES XRS Homepage <http://www.swpc.noaa.gov/products/goes-x-ray-flux>`_
* `GOES XRS Guide <http://ngdc.noaa.gov/stp/satellite/goes/doc/GOES_XRS_readme.pdf>`_
* `NASCOM Data Archive <http://umbra.nascom.nasa.gov/goes/fits/>`_
"""
def peek(self, title="GOES Xray Flux"):
"""Plots GOES XRS light curve is the usual manner. An example is shown
below.
.. plot::
from sunpy import lightcurve as lc
from sunpy.data.sample import GOES_LIGHTCURVE
goes = lc.GOESLightCurve.create(GOES_LIGHTCURVE)
goes.peek()
Parameters
----------
title : str
The title of the plot.
**kwargs : dict
Any additional plot arguments that should be used
when plotting.
Returns
-------
fig : `~matplotlib.Figure`
A plot figure.
"""
figure = plt.figure()
axes = plt.gca()
dates = matplotlib.dates.date2num(parse_time(self.data.index))
axes.plot_date(dates, self.data['xrsa'], '-',
label='0.5--4.0 $\AA$', color='blue', lw=2)
axes.plot_date(dates, self.data['xrsb'], '-',
label='1.0--8.0 $\AA$', color='red', lw=2)
axes.set_yscale("log")
axes.set_ylim(1e-9, 1e-2)
axes.set_title(title)
axes.set_ylabel('Watts m$^{-2}$')
axes.set_xlabel(datetime.datetime.isoformat(self.data.index[0])[0:10])
ax2 = axes.twinx()
ax2.set_yscale("log")
ax2.set_ylim(1e-9, 1e-2)
ax2.set_yticks((1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2))
ax2.set_yticklabels((' ', 'A', 'B', 'C', 'M', 'X', ' '))
axes.yaxis.grid(True, 'major')
axes.xaxis.grid(False, 'major')
axes.legend()
# @todo: display better tick labels for date range (e.g. 06/01 - 06/05)
formatter = matplotlib.dates.DateFormatter('%H:%M')
axes.xaxis.set_major_formatter(formatter)
axes.fmt_xdata = matplotlib.dates.DateFormatter('%H:%M')
figure.autofmt_xdate()
figure.show()
return figure
@classmethod
def _get_default_uri(cls):
"""Returns the URL for the latest GOES data."""
now = datetime.datetime.utcnow()
time_range = TimeRange(datetime.datetime(now.year, now.month, now.day), now)
url_does_exist = net.url_exists(cls._get_url_for_date_range(time_range))
while not url_does_exist:
time_range = TimeRange(time_range.start-datetime.timedelta(days=1),
time_range.start)
url_does_exist = net.url_exists(cls._get_url_for_date_range(time_range))
return cls._get_url_for_date_range(time_range)
@classmethod
def _get_goes_sat_num(self, start, end):
"""Parses the query time to determine which GOES satellite to use."""
goes_operational = {
2: TimeRange('1980-01-04', '1983-05-01'),
5: TimeRange('1983-05-02', '1984-08-01'),
6: TimeRange('1983-06-01', '1994-08-19'),
7: TimeRange('1994-01-01', '1996-08-14'),
8: TimeRange('1996-03-21', '2003-06-19'),
9: TimeRange('1997-01-01', '1998-09-09'),
10: TimeRange('1998-07-10', '2009-12-02'),
11: TimeRange('2006-06-20', '2008-02-16'),
12: TimeRange('2002-12-13', '2007-05-09'),
13: TimeRange('2006-08-01', '2006-08-01'),
14: TimeRange('2009-12-02', '2010-11-05'),
15: TimeRange('2010-09-01', datetime.datetime.utcnow())}
sat_list = []
for sat_num in goes_operational:
if ((start >= goes_operational[sat_num].start and
start <= goes_operational[sat_num].end and
(end >= goes_operational[sat_num].start and
end <= goes_operational[sat_num].end))):
# if true then the satellite with sat_num is available
sat_list.append(sat_num)
if not sat_list:
# if no satellites were found then raise an exception
raise Exception('No operational GOES satellites within time range')
else:
return sat_list
@staticmethod
def _get_url_for_date_range(*args):
"""Returns a URL to the GOES data for the specified date.
Parameters
----------
args : `~sunpy.time.TimeRange`, `datetime.datetime`, str
Date range should be specified using a TimeRange, or start
and end dates at datetime instances or date strings.
satellite_number : int
GOES satellite number (default = 15)
data_type : str
Data type to return for the particular GOES satellite. Supported
types depend on the satellite number specified. (default = xrs_2s)
"""
# TimeRange
if len(args) == 1 and isinstance(args[0], TimeRange):
start = args[0].start
end = args[0].end
elif len(args) == 2:
start = parse_time(args[0])
end = parse_time(args[1])
if end < start:
raise ValueError('start time > end time')
# find out which satellite and datatype to query from the query times
sat_num = GOESLightCurve._get_goes_sat_num(start, end)
base_url = 'http://umbra.nascom.nasa.gov/goes/fits/'
if start < parse_time('1999/01/15'):
url = base_url + "{date:%Y}/go{sat:02d}{date:%y%m%d}.fits".format(
date=start, sat=sat_num[0])
else:
url = base_url + "{date:%Y}/go{sat:02d}{date:%Y%m%d}.fits".format(
date=start, sat=sat_num[0])
return url
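    # For example, a one-day range on 2015-06-21 resolves to GOES 15 and the URL
    # http://umbra.nascom.nasa.gov/goes/fits/2015/go1520150621.fits (illustrative,
    # derived from the formatting rules above rather than from the data archive).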
@staticmethod
def _parse_fits(filepath):
"""Parses a GOES FITS file from
http://umbra.nascom.nasa.gov/goes/fits/"""
fits = pyfits.open(filepath)
header = fits[0].header
if len(fits) == 4:
if is_time_in_given_format(fits[0].header['DATE-OBS'], '%d/%m/%Y'):
start_time = datetime.datetime.strptime(fits[0].header['DATE-OBS'], '%d/%m/%Y')
elif is_time_in_given_format(fits[0].header['DATE-OBS'], '%d/%m/%y'):
start_time = datetime.datetime.strptime(fits[0].header['DATE-OBS'], '%d/%m/%y')
else:
raise ValueError("Date not recognized")
xrsb = fits[2].data['FLUX'][0][:, 0]
xrsa = fits[2].data['FLUX'][0][:, 1]
seconds_from_start = fits[2].data['TIME'][0]
elif 1 <= len(fits) <= 3:
start_time = parse_time(header['TIMEZERO'])
seconds_from_start = fits[0].data[0]
xrsb = fits[0].data[1]
xrsa = fits[0].data[2]
else:
raise ValueError("Don't know how to parse this file")
times = [start_time + datetime.timedelta(seconds=int(floor(s)),
microseconds=int((s - floor(s)) * 1e6)) for s in seconds_from_start]
# remove bad values as defined in header comments
xrsb[xrsb == -99999] = nan
xrsa[xrsa == -99999] = nan
# fix byte ordering
newxrsa = xrsa.byteswap().newbyteorder()
newxrsb = xrsb.byteswap().newbyteorder()
        data = DataFrame({'xrsa': newxrsa, 'xrsb': newxrsb}, index=times)
import viola
import pandas as pd
from io import StringIO
import sys, os
HERE = os.path.abspath(os.path.dirname(__file__))
data_expected = """vcf1_test1 0 small_del
vcf2_test1 0 small_del
vcf1_test2 0 small_del
vcf2_test2 0 small_del
vcf1_test3 0 large_del
vcf2_test3 0 large_del
vcf1_test4 0 large_del
vcf2_test4 0 large_del
vcf1_test5 0 large_del
vcf2_test5 0 large_del
vcf1_test6 0 small_dup
vcf2_test6 0 small_dup
vcf1_test7 0 small_inv
vcf2_test7 0 small_inv
vcf1_test8 0 others
vcf2_test8 0 others
vcf1_test9 0 small_inv
vcf2_test9 0 small_inv
vcf1_viola_breakpoint:0 0 tra
vcf2_viola_breakpoint:0 0 tra
vcf1_viola_breakpoint:1 0 tra
vcf2_viola_breakpoint:1 0 tra
"""
DEFINITIONS = """name 'small_del'
0 SVLEN > -100
1 SVTYPE == DEL
logic 0 & 1
name 'large_del'
0 SVTYPE == DEL
logic 0
name 'small_dup'
0 SVLEN < 100
1 SVTYPE == DUP
logic 0 & 1
name 'large_dup'
0 SVTYPE == DUP
logic 0
name 'small_inv'
0 SVLEN < 100
1 SVTYPE == INV
logic 0 & 1
name 'tra'
0 SVTYPE == TRA
logic 0
"""
def small_del(x):
return x.filter(['svlen > -100', 'svtype == DEL']).ids
def large_del(x):
return x.filter(['svtype == DEL']).ids
def small_dup(x):
return x.filter(['svlen < 100', 'svtype == DUP']).ids
def large_dup(x):
return x.filter(['svtype == DUP']).ids
def small_inv(x):
return x.filter(['svlen < 100', 'svtype == INV']).ids
def tra(x):
return x.filter('svtype == TRA').ids
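# Each helper above mirrors one rule block in the DEFINITIONS string, e.g.
# small_del() corresponds to the 'small_del' block (SVLEN > -100 and SVTYPE == DEL).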
def test_classify_manual_svtype():
vcf = viola.read_vcf(os.path.join(HERE, 'data/manta1.vcf'))
vcf2 = vcf.copy()
vcf = vcf.breakend2breakpoint()
vcf2 = vcf2.breakend2breakpoint()
multi_vcf = viola.MultiVcf([vcf, vcf2], ['vcf1', 'vcf2'])
ls_conditions = [small_del, large_del, small_dup, large_dup, small_inv, tra]
ls_names = ['small_del', 'large_del', 'small_dup', 'large_dup', 'small_inv', 'tra']
result = multi_vcf.classify_manual_svtype(ls_conditions=ls_conditions, ls_names=ls_names)
manual_sv_type = multi_vcf.manual_sv_type
manual_sv_type.set_index('id', inplace=True)
manual_sv_type_expected = pd.read_csv(StringIO(data_expected), sep='\t', names=('id', 'value_idx', 'manual_sv_type'))
manual_sv_type_expected.set_index('id', inplace=True)
    pd.testing.assert_frame_equal(manual_sv_type, manual_sv_type_expected, check_like=True)
from os.path import abspath, dirname, join, isfile, normpath, relpath
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from scipy.interpolate import interp1d
import matplotlib.pylab as plt
from datetime import datetime
import mhkit.wave as wave
from io import StringIO
import pandas as pd
import numpy as np
import contextlib
import unittest
import netCDF4
import inspect
import pickle
import json
import sys
import os
import time
from random import seed, randint
testdir = dirname(abspath(__file__))
datadir = normpath(join(testdir,relpath('../../examples/data/wave')))
class TestResourceSpectrum(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
df = self.f[1] - self.f[0]
Trep = 1/df
self.t = np.arange(0, Trep, 0.05)
@classmethod
def tearDownClass(self):
pass
def test_pierson_moskowitz_spectrum(self):
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
Tp0 = wave.resource.peak_period(S).iloc[0,0]
error = np.abs(self.Tp - Tp0)/self.Tp
self.assertLess(error, 0.01)
def test_bretschneider_spectrum(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Tp - Tp0)/self.Tp
errorTp0 = np.abs(self.Hs - Hm0)/self.Hs
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
def test_surface_elevation_seed(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
eta0 = wave.resource.surface_elevation(S, self.t)
eta1 = wave.resource.surface_elevation(S, self.t, seed=seednum)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phasing(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
eta0 = wave.resource.surface_elevation(S, self.t)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
np.random.seed(seednum)
phases = np.random.rand(len(S)) * 2 * np.pi
eta1 = wave.resource.surface_elevation(S, self.t, phases=phases)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phases_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
phases_np = np.random.rand(S.shape[0], S.shape[1]) * 2 * np.pi
phases_pd = pd.DataFrame(phases_np, index=S.index, columns=S.columns)
eta_np = wave.resource.surface_elevation(S, self.t, phases=phases_np)
eta_pd = wave.resource.surface_elevation(S, self.t, phases=phases_pd)
assert_frame_equal(eta_np, eta_pd)
def test_surface_elevation_frequency_bins_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
eta0 = wave.resource.surface_elevation(S, self.t)
f_bins_np = np.array([np.diff(S.index)[0]]*len(S))
f_bins_pd = pd.DataFrame(f_bins_np, index=S.index, columns=['df'])
eta_np = wave.resource.surface_elevation(S, self.t, frequency_bins=f_bins_np)
eta_pd = wave.resource.surface_elevation(S, self.t, frequency_bins=f_bins_pd)
assert_frame_equal(eta0, eta_np)
assert_frame_equal(eta_np, eta_pd)
def test_surface_elevation_moments(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
eta = wave.resource.surface_elevation(S, self.t)
dt = self.t[1] - self.t[0]
Sn = wave.resource.elevation_spectrum(eta, 1/dt, len(eta.values),
detrend=False, window='boxcar',
noverlap=0)
m0 = wave.resource.frequency_moment(S,0).m0.values[0]
m0n = wave.resource.frequency_moment(Sn,0).m0.values[0]
errorm0 = np.abs((m0 - m0n)/m0)
self.assertLess(errorm0, 0.01)
m1 = wave.resource.frequency_moment(S,1).m1.values[0]
m1n = wave.resource.frequency_moment(Sn,1).m1.values[0]
errorm1 = np.abs((m1 - m1n)/m1)
self.assertLess(errorm1, 0.01)
def test_surface_elevation_rmse(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
eta = wave.resource.surface_elevation(S, self.t)
dt = self.t[1] - self.t[0]
Sn = wave.resource.elevation_spectrum(eta, 1/dt, len(eta),
detrend=False, window='boxcar',
noverlap=0)
fSn = interp1d(Sn.index.values, Sn.values, axis=0)
rmse = (S.values - fSn(S.index.values))**2
rmse_sum = (np.sum(rmse)/len(rmse))**0.5
self.assertLess(rmse_sum, 0.02)
def test_jonswap_spectrum(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Tp - Tp0)/self.Tp
errorTp0 = np.abs(self.Hs - Hm0)/self.Hs
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
def test_plot_spectrum(self):
filename = abspath(join(testdir, 'wave_plot_spectrum.png'))
if isfile(filename):
os.remove(filename)
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
plt.figure()
wave.graphics.plot_spectrum(S)
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_chakrabarti(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti.png'))
if isfile(filename):
os.remove(filename)
D = 5
H = 10
lambda_w = 200
wave.graphics.plot_chakrabarti(H, lambda_w, D)
plt.savefig(filename)
def test_plot_chakrabarti_np(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti_np.png'))
if isfile(filename):
os.remove(filename)
D = np.linspace(5, 15, 5)
H = 10 * np.ones_like(D)
lambda_w = 200 * np.ones_like(D)
wave.graphics.plot_chakrabarti(H, lambda_w, D)
plt.savefig(filename)
self.assertTrue(isfile(filename))
def test_plot_chakrabarti_pd(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti_pd.png'))
if isfile(filename):
os.remove(filename)
D = np.linspace(5, 15, 5)
H = 10 * np.ones_like(D)
lambda_w = 200 * np.ones_like(D)
df = pd.DataFrame([H.flatten(),lambda_w.flatten(),D.flatten()],
index=['H','lambda_w','D']).transpose()
wave.graphics.plot_chakrabarti(df.H, df.lambda_w, df.D)
plt.savefig(filename)
self.assertTrue(isfile(filename))
class TestResourceMetrics(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
file_name = join(datadir, 'ValData1.json')
with open(file_name, "r") as read_file:
self.valdata1 = pd.DataFrame(json.load(read_file))
self.valdata2 = {}
file_name = join(datadir, 'ValData2_MC.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['MC'] = data
for i in data.keys():
# Calculate elevation spectra
elevation = pd.DataFrame(data[i]['elevation'])
elevation.index = elevation.index.astype(float)
elevation.sort_index(inplace=True)
sample_rate = data[i]['sample_rate']
NFFT = data[i]['NFFT']
self.valdata2['MC'][i]['S'] = wave.resource.elevation_spectrum(elevation,
sample_rate, NFFT)
file_name = join(datadir, 'ValData2_AH.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['AH'] = data
for i in data.keys():
# Calculate elevation spectra
elevation = pd.DataFrame(data[i]['elevation'])
elevation.index = elevation.index.astype(float)
elevation.sort_index(inplace=True)
sample_rate = data[i]['sample_rate']
NFFT = data[i]['NFFT']
self.valdata2['AH'][i]['S'] = wave.resource.elevation_spectrum(elevation,
sample_rate, NFFT)
file_name = join(datadir, 'ValData2_CDiP.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['CDiP'] = data
for i in data.keys():
temp = pd.Series(data[i]['S']).to_frame('S')
temp.index = temp.index.astype(float)
self.valdata2['CDiP'][i]['S'] = temp
@classmethod
def tearDownClass(self):
pass
def test_kfromw(self):
for i in self.valdata1.columns:
f = np.array(self.valdata1[i]['w'])/(2*np.pi)
h = self.valdata1[i]['h']
rho = self.valdata1[i]['rho']
expected = self.valdata1[i]['k']
k = wave.resource.wave_number(f, h, rho)
calculated = k.loc[:,'k'].values
error = ((expected-calculated)**2).sum() # SSE
self.assertLess(error, 1e-6)
def test_kfromw_one_freq(self):
g = 9.81
f = 0.1
h = 1e9
w = np.pi*2*f # deep water dispersion
expected = w**2 / g
calculated = wave.resource.wave_number(f=f, h=h, g=g).values[0][0]
error = np.abs(expected-calculated)
self.assertLess(error, 1e-6)
def test_wave_length(self):
k_list=[1,2,10,3]
l_expected = (2.*np.pi/np.array(k_list)).tolist()
k_df = pd.DataFrame(k_list,index = [1,2,3,4])
k_series= k_df[0]
k_array=np.array(k_list)
for l in [k_list, k_df, k_series, k_array]:
l_calculated = wave.resource.wave_length(l)
self.assertListEqual(l_expected,l_calculated.tolist())
idx=0
k_int = k_list[idx]
l_calculated = wave.resource.wave_length(k_int)
self.assertEqual(l_expected[idx],l_calculated)
def test_depth_regime(self):
expected = [True,True,False,True]
l_list=[1,2,10,3]
l_df = pd.DataFrame(l_list,index = [1,2,3,4])
l_series= l_df[0]
l_array=np.array(l_list)
h = 10
for l in [l_list, l_df, l_series, l_array]:
calculated = wave.resource.depth_regime(l,h)
self.assertListEqual(expected,calculated.tolist())
idx=0
l_int = l_list[idx]
calculated = wave.resource.depth_regime(l_int,h)
self.assertEqual(expected[idx],calculated)
def test_wave_celerity(self):
# Depth regime ratio
dr_ratio=2
# small change in f will give similar value cg
f=np.linspace(20.0001,20.0005,5)
# Choose index to spike at. cg spike is inversly proportional to k
k_idx=2
k_tmp=[1, 1, 0.5, 1, 1]
k = pd.DataFrame(k_tmp, index=f)
# all shallow
cg_shallow1 = wave.resource.wave_celerity(k, h=0.0001,depth_check=True)
cg_shallow2 = wave.resource.wave_celerity(k, h=0.0001,depth_check=False)
self.assertTrue(all(cg_shallow1.squeeze().values ==
cg_shallow2.squeeze().values))
# all deep
cg = wave.resource.wave_celerity(k, h=1000,depth_check=True)
self.assertTrue(all(np.pi*f/k.squeeze().values == cg.squeeze().values))
def test_energy_flux_deep(self):
# Dependent on mhkit.resource.BS spectrum
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
Te = wave.resource.energy_period(S)
Hm0 = wave.resource.significant_wave_height(S)
rho=1025
g=9.80665
coeff = rho*(g**2)/(64*np.pi)
J = coeff*(Hm0.squeeze()**2)*Te.squeeze()
h=-1 # not used when deep=True
J_calc = wave.resource.energy_flux(S, h, deep=True)
self.assertTrue(J_calc.squeeze() == J)
def test_moments(self):
for file_i in self.valdata2.keys(): # for each file MC, AH, CDiP
datasets = self.valdata2[file_i]
for s in datasets.keys(): # for each set
data = datasets[s]
for m in data['m'].keys():
expected = data['m'][m]
S = data['S']
if s == 'CDiP1' or s == 'CDiP6':
f_bins=pd.Series(data['freqBinWidth'])
else:
f_bins = None
calculated = wave.resource.frequency_moment(S, int(m)
,frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
self.assertLess(error, 0.01)
def test_metrics(self):
for file_i in self.valdata2.keys(): # for each file MC, AH, CDiP
datasets = self.valdata2[file_i]
for s in datasets.keys(): # for each set
data = datasets[s]
S = data['S']
if file_i == 'CDiP':
                    f_bins = pd.Series(data['freqBinWidth'])
import re
import pandas as pd
from .soup import get_soup, table_to_df
TICKER_IN_PARENTHESIS_RE = re.compile(r'(?P<company_name>.+) \((?P<ticker>[A-Z]+)\)')
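# e.g. "Apple Inc. (AAPL)" -> company_name="Apple Inc.", ticker="AAPL"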
def get_wiki_table_df(url, index_col=None, columns=None):
"""Returns the first table of a Wikipedia page as a DataFrame"""
soup = get_soup(url)
table = soup.find('table', attrs={'class': 'wikitable'})
return table_to_df(table, index_col, columns)
def wiki_components_list_to_df(list_tag):
d = {'ticker': [], 'company_name': []}
for li in list_tag.find_all('li'):
match = TICKER_IN_PARENTHESIS_RE.search(li.text)
d['ticker'].append(match.group('ticker'))
d['company_name'].append(match.group('company_name'))
    return pd.DataFrame(d)
import io
import os
from random import choice
import pandas as pd
import torch
import torch.nn as nn
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import transforms as T
from torchvision.datasets import ImageFolder
from torchvision.models.resnet import BasicBlock, ResNet
SANITY_DIR = "dataset/sanity"
ID2LABEL = {
0: 'chicken_curry',
1: 'chicken_wings',
2: 'fried_rice',
3: 'grilled_salmon',
4: 'hamburger',
5: 'ice_cream',
6: 'pizza',
7: 'ramen',
8: 'steak',
9: 'sushi'
}
class ImageClassifier(ResNet):
def __init__(self):
super(ImageClassifier, self).__init__(BasicBlock, [2,2,2,2], num_classes=10)
self.fc = nn.Sequential(
nn.Linear(512 * BasicBlock.expansion, 128),
nn.ReLU(),
nn.Dropout(.2),
nn.Linear(128, 10),
nn.LogSoftmax(dim=1)
)
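# A ResNet-18 backbone (BasicBlock, [2, 2, 2, 2]) whose final fully connected layer
# is replaced by a 512 -> 128 -> 10 head with dropout and LogSoftmax, matching the
# ten food classes in ID2LABEL above.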
image_processing = T.Compose([
T.Resize((256,256)),
T.CenterCrop((224,224)),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
sanity_dataset = ImageFolder(
root=SANITY_DIR,
transform=image_processing
)
sanity_loader = DataLoader(
sanity_dataset,
batch_size=8,
num_workers=0,
shuffle=True
)
model = ImageClassifier()
model.load_state_dict(torch.load("model/foodnet_resnet18.pth", map_location=torch.device('cpu')))
model.eval();
criterion = nn.CrossEntropyLoss()
running_corrects, running_loss = .0, .0
all_preds = torch.Tensor()
shuffled_labels = torch.Tensor()
for inputs, labels in sanity_loader:
inputs, labels = inputs.to('cpu'), labels.to('cpu')
shuffled_labels = torch.cat((shuffled_labels, labels), dim=0)
with torch.no_grad():
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
all_preds = torch.cat((all_preds, preds), dim=0)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels)
stacks = torch.stack((shuffled_labels.type(torch.int32), all_preds.type(torch.int32)), dim=1)
conf_mat = torch.zeros(len(ID2LABEL), len(ID2LABEL), dtype=torch.int32)
for stack in stacks:
true_label, pred_label = stack.tolist()
conf_mat[true_label, pred_label] += 1
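# Rows index the true label and columns the prediction, e.g. conf_mat[3, 5] counts
# grilled_salmon images that were classified as ice_cream.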
with open("confusion_matrix.txt", "w") as f:
f.write(pd.DataFrame(conf_mat.numpy(), index=list(ID2LABEL.values()), columns=list(ID2LABEL.values())).to_markdown())
loss = running_loss / len(sanity_dataset)
acc = running_corrects.double() / len(sanity_dataset)
with open("results.txt", "w") as f:
    f.write(pd.DataFrame([{'accuracy': acc, 'loss': loss}]).to_markdown())
import calendar
import datetime
import numpy as np
import pandas as pd
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_index_equal)
from numpy.testing import assert_allclose
import pytest
from pvlib.location import Location
from pvlib import solarposition, spa
from conftest import (requires_ephem, needs_pandas_0_17,
requires_spa_c, requires_numba)
# setup times and locations to be tested.
times = pd.date_range(start=datetime.datetime(2014,6,24),
end=datetime.datetime(2014,6,26), freq='15Min')
tus = Location(32.2, -111, 'US/Arizona', 700) # no DST issues possible
# In 2003, DST in US was from April 6 to October 26
golden_mst = Location(39.742476, -105.1786, 'MST', 1830.14) # no DST issues possible
golden = Location(39.742476, -105.1786, 'America/Denver', 1830.14) # DST issues possible
times_localized = times.tz_localize(tus.tz)
tol = 5
@pytest.fixture()
def expected_solpos():
return pd.DataFrame({'elevation': 39.872046,
'apparent_zenith': 50.111622,
'azimuth': 194.340241,
'apparent_elevation': 39.888378},
index=['2003-10-17T12:30:30Z'])
@pytest.fixture()
def expected_solpos_multi():
return pd.DataFrame({'elevation': [39.872046, 39.505196],
'apparent_zenith': [50.111622, 50.478260],
'azimuth': [194.340241, 194.311132],
'apparent_elevation': [39.888378, 39.521740]},
index=[['2003-10-17T12:30:30Z', '2003-10-18T12:30:30Z']])
# the physical tests are run at the same time as the NREL SPA test.
# pyephem reproduces the NREL result to 2 decimal places.
# this doesn't mean that one code is better than the other.
@requires_spa_c
def test_spa_c_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_c(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_spa_c
def test_spa_c_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_c(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_python(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_numba
def test_spa_python_numba_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numba', numthreads=1)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_numba
def test_spa_python_numba_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_python(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numba', numthreads=1)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@needs_pandas_0_17
def test_get_sun_rise_set_transit():
south = Location(-35.0, 0.0, tz='UTC')
times = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 0),
datetime.datetime(2004, 12, 4, 0)]
).tz_localize('UTC')
sunrise = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 7, 8, 15),
datetime.datetime(2004, 12, 4, 4, 38, 57)]
).tz_localize('UTC').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 17, 1, 4),
datetime.datetime(2004, 12, 4, 19, 2, 2)]
).tz_localize('UTC').tolist()
result = solarposition.get_sun_rise_set_transit(times, south.latitude,
south.longitude,
delta_t=64.0)
frame = pd.DataFrame({'sunrise':sunrise, 'sunset':sunset}, index=times)
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
# the rounding fails on pandas < 0.17
for col, data in result.iteritems():
result_rounded[col] = pd.to_datetime(
np.floor(data.values.astype(np.int64) / 1e9)*1e9, utc=True)
del result_rounded['transit']
assert_frame_equal(frame, result_rounded)
# tests from USNO
# Golden
golden = Location(39.0, -105.0, tz='MST')
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 8, 2),]
).tz_localize('MST')
sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 7, 19, 2),
datetime.datetime(2015, 8, 2, 5, 1, 26)
]).tz_localize('MST').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 16, 49, 10),
datetime.datetime(2015, 8, 2, 19, 11, 31)
]).tz_localize('MST').tolist()
result = solarposition.get_sun_rise_set_transit(times, golden.latitude,
golden.longitude,
delta_t=64.0)
frame = pd.DataFrame({'sunrise':sunrise, 'sunset':sunset}, index=times)
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
# the rounding fails on pandas < 0.17
for col, data in result.iteritems():
result_rounded[col] = (pd.to_datetime(
np.floor(data.values.astype(np.int64) / 1e9)*1e9, utc=True)
.tz_convert('MST'))
del result_rounded['transit']
assert_frame_equal(frame, result_rounded)
@requires_ephem
def test_pyephem_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.pyephem(times, golden_mst.latitude,
golden_mst.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos.round(2),
ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_pyephem_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30), periods=1,
freq='D', tz=golden.tz)
ephem_data = solarposition.pyephem(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos.round(2),
ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_calc_time():
import pytz
import math
# validation from USNO solar position calculator online
epoch = datetime.datetime(1970,1,1)
epoch_dt = pytz.utc.localize(epoch)
loc = tus
loc.pressure = 0
actual_time = pytz.timezone(loc.tz).localize(
datetime.datetime(2014, 10, 10, 8, 30))
lb = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, tol))
ub = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, 10))
alt = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
'alt', math.radians(24.7))
az = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
'az', math.radians(116.3))
actual_timestamp = (actual_time - epoch_dt).total_seconds()
assert_allclose((alt.replace(second=0, microsecond=0) -
epoch_dt).total_seconds(), actual_timestamp)
assert_allclose((az.replace(second=0, microsecond=0) -
epoch_dt).total_seconds(), actual_timestamp)
@requires_ephem
def test_earthsun_distance():
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D')
distance = solarposition.pyephem_earthsun_distance(times).values[0]
assert_allclose(1, distance, atol=0.1)
def test_ephemeris_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.ephemeris(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_ephemeris_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.ephemeris(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
from datetime import datetime, timedelta
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lrange, range, zip
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.resample import _get_period_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
@pytest.fixture()
def _index_factory():
return period_range
@pytest.fixture
def _series_name():
return 'pi'
class TestPeriodIndex(object):
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + obj.index.freq).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self, series):
# test for fill value during resampling, issue 3715
s = series
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
@pytest.mark.parametrize('kwargs', [dict(on='date'), dict(level='d')])
def test_selection(self, index, freq, kind, kwargs):
# This is a bug, these should be implemented
# GH 14008
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame({'date': index, 'a': rng},
index=pd.MultiIndex.from_arrays([rng, index],
names=['v', 'd']))
msg = ("Resampling from level= or on= selection with a PeriodIndex is"
r" not currently supported, use \.set_index\(\.\.\.\) to"
" explicitly set index")
with pytest.raises(NotImplementedError, match=msg):
df.resample(freq, kind=kind, **kwargs)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('meth', ['ffill', 'bfill'])
@pytest.mark.parametrize('conv', ['start', 'end'])
@pytest.mark.parametrize('targ', ['D', 'B', 'M'])
def test_annual_upsample_cases(self, targ, conv, meth, month,
simple_period_range_series):
ts = simple_period_range_series(
'1/1/1990', '12/31/1991', freq='A-%s' % month)
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec').mean(), result)
assert_series_equal(ts.resample('a').mean(), result)
@pytest.mark.parametrize('rule,expected_error_msg', [
('a-dec', '<YearEnd: month=12>'),
('q-mar', '<QuarterEnd: startingMonth=3>'),
('M', '<MonthEnd>'),
('w-thu', '<Week: weekday=3>')
])
def test_not_subperiod(
self, simple_period_range_series, rule, expected_error_msg):
# These are incompatible period rules for resampling
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='w-wed')
msg = ("Frequency <Week: weekday=2> cannot be resampled to {}, as they"
" are not sub or super periods").format(expected_error_msg)
with pytest.raises(IncompatibleFrequency, match=msg):
ts.resample(rule).mean()
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
resampled = result.resample(freq, convention='end').ffill()
expected = result.to_timestamp(freq, how='end')
expected = expected.asfreq(freq, 'ffill').to_period(freq)
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', convention='end').ffill(limit=2)
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D').ffill()
exp = df['a'].resample('D').ffill()
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M').ffill()
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('target', ['D', 'B', 'M'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_quarterly_upsample(self, month, target, convention,
simple_period_range_series):
freq = 'Q-{month}'.format(month=month)
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_monthly_upsample(self, target, convention,
simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='M')
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_basic(self):
# GH3609
s = Series(range(100), index=date_range(
'20130101', freq='s', periods=100, name='idx'), dtype='float')
s[10:30] = np.nan
index = PeriodIndex([
Period('2013-01-01 00:00', 'T'),
Period('2013-01-01 00:01', 'T')], name='idx')
expected = Series([34.5, 79.5], index=index)
result = s.to_period().resample('T', kind='period').mean()
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period').mean()
assert_series_equal(result2, expected)
@pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]),
('2M', [31 + 29, 31 + 9])])
def test_resample_count(self, freq, expected_vals):
# GH12774
series = Series(1, index=pd.period_range(start='2000', periods=100))
result = series.resample(freq).count()
expected_index = pd.period_range(start='2000', freq=freq,
periods=len(expected_vals))
expected = Series(expected_vals, index=expected_index)
assert_series_equal(result, expected)
def test_resample_same_freq(self, resample_method):
# GH12770
series = Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M'))
expected = series
result = getattr(series.resample('M'), resample_method)()
assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
msg = ("Frequency <MonthEnd> cannot be resampled to <Week: weekday=6>,"
" as they are not sub or super periods")
with pytest.raises(IncompatibleFrequency, match=msg):
Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M')).resample('W').mean()
def test_with_local_timezone_pytz(self):
# see gh-5430
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D') -
offsets.Day())
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_resample_with_pytz(self):
# GH 13238
s = Series(2, index=pd.date_range('2017-01-01', periods=48, freq="H",
tz="US/Eastern"))
result = s.resample("D").mean()
expected = Series(2, index=pd.DatetimeIndex(['2017-01-01',
'2017-01-02'],
tz="US/Eastern"))
assert_series_equal(result, expected)
# Especially assert that the timezone is LMT for pytz
assert result.index.tz == pytz.timezone('US/Eastern')
def test_with_local_timezone_dateutil(self):
# see gh-5430
local_timezone = 'dateutil/America/Los_Angeles'
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
index = pd.date_range(start, end, freq='H', name='idx')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D',
name='idx') - offsets.Day())
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_resample_nonexistent_time_bin_edge(self):
# GH 19375
index = date_range('2017-03-12', '2017-03-12 1:45:00', freq='15T')
s = Series(np.zeros(len(index)), index=index)
expected = s.tz_localize('US/Pacific')
result = expected.resample('900S').mean()
tm.assert_series_equal(result, expected)
# GH 23742
index = date_range(start='2017-10-10', end='2017-10-20', freq='1H')
index = index.tz_localize('UTC').tz_convert('America/Sao_Paulo')
df = DataFrame(data=list(range(len(index))), index=index)
result = df.groupby(pd.Grouper(freq='1D')).count()
expected = date_range(start='2017-10-09', end='2017-10-20', freq='D',
tz="America/Sao_Paulo",
nonexistent='shift_forward', closed='left')
tm.assert_index_equal(result.index, expected)
def test_resample_ambiguous_time_bin_edge(self):
# GH 10117
idx = pd.date_range("2014-10-25 22:00:00", "2014-10-26 00:30:00",
freq="30T", tz="Europe/London")
expected = Series(np.zeros(len(idx)), index=idx)
result = expected.resample('30T').mean()
tm.assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
# GH2073
s = Series(np.arange(9, dtype='int64'),
index=date_range('2010-01-01', periods=9, freq='Q'))
last = s.resample('M').ffill()
both = s.resample('M').ffill().resample('M').last().astype('int64')
assert_series_equal(last, both)
@pytest.mark.parametrize('day', DAYS)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_weekly_upsample(self, day, target, convention,
simple_period_range_series):
freq = 'W-{day}'.format(day=day)
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp').mean()
expected = ts.to_timestamp(how='start').resample('A-DEC').mean()
assert_series_equal(result, expected)
def test_resample_to_quarterly(self, simple_period_range_series):
for month in MONTHS:
ts = simple_period_range_series(
'1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month).ffill()
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = simple_period_range_series('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how).ffill()
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR').ffill()
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = Series(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A').ffill()
expected = stamps.resample('A').ffill().to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = Series(np.random.randn(5), index=rng)
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
s.resample('A').ffill()
@pytest.mark.parametrize('freq', ['5min'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_resample_5minute(self, freq, kind):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
expected = ts.to_timestamp().resample(freq).mean()
if kind != 'timestamp':
expected = expected.to_period(freq)
result = ts.resample(freq, kind=kind).mean()
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self, simple_period_range_series):
ts = simple_period_range_series('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D').asfreq()
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = simple_period_range_series('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s').asfreq()
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min').apply(len)
expected = s.resample('10min').apply(len).loc[result.index]
assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range('1/1/2000', periods=10, freq='W-WED')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('W-THU').asfreq()
assert result.isna().all()
result = ts.resample('W-THU').asfreq().ffill()[:-1]
expected = ts.asfreq('W-THU').ffill()
assert_series_equal(result, expected)
def test_resample_tz_localized(self):
dr = date_range(start='2012-4-13', end='2012-5-1')
ts = Series(lrange(len(dr)), dr)
ts_utc = ts.tz_localize('UTC')
ts_local = ts_utc.tz_convert('America/Los_Angeles')
result = ts_local.resample('W').mean()
ts_local_naive = ts_local.copy()
ts_local_naive.index = [x.replace(tzinfo=None)
for x in ts_local_naive.index.to_pydatetime()]
exp = ts_local_naive.resample(
'W').mean().tz_localize('America/Los_Angeles')
assert_series_equal(result, exp)
# it works
result = ts_local.resample('D').mean()
# #2245
idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
tz='Australia/Sydney')
s = Series([1, 2], index=idx)
result = s.resample('D', closed='right', label='right').mean()
ex_index = date_range('2001-09-21', periods=1, freq='D',
tz='Australia/Sydney')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# for good measure
result = s.resample('D', kind='period').mean()
ex_index = period_range('2001-09-20', periods=1, freq='D')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# GH 6397
# comparing an offset that doesn't propagate tz's
rng = date_range('1/1/2011', periods=20000, freq='H')
rng = rng.tz_localize('EST')
ts = DataFrame(index=rng)
ts['first'] = np.random.randn(len(rng))
ts['second'] = np.cumsum(np.random.randn(len(rng)))
expected = DataFrame(
{
'first': ts.resample('A').sum()['first'],
'second': ts.resample('A').mean()['second']},
columns=['first', 'second'])
result = ts.resample(
'A').agg({'first': np.sum,
'second': np.mean}).reindex(columns=['first', 'second'])
assert_frame_equal(result, expected)
def test_closed_left_corner(self):
# #1465
s = Series(np.random.randn(21),
index=date_range(start='1/1/2012 9:30',
freq='1min', periods=21))
s[0] = np.nan
result = s.resample('10min', closed='left', label='right').mean()
exp = s[1:].resample('10min', closed='left', label='right').mean()
assert_series_equal(result, exp)
result = s.resample('10min', closed='left', label='left').mean()
exp = s[1:].resample('10min', closed='left', label='left').mean()
ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
tm.assert_index_equal(result.index, ex_index)
assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range('2000Q1', periods=10, freq='Q-DEC')
ts = Series(np.arange(10), index=rng)
result = ts.resample('A').mean()
exp = ts.to_timestamp().resample('A').mean().to_period()
assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = date_range(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
index=ind)
# it works!
df.resample('W-MON', closed='left', label='left').first()
def test_resample_with_dst_time_change(self):
# GH 15549
index = (
            pd.DatetimeIndex([1457537600000000000, 1458059600000000000]))
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import random
import pickle
import missingno as msno
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
import sklearn
from sklearn.feature_selection import SelectKBest, f_classif, chi2
from sklearn import svm
from sklearn.svm import OneClassSVM
from sklearn.preprocessing import OneHotEncoder, label_binarize
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score, auc, \
classification_report, multilabel_confusion_matrix, precision_recall_curve, roc_curve, average_precision_score
from lightgbm import LGBMClassifier
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.wrappers.scikit_learn import KerasClassifier
from src.config import Config
from src.analysis import Analysis
pd.set_option('display.max_rows', 500)
class Train(Config):
def __init__(self):
self.data = {}
def dataset_prep(self, retrain_pca=False, save_pca=False):
# Set 1: Raw Data + Missing Data Deletion
data = analysis.read_file()
raw_data = data[~data.isna().any(axis=1)].reset_index(drop=True)
dataset1 = raw_data.copy()
missing_data = pd.DataFrame(raw_data.isna().sum()[raw_data.isna().sum() != 0], columns=["missing_count"])
missing_data["percentage"] = missing_data.missing_count*100/raw_data.shape[0]
# Set 2: Raw Data + Missing Data Deletion + Outlier Removal (Set 1 + Outlier Removal)
# Outlier Removal of Normal Class where Minority Class have same behavior
# Selected columns are also columns used for flagging (Feature Engineering)
selected_columns = Config.ANALYSIS_CONFIG["XCHART_COLUMNS"]
numerical_columns = raw_data[selected_columns].select_dtypes(["float", "int"])
clean_df = analysis.outlier_identification(raw_data, numerical_columns, mode="outlier_removal")
# Since the inherent missingess is from majority class and the maximum percentage missingness is 5%, \
# the rows are simply removed
missing_data = pd.DataFrame(clean_df.isna().sum()[clean_df.isna().sum() != 0], columns=["missing_count"])
missing_data["percentage"] = missing_data.missing_count*100/clean_df.shape[0]
dataset2 = clean_df[~clean_df.isna().any(axis=1)].reset_index(drop=True)
# Set 3: Raw Data + Missing Data Deletion + Outlier Removal + Feature Transformation (Set 2 + Feature Transformation)
transformed_dict, _ = analysis.data_transformation(dataset2)
transformed_df = pd.concat([transformed_dict["SYMMETRICAL_DATA"],
transformed_dict["MODPOS_TRANSFORMED"],
transformed_dict["MODNEG_TRANSFORMED"],
transformed_dict["HIGHPOS_TRANSFORMED"],
transformed_dict["HIGHNEG_TRANSFORMED"],
dataset2[["105", "147"]], # Include bimodal distributions for now
dataset2.select_dtypes("category")], axis=1)
dataset3 = transformed_df.copy()
# Set 4: Raw Data + Missing Data Deletion + Outlier Removal + Feature Transformation + Feature Engineering (Flag)
selected_columns = Config.ANALYSIS_CONFIG["TRANSFORMED_COLUMNS"]
numerical_columns = dataset3[selected_columns].select_dtypes(["float", "int"])
flag_data = analysis.outlier_identification(dataset3, numerical_columns, mode='feature_engineering')
dataset4 = pd.concat([flag_data.reset_index(drop=True),
dataset3[selected_columns].select_dtypes(["category"])], axis=1)
# Save dataset
dataset1.to_csv("./data/dataset1.csv", index=False)
dataset2.to_csv("./data/dataset2.csv", index=False)
dataset3.to_csv("./data/dataset3.csv", index=False)
dataset4.to_csv("./data/dataset4.csv", index=False)
# Read dataset and change datatype
dataset1 = analysis.read_file("./data/dataset1.csv")
dataset2 = analysis.read_file("./data/dataset2.csv")
dataset3 = analysis.read_file("./data/dataset3.csv")
dataset4 = analysis.read_file("./data/dataset4.csv")
# Set 5: Set 4 -> Pure PCA + Target
pca_df1 = analysis.pca_transformation(dataset1, retrain=retrain_pca, fname="./models/pca_v3.sav", save=save_pca)
pca_df2 = analysis.pca_transformation(dataset2, retrain=retrain_pca, fname="./models/pca_v4.sav", save=save_pca)
pca_df3 = analysis.pca_transformation(dataset3, retrain=retrain_pca, fname="./models/pca_v3.sav", save=save_pca)
pca_df4 = analysis.pca_transformation(dataset4, retrain=retrain_pca, fname="./models/pca_v4.sav", save=save_pca)
pca_df1.to_csv("./data/pca_dataset1.csv", index=False)
pca_df2.to_csv("./data/pca_dataset2.csv", index=False)
pca_df3.to_csv("./data/pca_dataset3.csv", index=False)
pca_df4.to_csv("./data/pca_dataset4.csv", index=False)
pca_df1 = analysis.read_file("./data/pca_dataset1.csv")
pca_df2 = analysis.read_file("./data/pca_dataset2.csv")
pca_df3 = analysis.read_file("./data/pca_dataset3.csv")
pca_df4 = analysis.read_file("./data/pca_dataset4.csv")
# Set 6: Hybrid of all (Transformed, Engineering)
combined_df1 = pd.concat([dataset1.loc[:, ~dataset1.columns.isin(["target"])].reset_index(drop=True),
pca_df1.reset_index(drop=True)], axis=1)
combined_df2 = pd.concat([dataset2.loc[:, ~dataset2.columns.isin(["target"])].reset_index(drop=True),
pca_df2.reset_index(drop=True)], axis=1)
combined_df3 = pd.concat([dataset3.loc[:, ~dataset3.columns.isin(["target"])].reset_index(drop=True),
pca_df3.reset_index(drop=True)], axis=1)
combined_df4 = pd.concat([dataset4.loc[:, ~dataset4.columns.isin(["target"])].reset_index(drop=True),
pca_df4.reset_index(drop=True)], axis=1)
combined_df1.to_csv("./data/combined_dataset1.csv", index=False)
combined_df2.to_csv("./data/combined_dataset2.csv", index=False)
combined_df3.to_csv("./data/combined_dataset3.csv", index=False)
combined_df4.to_csv("./data/combined_dataset4.csv", index=False)
def feature_selection(self, X_train, y_train, fname, retrain=False, num_cols="all", threshold=.5):
random.seed(123)
# Numerical input Categorical Output: ANOVA
X_anova = X_train.select_dtypes(["float", "int"])
filename = "./models/models_new/anova_{}".format(fname)
if retrain:
fs = SelectKBest(score_func=f_classif, k=num_cols).fit(X_anova, y_train)
pickle.dump(fs, open(filename, 'wb'))
fs = pickle.load(open(filename, 'rb'))
X_selected = fs.transform(X_anova)
anova_df = pd.DataFrame({"features_selected" : list(X_anova.loc[:, fs.get_support()].columns),
"features_pvalues": list(fs.pvalues_[fs.get_support()])})
anova_df = anova_df.loc[anova_df.features_pvalues <= threshold, :]
anova_df = anova_df.sort_values(by="features_pvalues", ascending=True)
# Numerical input Categorical Output: Chi2
X_chi2 = X_train.select_dtypes(["category"])
filename = "./models/models_new/chi2_{}".format(fname)
if retrain:
fs = SelectKBest(score_func=chi2, k=num_cols).fit(X_chi2, y_train)
pickle.dump(fs, open(filename, 'wb'))
fs = pickle.load(open(filename, 'rb'))
X_selected = fs.transform(X_chi2)
chi2_df = pd.DataFrame({"features_selected" : list(X_chi2.loc[:, fs.get_support()].columns),
"features_pvalues": list(fs.pvalues_[fs.get_support()])})
chi2_df = chi2_df.loc[chi2_df.features_pvalues <= threshold, :]
chi2_df = chi2_df.sort_values(by="features_pvalues", ascending=False)
return anova_df, chi2_df
def feature_importance(self, data, title, fontsize=20):
fig = plt.figure(figsize=(20,50))
plt.barh(data["features_selected"], data["features_pvalues"])
plt.title(title, fontsize=fontsize)
plt.xlabel("features_pvalues", fontsize=fontsize)
plt.ylabel("features", fontsize=fontsize)
return fig
def oversampling(self, X_train, y_train, plot=False):
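        # SMOTE synthesises new minority-class samples so the classes are balanced before training.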
oversample = SMOTE(random_state=123)
X_otrain,y_otrain = oversample.fit_resample(X_train,y_train)
if plot:
display(y_otrain.value_counts(normalize=True).plot.pie())
return X_otrain, y_otrain
def print_confusion_matrix(self, confusion_matrix, axes, class_label, class_names, fontsize=14):
df_cm = pd.DataFrame(confusion_matrix, index=class_names, columns=class_names)
try:
heatmap = sns.heatmap(df_cm, annot=True, fmt="d", cbar=False, ax=axes)
except ValueError:
raise ValueError("Confusion matrix values must be integers.")
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
axes.set_ylabel('True label')
axes.set_xlabel('Predicted label')
axes.set_title("Confusion Matrix for the class - " + class_label)
        return axes
def cm_single(self, y_test, y_pred, fontsize=14):
labels = ["".join("c" + str(i[0])) for i in pd.DataFrame(y_test).value_counts().index]
df_cm = pd.DataFrame(confusion_matrix(y_test, y_pred), index=labels, columns=labels)
fig = plt.figure(figsize=(6,4))
heatmap = sns.heatmap(df_cm, annot=True, fmt="d", cbar=False)
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
heatmap.set_ylabel('True label')
heatmap.set_xlabel('Predicted label')
heatmap.set_title("Confusion Matrix for Binary Label" )
return fig
def cm_plot(self, y_test, y_pred, nrow=3, ncol=2):
labels = ["".join("c" + str(i[0])) for i in pd.DataFrame(y_test).value_counts().index]
cm = multilabel_confusion_matrix(y_test, y_pred)
if nrow == 1:
figsize = (8,4)
else:
figsize = (12,7)
fig, ax = plt.subplots(nrow, ncol, figsize=figsize)
for axes, cfs_matrix, label in zip(ax.flatten(), cm, labels):
            self.print_confusion_matrix(cfs_matrix, axes, label, ["0", "1"])
if nrow == 3:
fig.delaxes(ax[2,1])
fig.tight_layout()
return fig
    def pr_auc(self, model, X_test, y_test, n_classes):
        fig, ax = plt.subplots(2, 1, figsize=(12, 7))
        # Precision-Recall Curve
        y_score = model.predict_proba(X_test)
        y_test = label_binarize(y_test, classes=[*range(n_classes)])
        precision = dict()
        recall = dict()
        for i in range(n_classes):
            precision[i], recall[i], _ = precision_recall_curve(y_test[:, i], y_score[:, i])
            ax[0].plot(recall[i], precision[i], lw=2, label='class {}'.format(i))
        no_skill = y_test.sum() / y_test.size  # baseline precision: overall positive rate
        ax[0].plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')
ax[0].set_xlabel("recall")
ax[0].set_ylabel("precision")
ax[0].legend(loc="best")
ax[0].set_title("precision vs. recall curve")
# ROC curve
fpr = dict()
tpr = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
ax[1].plot(fpr[i], tpr[i], lw=2, label='class {}'.format(i))
ax[1].plot([0, 1], [0, 1], linestyle='--', label='No Skill')
ax[1].set_xlabel("false positive rate")
ax[1].set_ylabel("true positive rate")
ax[1].legend(loc="best")
ax[1].set_title("ROC curve")
fig.tight_layout()
return fig
def traintest_split(self, data, test_size=0.3):
X, y = data.loc[:,~data.columns.isin(["target"])], data.loc[:,data.columns.isin(["target"])]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=test_size)
return X_train, X_test, y_train, y_test
def class_count(self, y_test, y_pred):
true = pd.DataFrame(y_test.value_counts())
pred = pd.DataFrame(list(np.unique(y_pred, return_counts=True)[1]))
pred.index = list(np.unique(y_pred, return_counts=True)[0])
final = pd.concat([true, pred], axis=1).rename(columns={0:"pred"})
return final
def plot_graphs(self, X_test, y_test, y_pred, model, fontsize=16):
algo_name = type(model).__name__
y_proba = model.predict_proba(X_test)
n_classes = len(y_test["target"].unique())
onehotencoder = OneHotEncoder()
y_enc = onehotencoder.fit_transform(np.array(y_test).reshape(-1,1)).toarray()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 5))
plt.style.use('default')
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_enc[:, i], y_proba[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
ax1.plot(fpr[i], tpr[i], lw=2, label='ROC curve of class {0} (area = {1:0.2f})'.format(i, roc_auc[i]))
ax1.set_xlim([0.0, 1.0])
ax1.set_ylim([0.0, 1.05])
ax1.set_xlabel("false positive rate")
ax1.set_ylabel("true positive rate")
ax1.legend(loc="lower right", prop={'size': 10})
ax1.set_title('ROC to multi-class')
fig.suptitle(algo_name, fontsize=16)
plt.style.use('default')
precision = dict()
recall = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_enc[:, i], y_proba[:, i])
ax2.plot(recall[i], precision[i], lw=2, label='PR Curve of class {}'.format(i))
ax2.set_xlim([0.0, 1.0])
ax2.set_ylim([0.0, 1.05])
ax2.set_xlabel("recall")
ax2.set_ylabel("precision")
ax2.legend(loc="lower right", prop={'size': 10})
ax2.set_title('Precision-Recall to multi-class')
fig.suptitle(algo_name, fontsize=16)
labels = ["".join("c" + str(i[0])) for i in | pd.DataFrame(y_test) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from scipy.stats import multivariate_normal as mvn
import seaborn as sn
import math
import gc
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler, normalize
import FourierClock
from scipy.stats import ks_2samp
from functools import reduce
import random
import os
from numpy.linalg import norm
import subprocess
from copulas.multivariate import GaussianMultivariate
val_errors1 = []
test_errors1 = []
N_GENES = 30
SEED = 0
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
os.environ['PYTHONHASHSEED'] = str(SEED)
df = pd.read_csv('Data\\X_train_raw.csv').T
df_valid = pd.read_csv('Data\\X_valid_raw.csv').T
df_test = pd.concat((pd.read_csv('Data\\X_test_raw_A.txt').T, pd.read_csv('Data\\X_test_raw_B.txt').T)).iloc[[0, 1, 2, 4, 5], :]
rach_clusters = pd.read_csv('Data\\X_train_clusters.csv')
Y_data = df.iloc[1:, -1].astype('float64')
Y_copy = Y_data
Y_valid_data = df_valid.iloc[1:, -1].astype('float64')
Y_valid_copy = Y_valid_data
common_IDs = reduce(np.intersect1d, (df.iloc[0, :-1].values, df_valid.iloc[0, :-1].values, df_test.iloc[0, :].values))
idx = np.where(df.iloc[0, :].isin(common_IDs))[0]
df = df.iloc[:, idx]
idx_valid = np.where(df_valid.iloc[0, :].isin(common_IDs))[0]
df_valid = df_valid.iloc[:, idx_valid]
idx_test = np.where(df_test.iloc[0, :].isin(common_IDs))[0]
df_test = df_test.iloc[:, idx_test]
X_data = df.iloc[1:, :].astype('float64')
X_ID = df.iloc[0, :]
X_valid_data = df_valid.iloc[1:, :].astype('float64')
X_valid_ID = df_valid.iloc[0, :]
X_test_data = df_test.iloc[1:, :].astype('float64')
X_test_ID = df_test.iloc[0, :]
X_ID1 = np.argsort(X_ID)
X_ID = X_ID.iloc[X_ID1]
X_data = X_data.iloc[:, X_ID1]
X_data.columns = X_ID
X_ID1 = np.argsort(X_valid_ID)
X_valid_ID = X_valid_ID.iloc[X_ID1]
X_valid_data = X_valid_data.iloc[:, X_ID1]
X_valid_data.columns = X_valid_ID
X_ID1 = np.argsort(X_test_ID)
X_test_ID = X_test_ID.iloc[X_ID1]
X_test_data = X_test_data.iloc[:, X_ID1]
X_test_data.columns = X_test_ID
# Variance threshold
from sklearn.feature_selection import VarianceThreshold
selector = VarianceThreshold()
selector.fit(X_data)
var_idx = selector.variances_ > 5
X_data = X_data.iloc[:, var_idx]
X_ID = X_ID.iloc[var_idx]
X_valid_data = X_valid_data.iloc[:, var_idx]
X_valid_ID = X_valid_ID.iloc[var_idx]
X_test_data = X_test_data.iloc[:, var_idx]
X_test_ID = X_test_ID.iloc[var_idx]
X_data.reset_index(inplace=True, drop=True)
X_valid_data.reset_index(inplace=True, drop=True)
X_test_data.reset_index(inplace=True, drop=True)
X_ID.reset_index(inplace=True, drop=True)
X_valid_ID.reset_index(inplace=True, drop=True)
X_test_ID.reset_index(inplace=True, drop=True)
del df
gc.collect()
n_folds = Y_data.shape[0]
folds = KFold(n_splits=n_folds, shuffle=True, random_state=SEED)
y_cos = -np.cos((2 * np.pi * Y_data.astype('float64') / 24)+(np.pi/2))
y_sin = np.sin((2 * np.pi * Y_data.astype('float64') / 24)+(np.pi/2))
Y_valid_cos = -np.cos((2 * np.pi * Y_valid_data.astype('float64') / 24)+(np.pi/2))
Y_valid_sin = np.sin((2 * np.pi * Y_valid_data.astype('float64') / 24)+(np.pi/2))
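# Both losses below measure the angular distance between the predicted and true
# (cos, sin) points on the unit circle, i.e. the error in predicted phase rather
# than a plain Euclidean error on the two components.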
def cyclical_loss(y_true, y_pred):
error = 0
for i in range(y_pred.shape[0]):
error += np.arccos((y_true[i, :] @ y_pred[i, :]) / (norm(y_true[i, :]) * norm(y_pred[i, :])))
return error
def custom_loss(y_true, y_pred):
return tf.reduce_mean((tf.math.acos(tf.matmul(y_true, tf.transpose(y_pred)) / ((tf.norm(y_true) * tf.norm(y_pred)) + tf.keras.backend.epsilon()))**2))
adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, amsgrad=False)
def larger_model():
# create model
model = Sequential()
model.add(Dense(256, kernel_initializer='normal', activation='relu'))
model.add(Dense(1024, kernel_initializer='normal', activation='relu'))
model.add(Dense(2, kernel_initializer='normal'))
# Compile model
model.compile(loss=custom_loss, optimizer=adam)
return model
Y_data = np.concatenate((y_cos.values.reshape(-1, 1), y_sin.values.reshape(-1, 1)), axis=1)
Y_valid_data = np.concatenate((Y_valid_cos.values.reshape(-1, 1), Y_valid_sin.values.reshape(-1, 1)), axis=1)
error = 0 # Initialise error
all_preds = np.zeros((Y_data.shape[0], 2)) # Create empty array
all_valid_preds = np.zeros((Y_valid_data.shape[0], 2)) # Create empty array
early_stop = EarlyStopping(patience=100, restore_best_weights=True, monitor='val_loss', mode='min')
X_data_times = X_data.T
Y_times = np.array([0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44])
scaler = StandardScaler()
X_data_times_idx = X_data_times.index
X_data_times = (scaler.fit_transform(X_data_times.T)).T
X_data_times = pd.DataFrame(data=X_data_times, index=X_data_times_idx)
X_data_times = pd.concat((pd.DataFrame(Y_times.reshape(1, 12)), X_data_times), axis=0)
X_data_times.to_csv('Data\\X_train_times.csv', header=None)
subprocess.call(['C:\\Program Files\\R\\R-4.0.3\\bin\\Rscript', 'metacycle_scores.R'], shell=False)
arser_scores = pd.read_csv('MetaScores\\ARSresult_X_train_times.csv')
jtk_scores = pd.read_csv('MetaScores\\JTKresult_X_train_times.csv')
auto_indices, auto_clock_genes, auto_scores = FourierClock.get_autocorrelated_genes(X_data, X_ID)
auto_scores = np.abs(np.array(auto_scores))
cross_indices, cross_clock_genes, cross_scores = FourierClock.cross_corr(X_data, Y_copy, X_ID)
cross_scores = np.abs(np.array(cross_scores))
scores = np.concatenate((auto_scores.reshape(-1, 1), cross_scores.reshape(-1, 1),
arser_scores['fdr_BH'].values.reshape(-1, 1), jtk_scores['ADJ.P'].values.reshape(-1, 1)),
axis=1)
scores[:, 2:] = 1-scores[:, 2:]
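# Fit a Gaussian copula to the four rhythmicity scores (autocorrelation,
# cross-correlation, ARSER FDR and JTK adjusted p-value, the last two flipped so
# larger is better) and rank each gene by the joint CDF of its score vector.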
num_resamples = 1000 # Change to 50,000/100,000
gcopula = GaussianMultivariate()
gcopula.fit(scores)
random_sample = gcopula.sample(num_resamples)
sample_scores = pd.DataFrame(random_sample)
mean = np.mean(sample_scores.values, axis=0)
covariance = np.cov(sample_scores.T)
dist = mvn(mean=mean, cov=covariance, allow_singular=True)
gene_scores = []
for i in range(scores.shape[0]):
gene_scores.append(dist.cdf(x=scores[i, :]))
gene_scores = np.array(gene_scores)
gene_scores = np.concatenate((arser_scores['CycID'].values.reshape(-1, 1), gene_scores.reshape(-1, 1)), axis=1)
gene_scores = gene_scores[gene_scores[:, 1].argsort()[::-1]]
selected_genes = gene_scores[:N_GENES*3, 0]
idx = np.where(X_ID.isin(selected_genes))[0]
selected_scores = gene_scores[idx]
X_data = X_data.iloc[:, idx]
idx_valid = np.where(X_valid_ID.isin(selected_genes))[0]
X_valid_data = X_valid_data.iloc[:, idx_valid]
idx_test = np.where(X_test_ID.isin(selected_genes))[0]
X_test_data = X_test_data.iloc[:, idx_test]
X_ID = X_ID.iloc[idx]
X_valid_ID = X_valid_ID.iloc[idx_valid]
X_test_ID = X_test_ID.iloc[idx_test]
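# Two-sample Kolmogorov-Smirnov test per gene: keep the genes whose expression
# distributions are most similar between the training and validation sets
# (i.e. the largest p-values).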
scores = []
pvalues = []
for i in range(X_data.shape[1]):
l = ks_2samp(X_data.iloc[:, i], X_valid_data.iloc[:, i])
scores.append(i)
pvalues.append(l.pvalue)
pvalues_idx = np.argsort(pvalues)
scores = pvalues_idx[(pvalues_idx.shape[0]-2*N_GENES):]
similar_genes = selected_genes[scores]
X_data = X_data.iloc[:, scores]
selected_scores = selected_scores[scores]
X_ID = X_ID.iloc[scores]
X_valid_data = X_valid_data.iloc[:, scores]
X_test_data = X_test_data.iloc[:, scores]
Y_copy_res = np.array([0, 4, 8, 12, 16, 20, 0, 4, 8, 12, 16, 20])
X_ID2 = X_data.columns.values
scaler = MinMaxScaler()
scaler.fit(X_data)
X_data = scaler.transform(X_data)
X_valid_data = scaler.transform(X_valid_data)
X_test_data = scaler.transform(X_test_data)
X_data = pd.DataFrame(data=X_data, columns=X_ID2)
X_valid_data = pd.DataFrame(data=X_valid_data, columns=X_ID2)
X_test_data = pd.DataFrame(data=X_test_data, columns=X_ID2)
import matplotlib
matplotlib.use('Agg')
import tessreduce as tr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import lightkurve as lk
from astropy.coordinates import SkyCoord
from astropy import units as u
import os
dirname = os.path.dirname(__file__)
#where we're going we dont need warnings!!
import warnings
warnings.filterwarnings("ignore")
file = os.path.join(dirname, 'data', 'cataclysmic_variables.csv')  # a leading '/' would make join discard dirname
cvs = pd.read_csv(file)
# don't want to deal with the crowded Tuc, Pav, or Sgr zones for now
ind = (cvs['GCVS'].values == 'Tuc ') | (cvs['GCVS'].values == 'Pav ') | (cvs['GCVS'].values == 'Sgr ')
cvs = cvs.iloc[~ind]
for j in range(len(cvs)):
cv = cvs.iloc[j]
print('NAME: ',cv['Names'])
ra = cv['RAJ2000']
dec = cv['DEJ2000']
c = SkyCoord(ra=float(ra)*u.degree, dec=float(dec) *u.degree, frame='icrs')
tess = lk.search_tesscut(c,sector=None)
try:
if len(tess) > 0:
lcs = []
zps = []
err = []
sectors = []
trends1 = []
trends2 = []
if len(tess) > 1:
tpfs = []
for t in tess:
tpf = t.download(cutout_size=50)
#aper_b18 = np.zeros(tpf.shape[1:], dtype=bool)
#aper_b18[44:48, 44:47] = True
res = tr.Quick_reduce(tpf,calibrate=False)#,aper=aper_b18)
lcs += [res['lc']]
err += [res['err']]
zps += [res['zp']]
sectors += [tpf.sector]
try:
trends1 += [tr.Remove_stellar_variability(lcs[-1],err[-1],variable=True)]
trends2 += [tr.Remove_stellar_variability(lcs[-1],err[-1],variable=False)]
except:
print('trend error in {} sector {}'.format(cv['Names'],tpf.sector))
filler = np.nan * np.ones(len(err[-1]))
trends1 += [filler]
trends2 += [filler]
name = cv['Names']
print('MAKE FIGURE')
plt.figure(figsize=(6.5,8))
plt.subplot(311)
plt.title(name)
for i in range(len(lcs)):
plt.plot(lcs[i][0],lcs[i][1],label='S ' + str(sectors[i]))
plt.legend()
plt.ylabel('Counts')
plt.subplot(312)
plt.title('trend method 1')
for i in range(len(lcs)):
#plt.fill_between(lcs[i][0],lcs[i][1]-trends1[i]-err[i],lcs[i][1]-trends1[i]+err[i],alpha=.5)
plt.plot(lcs[i][0],lcs[i][1]-trends1[i])
plt.ylabel('Counts')
plt.subplot(313)
plt.title('trend method 2')
for i in range(len(lcs)):
#plt.fill_between(lcs[i][0],lcs[i][1]-trends2[i]-err[i],lcs[i][1]-trends2[i]+err[i],alpha=.5)
plt.plot(lcs[i][0],lcs[i][1]-trends2[i])
plt.ylabel('Counts')
plt.xlabel('MJD')
plt.tight_layout()
savename = name.replace('/',' ').replace(' ','_')
plt.savefig('./figs/{}.pdf'.format(savename))
# save to cvs
print('SAVE TO CSV')
mjd = lcs[0][0].copy()
flux = lcs[0][1].copy()
e = err[0].copy()
t1 = trends1[0].copy()
t2 = trends2[0].copy()
z = np.ones(len(lcs[0][0])) * zps[0][0]
s = np.ones(len(lcs[0][0])) * sectors[0]
for i in range(len(lcs)-1):
i += 1
mjd = np.append(mjd,lcs[i][0])
flux = np.append(flux,lcs[i][1])
e = np.append(e,err[i])
t1 = np.append(t1,trends1[i])
t2 = np.append(t2,trends2[i])
zz = np.ones(len(lcs[i][0])) * zps[i][0]
ss = np.ones(len(lcs[i][0])) * sectors[i]
z = np.append(z,zz)
s = np.append(s,ss)
            df = pd.DataFrame(columns=['mjd','flux','err','trend1','trend2','zp','sector'])
import numpy as np
import pandas as pd
from insolver.frame import InsolverDataFrame
from insolver.transforms import InsolverTransform, AutoFillNATransforms
def test_fillna_numerical():
df_test = InsolverDataFrame(pd.DataFrame(data={'col1': [1, 2, np.nan]}))
df_transformed = InsolverTransform(df_test, [
AutoFillNATransforms(),
])
df_transformed.ins_transform()
assert df_transformed['col1'][2] == 1.5
def test_fillna_numerical_all_na():
    df_test = InsolverDataFrame(pd.DataFrame(data={'col1': [np.nan, np.nan, np.nan]}))
"""
This module defines geometric methods that work in 3D and allow receiverpoints and observation objects to interact with a map
"""
# rays,to_crs used in observations
# fresnel,to_crs,is_outside,ground_level used in sim
# map_to_crs is a standalone map method
from itertools import chain, compress, cycle, repeat
from typing import Union
import warnings
import geopandas as gpd
import numpy as np
import pandas as pd
import pyproj
import pygeos
import shapely.geometry
from shapely.ops import transform
from shapely.wkt import loads
import gnssmapper.common as cm
from gnssmapper.common.check import Rays
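# Each ray is a two-point 3D linestring; return its coordinates as an (n_rays, 2, 3) array.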
def coordinates(rays: Rays) -> np.array:
coords = pygeos.get_coordinates(rays.array.data, include_z=True)
return coords.reshape((rays.shape[0], 2, 3))
def z(points: gpd.GeoSeries) -> pd.Series:
"""Returns Z coordinate for a set of point geometries """
    return pd.Series((point.z for point in points), index=points.index)
import json
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
import os
import collections
import nltk.classify
import nltk.metrics
import numpy as np
import csv
"""
read all business id
"""
business=[]
users=[]
scores=[]
rates=[]
t=0
userdd = pd.read_csv('users.tsv', sep="\t")
import unittest
from zeppos_bcpy.sql_statement import SqlStatement
import pandas as pd
import os
class TestTheProjectMethods(unittest.TestCase):
def test_get_table_create_statement_method(self):
        df = pd.DataFrame({'seconds': [3600], 'minutes': [10]}, columns=['seconds', 'minutes'])
import shelve
import numpy as np
import re
import pandas as pd
import json
import pickle
import pdb
from copy import copy
with open('metadata/bacnet_devices.json', 'r') as fp:
sensor_dict = json.load(fp)
nae_dict = dict()
nae_dict['bonner'] = ["607", "608", "609", "557", "610"]
nae_dict['ap_m'] = ['514', '513','604']
nae_dict['bsb'] = ['519', '568', '567', '566', '564', '565']
nae_dict['ebu3b'] = ["505", "506"]
nae_dict['music'] = ['523']
nae_dict['sme'] = ['572', '573', '574']
nae_dict['bml'] = ['510']
# Vectorization
#Parse the data in the dictionary as filtered by device_list
#Gives us a sensor_list with sensor information of a building
def remove_emptystr(s):
while '' in s:
s.remove('')
return s
def extract_words(sentence, delimiter):
#result = re.findall('(\d+\s?-?\s?\d+)|(\d+)', sentence)
result = sentence.lower().split(delimiter)
while '' in result:
result.remove('')
return result
def sentence2lower(wordList):
return [word.lower() for word in wordList]
def tokenize(tokenType, raw, mappedWordMap=None):
raw = raw.replace('_', ' ')
if tokenType=='Alphanumeric':
sentence = re.findall("\w+", raw)
elif tokenType in ['AlphaAndNum', 'NumAsSingleWord']:
sentence = re.findall("[a-zA-Z]+|\d+", raw)
elif tokenType=='NoNumber':
sentence = re.findall("[a-zA-Z]+", raw)
elif tokenType=='JustSeparate':
sentence = re.findall("([a-zA-Z]+|\d+|[^0-9a-z])", raw)
else:
assert(False)
if tokenType=='NumAsSingleWord':
sentence = ['NUM' if len(re.findall('\d+',word))>0 else word for word in sentence]
sentence = sentence2lower(sentence)
if mappedWordMap!=None:
terms = mappedWordMap.keys()
else:
terms = list()
retSentence = list()
for word in sentence:
        alphaWord = re.findall('[a-zA-Z]+', word)
        if len(alphaWord) > 0:
            alphaWord = alphaWord[0]  # use the first alphabetic token as the lookup key
            if alphaWord in terms:
#mappedWordList = mappedWordMap[alphaWord]
#for mappedWord in mappedWordList:
#word = word.replace(mappedWord, '_'+mappedWord+'_')
word = word.replace(alphaWord, '_'+'_'.join(mappedWordMap[alphaWord])+'_')
retSentence = retSentence + remove_emptystr(word.split('_'))
return retSentence
def parse_sentence(sentence):
return re.findall("([a-zA-Z]+|\d+|[^0-9a-z])", sentence.lower())
def get_bacnettype_dict(building_name):
bacnettypeMap = pd.read_csv('metadata/bacnettype_mapping.csv').set_index('bacnet_type_str')
naeList = nae_dict[building_name]
source_id_set = set([])
bacnettype_dict = dict()
bacnettype_code_dict = dict()
for nae in naeList:
device = sensor_dict[nae]
h_dev = device['props']
for sensor in device['objs']:
h_obj = sensor['props']
source_id = str(h_dev['device_id']) + '_' + str(h_obj['type']) + '_' + str(h_obj['instance'])
# if not source_id in validSrcidList:
# continue
if h_obj['type'] not in (0,1,2,3,4,5,13,14,19):
continue
if source_id in source_id_set:
continue
else:
source_id_set.add(source_id)
if sensor['props']['type_str']:
typeStr = bacnettypeMap.loc[sensor['props']['type_str']].tolist()[0]
if type(typeStr)!=str:
if np.isnan(typeStr):
typeStr = ''
else:
print("Error in bacnettype map file")
assert(False)
else:
typeStr = ''
bacnettype_dict[source_id] = typeStr
bacnettype_code_dict[source_id] = sensor['props']['type_str']
return bacnettype_dict
def get_unit_dict(building_name):
unitMap = pd.read_csv('metadata/unit_mapping.csv').set_index('unit')
naeList = nae_dict[building_name]
unit_code_dict = dict()
unit_dict = dict()
source_id_set = set([])
for nae in naeList:
device = sensor_dict[nae]
h_dev = device['props']
for sensor in device['objs']:
h_obj = sensor['props']
source_id = str(h_dev['device_id']) + '_' + str(h_obj['type']) + '_' + str(h_obj['instance'])
# if not source_id in validSrcidList:
# continue
if h_obj['type'] not in (0,1,2,3,4,5,13,14,19):
continue
if source_id in source_id_set:
continue
else:
source_id_set.add(source_id)
if sensor['unit']:
try:
unit_str = unitMap.loc[sensor['unit']].tolist()[0]
if type(unit_str) != str:
if np.isnan(unit_str):
unit_str = ''
else:
print("Error in unit map file")
assert(False)
except:
print("===================")
print(sensor['unit'])
print(sensor)
print("===================")
assert(False)
else:
unit_str = ''
unit_code_dict[source_id] = sensor['unit']
unit_dict[source_id] = unit_str
return unit_dict
def parse_sentences(building_name):
if building_name == 'pouya':
metadata_file = 'metadata/pouya.csv'
df = pd.read_csv(metadata_file)
names = df['Address'].tolist()
srcids = copy(names)
names = [parse_sentence(name) for name in names]
blanks = ['' for name in names]
jcinames = copy(blanks)
descs = copy(blanks)
units = copy(blanks)
bacnettypes = copy(blanks)
return None, srcids, names, jcinames, descs, units, bacnettypes
unitMap = pd.read_csv('metadata/unit_mapping.csv').set_index('unit')
bacnettypeMap = pd.read_csv('metadata/bacnettype_mapping.csv').set_index('bacnet_type_str')
naeList = nae_dict[building_name]
sensor_list = []
name_list = []
desc_list = []
unit_list = []
bacnettype_list = []
jciname_list = list()
source_id_set = set([])
source_id_list = list()
for nae in naeList:
device = sensor_dict[nae]
h_dev = device['props']
for sensor in device['objs']:
h_obj = sensor['props']
source_id = str(h_dev['device_id']) + '_' + str(h_obj['type']) + '_' + str(h_obj['instance'])
# if not source_id in validSrcidList:
# continue
if h_obj['type'] not in (0,1,2,3,4,5,13,14,19):
continue
if source_id in source_id_set:
continue
else:
source_id_set.add(source_id)
source_id_list.append(source_id)
jciname_list.append(parse_sentence(sensor['jci_name']))
name_list.append(parse_sentence(sensor['name']))
desc_list.append(parse_sentence(sensor['desc']))
if not sensor['unit']==None:
try:
unit_str = unitMap.loc[sensor['unit']].tolist()[0]
if type(unit_str) != str:
if np.isnan(unit_str):
unit_str = ''
else:
print("Error in unit map file")
assert(False)
except:
print("===================")
print(sensor['unit'])
print(sensor)
print("===================")
assert(False)
else:
unit_str = ''
unit_list.append([unit_str])
if not sensor['props']['type_str']==None:
typeStr = bacnettypeMap.loc[sensor['props']['type_str']].tolist()[0]
if type(typeStr)!=str:
if np.isnan(typeStr):
typeStr = ''
else:
print("Error in bacnettype map file")
assert(False)
else:
typeStr = ''
bacnettype_list.append([typeStr])
sensor_list.append({'source_id': source_id,
'name': sensor['name'],
'description': sensor['desc'],
'unit': sensor['unit'],
'type_string': h_obj['type_str'],
'type': h_obj['type'],
#'device_id': h_obj['device_id'],
'jci_name': sensor['jci_name'],
#add data related characteristics here
})
sensor_df = pd.DataFrame(sensor_list)
return sensor_df, source_id_list, name_list, jciname_list, desc_list, \
unit_list, bacnettype_list
def structure_metadata(buildingName=None, tokenType=None, bigramFlag=False, validSrcidList=[], mappedWordMap=None, withDotFlag=True):
unitMap = pd.read_csv('metadata/unit_mapping.csv').set_index('unit')
bacnettypeMap = | pd.read_csv('metadata/bacnettype_mapping.csv') | pandas.read_csv |
# Libraries
print("Load Libraries")
import os
import hashlib
import numpy as np
import pandas as pd
import tensorflow.keras.preprocessing.image as kpi
import tensorflow.keras.models as km
from tensorflow.python.client import device_lib
MODE = "GPU" if "GPU" in [k.device_type for k in device_lib.list_local_devices()] else "CPU"
print(MODE)
## Argument
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--data_dir', type=str,
default="/Users/bguillouet/Insa/TP_Insa/dev/IA-Frameworks/GoogleCloud/data/sample_2/")
parser.add_argument('--results_dir', type=str,
default="/Users/bguillouet/Insa/TP_Insa/dev/IA-Frameworks/GoogleCloud/results/")
parser.add_argument('--model_dir', type=str,
default="/Users/bguillouet/Insa/TP_Insa/dev/IA-Frameworks/GoogleCloud/model/")
args = parser.parse_args()
## Variable definitions
img_width = 150
img_height = 150
## Data Generator
data_dir_test = args.data_dir+'/test'
N_test = len(os.listdir(data_dir_test+"/test"))
test_datagen = kpi.ImageDataGenerator(rescale=1. / 255)
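# NOTE: pixels are rescaled to [0, 1] here; this should match whatever
# preprocessing was applied when the model was trained.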
test_generator = test_datagen.flow_from_directory(
data_dir_test,
target_size=(img_height, img_width),
batch_size=args.batch_size,
class_mode=None,
shuffle=False)
## Load the trained model
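# The model file name is the MD5 hash of the sorted command-line arguments,
# which presumably mirrors how the training script named its checkpoint.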
args_str = "_".join([k + ":" + str(v) for k, v in sorted(vars(args).items(), key=lambda x : x[0])])
id_str = hashlib.md5(args_str.encode("utf8")).hexdigest()
model_conv = km.load_model(args.model_dir + "/" + id_str + ".h5")
## Prediction
test_prediction = model_conv.predict_generator(test_generator, N_test // args.batch_size, verbose=1)
## Save prediction in csv
images_test = test_generator.filenames
classes = [int(t>0.5) for t in test_prediction]
array = np.vstack((images_test, test_prediction[:,0], classes)).T
df = | pd.DataFrame(array, columns=["filename","probabilities","classes"]) | pandas.DataFrame |
'''
This program will calculate a timeseries of active users across the lifetime of a project (or a workflow id/version for a project).
The inputs needed are:
the classification export file (request & download from the Project Builder)
[optional] the workflow id
[optional] the workflow version (only the major (int) version needed)
[optional] the output filename.
[optional] a flag to let the program know to just make plots, don't re-calculate
The program takes snapshots of the classification timeline in one-hour bins; for each hour it
computes the number of classifications submitted and the number of classifiers (registered and
unregistered) who submitted them. It outputs these timeseries to a CSV file.
<NAME>, 30th March 2017
'''
import sys, os
# put this way up here so if there are no inputs we exit quickly before even trying to load everything else
try:
classfile_in = sys.argv[1]
except:
print("\nUsage: %s classifications_infile" % sys.argv[0])
print(" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.")
print(" Optional extra inputs (no spaces):")
print(" workflow_id=N")
print(" specify the program should only consider classifications from workflow id N")
print(" workflow_version=M")
print(" specify the program should only consider classifications from workflow version M")
print(" (note the program will only consider the major version, i.e. the integer part)")
print(" outfile=filename.csv")
print(" specify the name of the output file. If not specified, it will")
print(" be based on the input filename, e.g. if your input file is")
print(" my-project-classifications.csv, the output file name will be")
print(" my-project-classifications_active_users_timeseries.csv.")
print(" --plots_only")
print(" if specified, the program won't re-calculate the time series")
print(" and will instead just read in the outfile and re-make plots.")
sys.exit(0)
import numpy as np
import pandas as pd
import datetime
import dateutil.parser
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.gridspec as gridspec
import json
import gc
plt.rc('figure', facecolor='none', edgecolor='none', autolayout=True)
plt.rc('path', simplify=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('axes', labelsize='large', facecolor='none', linewidth=0.7, color_cycle = ['k', 'r', 'g', 'b', 'c', 'm', 'y'])
plt.rc('xtick', labelsize='medium')
plt.rc('ytick', labelsize='medium')
plt.rc('lines', markersize=4, linewidth=1, markeredgewidth=0.2)
plt.rc('legend', numpoints=1, frameon=False, handletextpad=0.3, scatterpoints=1, handlelength=2, handleheight=0.1)
plt.rc('savefig', facecolor='none', edgecolor='none', frameon='False')
params = {'font.size' : 11,
'xtick.major.size': 8,
'ytick.major.size': 8,
'xtick.minor.size': 3,
'ytick.minor.size': 3,
}
plt.rcParams.update(params)
# default value is not to care about workflow ID or version
workflow_id = -1
workflow_version = -1
# default mode is to calculate the timeseries afresh
plot_only = False
outfile = classfile_in.replace(".csv", "_active_users_timeseries.csv")
# if the input filename doesn't have ".csv" in it you might end up overwriting
# the input file with the output file and that would be bad; don't do that.
if outfile == classfile_in:
outfile += "_active_users_timeseries.csv"
# Print out the input parameters just as a sanity check
print("File to be read: %s" % classfile_in)
print(len(sys.argv))
# check for other command-line arguments
if len(sys.argv) > 2:
# if there are additional arguments, loop through them
for i_arg, argstr in enumerate(sys.argv[2:]):
arg = argstr.split('=')
if arg[0] == "workflow_id":
workflow_id = int(arg[1])
print("Restricting classifications to workflow id: %d" % workflow_id)
elif arg[0] == "workflow_version":
workflow_version = float(arg[1])
print("Restricting classifications to workflow version: %d" % int(workflow_version))
elif arg[0] == "outfile":
outfile = arg[1]
elif arg[0] == "--plots_only":
plot_only = True
print("File to be written: %s" % outfile)
if not plot_only:
print("Reading classifications...")
#classifications = pd.read_csv(classfile_in)
# the above will work but uses a LOT of memory for projects with > 1 million
# classifications. Nothing here uses the actual classification data so don't read it
cols_keep = ["user_name", "user_id", "user_ip", "workflow_id", "workflow_version", "created_at"]
classifications = pd.read_csv(classfile_in, usecols=cols_keep)
# now restrict classifications to a particular workflow id/version if requested
if (workflow_id > 0) | (workflow_version > 0):
# only keep the stuff that matches these workflow properties
if (workflow_id > 0):
#print("Considering only workflow id %d" % workflow_id)
in_workflow = classifications.workflow_id == workflow_id
else:
# the workflow id wasn't specified, so just make an array of true
in_workflow = np.array([True for q in classifications.workflow_id])
if (workflow_version > 0):
classifications['version_int'] = [int(q) for q in classifications.workflow_version]
#print("Considering only major workflow version %d" % int(workflow_version))
# we only care about the major workflow version, not the minor version
in_version = classifications.version_int == int(workflow_version)
else:
in_version = np.array([True for q in classifications.workflow_version])
if (sum(in_workflow & in_version) == 0):
print("ERROR: your combination of workflow_id and workflow_version does not exist!\nIgnoring workflow id/version request and computing stats for ALL classifications instead.")
#classifications = classifications_all
else:
# select the subset of classifications
classifications = classifications[in_workflow & in_version]
else:
# just use everything
#classifications = classifications_all
workflow_ids = classifications.workflow_id.unique()
# this takes too much CPU time just for a print statement. Just use float versions
#classifications['version_int'] = [int(q) for q in classifications.workflow_version]
version_ints = classifications.workflow_version.unique()
print("Considering all classifications in workflow ids:")
print(workflow_ids)
print(" and workflow_versions:")
print(version_ints)
print("Creating timeseries...")#,datetime.datetime.now().strftime('%H:%M:%S.%f')
ca_temp = classifications['created_at'].copy()
# Do these separately so you can track errors to a specific line
# Try the format-specified ones first (because it's faster, if it works)
try:
classifications['created_at_ts'] = pd.to_datetime(ca_temp, format='%Y-%m-%d %H:%M:%S %Z')
except Exception as the_error:
#print "Oops:\n", the_error
try:
classifications['created_at_ts'] = pd.to_datetime(ca_temp, format='%Y-%m-%d %H:%M:%S')
except Exception as the_error:
#print "Oops:\n", the_error
classifications['created_at_ts'] = pd.to_datetime(ca_temp)
# no except for this because if it fails the program really should exit anyway
# index this into a timeseries
# this means the index might no longer be unique, but it has many advantages
classifications.set_index('created_at_ts', inplace=True, drop=False)
# get the first and last classification timestamps
first_class = min(classifications.created_at_ts)
last_class = max(classifications.created_at_ts)
hour_start_str = first_class.strftime('%Y-%m-%d %H:00:00')
hour_end_str = last_class.strftime('%Y-%m-%d %H:00:00')
hour_start = pd.to_datetime(hour_start_str, format='%Y-%m-%d %H:%M:%S')
hour_end = pd.to_datetime(hour_end_str, format='%Y-%m-%d %H:%M:%S') + np.timedelta64(1, 'h')
# writing to a file as we go turns out to be faster
fout = open(outfile, "w")
fout.write("time_start,time_end,n_class_total,n_class_registered,n_class_unregistered,n_users_total,n_users_registered,n_users_unregistered\n")
the_time = hour_start
# testing purposes
#the_time = pd.to_datetime("2017-04-04 00:00:00", format='%Y-%m-%d %H:%M:%S')
dt = np.timedelta64(1, 'h')
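# NOTE: the hour-by-hour loop below is easy to follow but rescans the full
# classification table for every hour; a rough (untested) vectorized sketch of
# the total counts would be:
#     classifications.resample('1H')['user_name'].agg(['count', 'nunique'])
# The explicit loop is kept because it also splits registered vs. not-logged-in
# users within each hourly bin.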
while the_time < hour_end:
# pick just the classifications in this time period
the_time_hi = the_time + dt
subclass = classifications[(classifications.created_at_ts >= the_time) & (classifications.created_at_ts < the_time_hi)]
# just the usernames
subusers = subclass.user_name.unique()
# total classification count
n_class = len(subclass)
if n_class > 0:
# total user count
n_users = len(subusers)
# identify registered and unregistered users
is_unregistered_user = np.array([q.startswith("not-logged-in") for q in subusers])
is_registered_user = np.invert(is_unregistered_user)
n_users_unreg = sum(is_unregistered_user)
n_users_reg = n_users - n_users_unreg
# count classifications by registered and unregistered users
is_unregistered_class = np.array([q.startswith("not-logged-in") for q in subclass.user_name])
is_registered_class = np.invert(is_unregistered_class)
n_class_unreg = sum(is_unregistered_class)
n_class_reg = n_class - n_class_unreg
fout.write("%s,%s,%d,%d,%d,%d,%d,%d\n" % (the_time.strftime('%Y-%m-%d %H:%M:%S'), the_time_hi.strftime('%Y-%m-%d %H:%M:%S'), n_class, n_class_reg, n_class_unreg, n_users, n_users_reg, n_users_unreg))
else:
# there weren't any classifications in this time period
# so everything is zero
fout.write("%s,%s,0,0,0,0,0,0\n" % (the_time.strftime('%Y-%m-%d %H:%M:%S'), the_time_hi.strftime('%Y-%m-%d %H:%M:%S')))
the_time += dt
# end of while loop
fout.close()
# end "if not plot_only"
# now read in the csv and make a plot or two
the_ts = pd.read_csv(outfile)
t_temp = the_ts['time_end'].copy()
the_ts['hour_end'] = | pd.to_datetime(t_temp, format='%Y-%m-%d %H:%M:%S') | pandas.to_datetime |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import copy
import unittest
import functools
import itertools
import types
import numpy as np
import numpy.testing as npt
import pandas as pd
import scipy.stats
from skbio import Sequence, DNA, RNA, Protein, TabularMSA
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty
from skbio.util._decorator import overrides
from skbio.util._testing import ReallyEqualMixin
from skbio.metadata._testing import (MetadataMixinTests,
PositionalMetadataMixinTests)
from skbio.util import assert_data_frame_almost_equal
from skbio.util._testing import assert_index_equal
class TabularMSASubclass(TabularMSA):
"""Used for testing purposes."""
pass
class TestTabularMSAMetadata(unittest.TestCase, ReallyEqualMixin,
MetadataMixinTests):
def setUp(self):
self._metadata_constructor_ = functools.partial(TabularMSA, [])
class TestTabularMSAPositionalMetadata(unittest.TestCase, ReallyEqualMixin,
PositionalMetadataMixinTests):
def setUp(self):
def factory(axis_len, positional_metadata=None):
return TabularMSA([DNA('A' * axis_len)],
positional_metadata=positional_metadata)
self._positional_metadata_constructor_ = factory
class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
def test_from_dict_empty(self):
self.assertEqual(TabularMSA.from_dict({}), TabularMSA([], index=[]))
def test_from_dict_single_sequence(self):
self.assertEqual(TabularMSA.from_dict({'foo': DNA('ACGT')}),
TabularMSA([DNA('ACGT')], index=['foo']))
def test_from_dict_multiple_sequences(self):
msa = TabularMSA.from_dict(
{1: DNA('ACG'), 2: DNA('GGG'), 3: DNA('TAG')})
# Sort because order is arbitrary.
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('ACG'), DNA('GGG'), DNA('TAG')], index=[1, 2, 3]))
def test_from_dict_invalid_input(self):
# Basic test to make sure error-checking in the TabularMSA constructor
# is being invoked.
with self.assertRaisesRegex(
ValueError, 'must match the number of positions'):
TabularMSA.from_dict({'a': DNA('ACG'), 'b': DNA('ACGT')})
def test_constructor_invalid_dtype(self):
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*Sequence'):
TabularMSA([Sequence('')])
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*int'):
TabularMSA([42, DNA('')])
def test_constructor_not_monomorphic(self):
with self.assertRaisesRegex(TypeError,
'matching type.*RNA.*DNA'):
TabularMSA([DNA(''), RNA('')])
with self.assertRaisesRegex(TypeError,
'matching type.*float.*Protein'):
TabularMSA([Protein(''), Protein(''), 42.0, Protein('')])
def test_constructor_unequal_length(self):
with self.assertRaisesRegex(
ValueError,
'must match the number of positions.*1 != 0'):
TabularMSA([Protein(''), Protein('P')])
with self.assertRaisesRegex(
ValueError,
'must match the number of positions.*1 != 3'):
TabularMSA([Protein('PAW'), Protein('ABC'), Protein('A')])
def test_constructor_non_iterable(self):
with self.assertRaises(TypeError):
TabularMSA(42)
def test_constructor_minter_and_index_both_provided(self):
with self.assertRaisesRegex(ValueError, 'both.*minter.*index'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str,
index=['a', 'b'])
def test_constructor_invalid_minter_callable(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=float)
def test_constructor_missing_minter_metadata_key(self):
with self.assertRaises(KeyError):
TabularMSA([DNA('ACGT', metadata={'foo': 'bar'}), DNA('TGCA')],
minter='foo')
def test_constructor_unhashable_minter_metadata_key(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=[])
def test_constructor_index_length_mismatch_iterable(self):
with self.assertRaisesRegex(ValueError,
'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=iter([]))
def test_constructor_index_length_mismatch_index_object(self):
with self.assertRaisesRegex(ValueError,
'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index= | pd.Index([]) | pandas.Index |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../../../input/shrutimechlearn_churn-modelling/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("../../../input/shrutimechlearn_churn-modelling"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# In[ ]:
# # Importing Basic Libraries
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# # Loading the Dataset
# In[ ]:
df = pd.read_csv("../../../input/shrutimechlearn_churn-modelling/Churn_Modelling.csv")
df.shape
# In[ ]:
df.sample(5)
# # Handling Missing Values
# In[ ]:
df.isna().sum()
# # Handling Categorical Variables
# In[ ]:
df.select_dtypes(include='object').columns
# In[ ]:
for i in ['Geography' ,'Gender']:
print(df[i].value_counts())
# In[ ]:
# one-hot encoding for ['Geography', 'Gender']
geography = pd.get_dummies(df['Geography'], drop_first=True)
gender = | pd.get_dummies(df['Gender'], drop_first=True) | pandas.get_dummies |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
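    # NOTE: this class appears to implement GTJA "191 alphas"-style price/volume
    # factors. Each alphaN() returns a one-column DataFrame named 'alphaN',
    # indexed like the stacked price panels loaded in __init__ (with an 'ID' level).
    # Hypothetical usage (assumes the SQL helpers imported from stats are configured):
    #     st = stAlpha('2016-01-01', '2017-01-01')
    #     a1 = st.alpha1()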
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
        # element-wise min(delayed close, low) and max(delayed close, high),
        # matching the 'min'/'max' names used below
        temp['min'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
        temp['max'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
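        # exponentially weighted 7-period mean, computed per stock on the unstacked (date x ID) panel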
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
        # ((close - low) - (high - close)) / (high - low) * volume
        data_temp = ((data['Close'] - data['Low']) - (data['High'] - data['Close']))\
        /(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
        This one is similar to alpha14, but uses the ratio to the 5-day-lagged close instead of the difference.
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
        incomplete
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
def alpha30(self):
close = self.close
close_delay = Delay(close,1)
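        # NOTE: alpha30 is left unfinished here; the original formula appears to
        # need regression residuals against market / Fama factors (cf. the
        # commented-out get_fama_from_sql() call in __init__), so no value is returned.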
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
data.columns = ['close','open']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
        # assumed intent: judge4 marks the case where abs2 is the largest of the three
        # (it is never assigned otherwise, which would make judge_2 below identically zero)
        data1['judge4'][data1['abs2'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
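        # flag days where the stock closed up while the index did not, then take the
        # rolling 50-day count of such days relative to the index's non-up days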
tep1 = repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
tep2_result = np.matlib.repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
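# Compares 10-day slopes built from the 20- and 10-day delayed closes; depending on that
# difference the signal is -1, +1, or the negated one-day close change.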
close = self.close
close_delay20 = Delay(close,20)
close_delay10 = Delay(close,10)
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
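# Uses the negated 3-day close change when the normalised slope of the 100-day moving average is
# below 0.05, and the negated distance of the close above its 100-day low otherwise.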
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = r['r1'] * r['r2'] * -1
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
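# Minus the product of two rank terms: rank(20-day mean of the 5-day delayed close) scaled by the
# 2-day close/volume correlation, and the rank of the correlation between 5- and 20-day close sums.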
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
temp = pd.DataFrame((data['high'] - data['low']) / data['close_mean'])
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha115']
return alpha
@timer
def alpha116(self):
close = self.close
alpha = RegResi(0,close,None,20)
alpha.columns = ['alpha116']
return alpha
@timer
def alpha117(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
ret = self.ret
r1 = TsRank(volume,32)
data1 = pd.concat([close,high,low],axis = 1, join = 'inner')
r2 = TsRank(pd.DataFrame(data1['Close'] + data1['High'] - data1['Low']),16)
r3 = TsRank(ret,32)
r = pd.concat([r1,r2,r3], axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(r['r1'] * (1 - r['r2']) * (1 - r['r3']))
alpha.columns = ['alpha117']
return alpha
@timer
def alpha118(self):
high = self.high
low = self.low
Open = self.open
data = pd.concat([high,low,Open], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame(data['High'] - data['Open']),20)
s2 = Sum(pd.DataFrame(data['Open'] - data['Low']),20)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha118']
return alpha
@timer
def alpha119(self):
Open = self.open
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,5)
volume_mean_sum = Sum(volume_mean,26)
data1 = pd.concat([vwap,volume_mean_sum],axis = 1, join = 'inner')
corr1 = Corr(data1,5)
corr1_decay = DecayLinear(corr1,7)
r1 = Rank(corr1_decay)
open_r = Rank(Open)
volume_mean2 = Mean(volume,15)
volume_mean2_r = Rank(volume_mean2)
data2 = pd.concat([open_r, volume_mean2_r], axis = 1, join = 'inner')
corr2 = Corr(data2,21)
corr2_min = TsMin(corr2,9)
corr2_min_r = TsRank(corr2_min,7)
corr_min_r_decay = DecayLinear(corr2_min_r,8)
r2 = Rank(corr_min_r_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha119']
return alpha
@timer
def alpha120(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close], axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vwap'] - data['Close']))
r2 = Rank(pd.DataFrame(data['Vwap'] + data['Close']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha120']
return alpha
@timer
def alpha121(self):
vwap = self.vwap
volume = self.volume
vwap_r = TsRank(vwap,20)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,2)
data = pd.concat([vwap_r,volume_mean_r], axis = 1, join = 'inner')
corr= Corr(data,18)
temp = TsRank(corr,3)
vwap_min = TsMin(vwap,12)
data2 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data2.columns = ['vwap','vwap_min']
rank = Rank(pd.DataFrame(data2['vwap'] - data2['vwap_min']))
data3 = pd.concat([rank,temp],axis = 1, join = 'inner')
data3.columns = ['rank','temp']
alpha = pd.DataFrame(np.power(data3['rank'],data3['temp']) * -1)
alpha.columns = ['alpha121']
return alpha
@timer
def alpha122(self):
close = self.close
close_ln = pd.DataFrame(np.log(close))
temp1 = SMA(close_ln,13,2)
sma1 = SMA(temp1,13,2)
sma2 = SMA(sma1,13,2)
sma3 = SMA(sma2,13,2)
sma3_delay = Delay(sma3,1)
data = pd.concat([sma3,sma3_delay],axis = 1, join = 'inner')
data.columns = ['sma','sma_delay']
alpha = pd.DataFrame(data['sma']/data['sma_delay'])
alpha.columns = ['alpha122']
return alpha
@timer
def alpha123(self):
volume = self.volume
high = self.high
low = self.low
data1 = pd.concat([high,low], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame((data1['High'] + data1['Low'])/2),20)
volume_mean = Mean(volume,60)
s2 = Sum(volume_mean,20)
data2 = pd.concat([s1,s2], axis = 1, join = 'inner')
corr1 = Corr(data2,9)
data3 = pd.concat([low,volume], axis = 1, join = 'inner')
corr2 = Corr(data3,6)
corr1_r = Rank(corr1)
corr2_r = Rank(corr2)
data = pd.concat([corr1_r,corr2_r], axis = 1, join = 'inner')
data.columns = ['r1','r2']
data['alpha'] = -1
data['alpha'][data['r1'] >= data['r2']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha123']
return alpha
@timer
def alpha124(self):
close = self.close
vwap = self.vwap
close_max = TsMax(close,30)
close_max_r = Rank(close_max)
close_max_r_decay = DecayLinear(close_max_r,2)
close_max_r_decay.columns = ['decay']
data = pd.concat([close,vwap,close_max_r_decay], axis = 1, join ='inner')
alpha = pd.DataFrame((data['Close'] - data['Vwap'])/data['decay'])
alpha.columns = ['alpha124']
return alpha
@timer
def alpha125(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,80)
data1 = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr1 = Corr(data1,17)
data2 = pd.concat([close,vwap], axis = 1, join = 'inner')
temp2 = pd.DataFrame(0.5*(data2['Close'] + data2['Vwap']))
temp2_delta = Delta(temp2,3)
corr1_decay = DecayLinear(corr1,20)
r1 = Rank(corr1_decay)
temp2_delta_decay = DecayLinear(temp2_delta,16)
r2 = Rank(temp2_delta_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha125']
return alpha
@timer
def alpha126(self):
close = self.close
high = self.high
low = self.low
data = pd.concat([close,high,low], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] + data['High'] + data['Low'])/3)
alpha.columns = ['alpha126']
return alpha
@timer
def alpha127(self):
close = self.close
close_max = TsMax(close,12)
data = pd.concat([close,close_max], axis = 1, join = 'inner')
data.columns = ['close','close_max']
alpha = pd.DataFrame((data['close'] - data['close_max'])/data['close_max'])
alpha.columns = ['alpha127']
return alpha
@timer
def alpha128(self):
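# Money-flow-index style oscillator: 14-day sums of typical-price volume on up versus down days,
# mapped through 100 - 100/(1 + up/down).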
close = self.close
high = self.high
low = self.low
volume = self.volume
data = pd.concat([close,high,low,volume], axis = 1, join = 'inner')
data['temp1'] = (data['Close'] + data['Low'] + data['High'])/3
data['temp2'] = data['temp1'] * data['Vol']
data['temp3'] = data['temp1'] * data['Vol']
temp_delay = Delay(pd.DataFrame(data['temp1']),1)
temp_delay.columns = ['temp_decay']
data = pd.concat([data,temp_delay], axis = 1, join = 'inner')
data['temp2'][data['temp1'] < data['temp_decay']] = 0
data['temp3'][data['temp1'] > data['temp_decay']] = 0
s1 = Sum(pd.DataFrame(data['temp2']),14)
s2 = Sum(pd.DataFrame(data['temp3']),14)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(100 - 100/(1+ s['s1']/s['s2']))
alpha.columns = ['alpha128']
return alpha
@timer
def alpha129(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['abs'] = np.abs(data['close'] - data['close_delay'])
data['temp'] = data['abs']
data['temp'][data['close'] < data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha129']
return alpha
@timer
def alpha130(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,40)
data1 = pd.concat([high,low],axis = 1, join = 'inner')
temp1 = pd.DataFrame((data1['High'] + data1['Low'])/2)
rank1 = pd.concat([temp1,volume_mean], axis = 1, join = 'inner')
corr = Corr(rank1,9)
close_r = Rank(close)
volume_r = Rank(volume)
data2 = pd.concat([close_r,volume_r],axis = 1, join = 'inner')
corr2 = Corr(data2,7)
corr_decay = DecayLinear(corr,10)
r1 = Rank(corr_decay)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha130']
return alpha
@timer
def alpha131(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,50)
data1 = pd.concat([close,volume_mean], axis = 1, join = 'inner')
corr = Corr(data1,18)
vwap_delta = Delta(vwap,1)
temp2 = TsRank(corr,18)
data2 = pd.concat([vwap_delta,temp2],axis = 1, join = 'inner')
data2.columns = ['vwap_delta','temp2']
temp3 = np.power(data2['vwap_delta'],data2['temp2'])
alpha = Rank(pd.DataFrame(temp3))
alpha.columns = ['alpha131']
return alpha
@timer
def alpha132(self):
amt = self.amt
alpha = Mean(amt,20)
alpha.columns = ['alpha132']
return alpha
@timer
def alpha133(self):
low = self.low
high = self.high
highday = Highday(high,20)
lowday = Lowday(low,20)
data = pd.concat([highday,lowday],axis = 1, join = 'inner')
data.columns = ['highday','lowday']
alpha = (20 - data['highday'])/20.0 * 100 - (20 - data['lowday'])/20.0 * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha133']
return alpha
@timer
def alpha134(self):
close = self.close
volume = self.volume
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,volume,close_delay], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] - data['close_delay'])/data['close_delay'])
alpha.columns = ['alpha134']
return alpha
@timer
def alpha135(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1 , join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
alpha = SMA(temp_delay,20,1)
alpha.columns = ['alpha135']
return alpha
@timer
def alpha136(self):
volume = self.volume
Open = self.open
ret = self.ret
ret_delta = Delta(ret,3)
ret_delta_r = Rank(ret_delta)
data = pd.concat([Open,volume],axis = 1, join = 'inner')
corr = Corr(data,10)
data_temp = pd.concat([ret_delta_r,corr],axis = 1, join = 'inner')
data_temp.columns = ['ret_delta','corr']
alpha = pd.DataFrame(-1 * data_temp['ret_delta'] * data_temp['corr'])
alpha.columns = ['alpha136']
return alpha
@timer
def alpha137(self):
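# Swing-index style measure: the judge flags pick one of three range terms (t2, t3, t4) by comparing
# |high - prev close|, |low - prev close| and |high - prev low|; the result is 16 * selected term / the
# normalised price-change term t1.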
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs2'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
alpha = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha.columns = ['alpha137']
return alpha
@timer
def alpha138(self):
vwap = self.vwap
volume = self.volume
low = self.low
data1 = pd.concat([low,vwap], axis = 1, join = 'inner')
temp1 = pd.DataFrame(data1['Low'] * 0.7 + data1['Vwap'] * 0.3)
temp1_delta = Delta(temp1,3)
temp1_delta_decay = DecayLinear(temp1_delta,20)
r1 = Rank(temp1_delta_decay)
low_r = TsRank(low,8)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,17)
data2 = pd.concat([low_r,volume_mean_r],axis = 1, join = 'inner')
corr = Corr(data2,5)
corr_r = TsRank(corr,19)
corr_r_decay = DecayLinear(corr_r,16)
r2 = TsRank(corr_r_decay,7)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha138']
return alpha
@timer
def alpha139(self):
Open = self.open
volume = self.volume
data = pd.concat([Open,volume], axis = 1, join = 'inner')
alpha = -1 * Corr(data,10)
alpha.columns = ['alpha139']
return alpha
@timer
def alpha140(self):
Open = self.open
volume = self.volume
high = self.high
low = self.low
close = self.close
open_r = Rank(Open)
low_r = Rank(low)
high_r = Rank(high)
close_r = Rank(close)
data1 = pd.concat([open_r,low_r,high_r,close_r],axis = 1, join = 'inner')
data1.columns = ['open_r','low_r','high_r','close_r']
temp = pd.DataFrame(data1['open_r'] + data1['low_r'] - \
(data1['high_r'] + data1['close_r']))
close_r_temp = TsRank(close,8)
volume_mean = Mean(volume,70)
volume_mean_r = TsRank(volume_mean,20)
data2 = pd.concat([close_r_temp,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data2,8)
temp_decay = DecayLinear(temp,8)
r1 = Rank(temp_decay)
corr_decay = DecayLinear(corr,7)
r2 = TsRank(corr_decay,3)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
alpha = pd.DataFrame(np.min(r, axis = 1))
alpha.columns = ['alpha140']
return alpha
@timer
def alpha141(self):
volume = self.volume
high = self.high
volume_mean = Mean(volume,15)
high_r = Rank(high)
volume_mean_r = Rank(volume_mean)
data = pd.concat([high_r,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data,9)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha141']
return alpha
@timer
def alpha142(self):
close = self.close
volume = self.volume
close_r = TsRank(close,10)
r1 = Rank(close_r)
close_delta = Delta(close,1)
close_delta_delta = Delta(close_delta,1)
r2 = Rank(close_delta_delta)
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['v','v_m']
temp = pd.DataFrame(data['v']/data['v_m'])
temp_r = TsRank(temp,5)
r3 = Rank(temp_r)
r = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(- 1* r['r1'] * r['r2'] * r['r3'])
alpha.columns= ['alpha142']
return alpha
@timer
def alpha143(self):
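# Cumulative product through time of the daily return on up days; down or flat days contribute a factor of one.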
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] - data['close_delay'])/data['close_delay'])
temp.columns= ['temp']
data_temp = pd.concat([data,temp],axis = 1, join = 'inner')
data_temp['temp'][data_temp['close'] <= data_temp['close_delay']] = 1
temp_unstack = data_temp['temp'].unstack()
temp_unstack.iloc[0,:] = 1
df = np.cumprod(temp_unstack,axis = 0)
alpha = pd.DataFrame(df.stack())
alpha.columns = ['alpha143']
return alpha
@timer
def alpha144(self):
close = self.close
amt = self.amt
close_delay = Delay(close,1)
data = pd.concat([close,close_delay,amt], axis = 1, join = 'inner')
data.columns = ['close','close_delay','amt']
data['temp'] = np.abs(data['close']/data['close_delay'] - 1)/data['amt']
data['sign'] = 1
data['sign'][data['close'] >= data['close_delay']] = 0
tep1 = Sum(pd.DataFrame(data['sign'] * data['temp']),20)
tep2 = Count(0,pd.DataFrame(data['close_delay']),pd.DataFrame(data['close']),20)
data2 = pd.concat([tep1,tep2], axis = 1, join = 'inner')
data2.columns = ['tep1','tep2']
alpha = pd.DataFrame(data2['tep1']/data2['tep2'])
alpha.columns = ['alpha144']
return alpha
@timer
def alpha145(self):
volume = self.volume
volume_mean9 = Mean(volume,9)
volume_mean26 = Mean(volume,26)
volume_mean12 = Mean(volume,12)
data = pd.concat([volume_mean9,volume_mean26,volume_mean12], axis = 1, join = 'inner')
data.columns = ['m9','m26','m12']
alpha = pd.DataFrame((data['m9'] - data['m26'])/data['m12'] * 100)
alpha.columns = ['alpha145']
return alpha
@timer
def alpha146(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] -data['close_delay'])/data['close_delay'])
sma1 = SMA(temp,61,2)
data2 = pd.concat([temp,sma1], axis = 1, join = 'inner')
data2.columns = ['temp1','sma1']
data2['temp2'] = data2['temp1'] - data2['sma1']
temp2_mean = Mean(pd.DataFrame(data2['temp2']),20)
sma2 = SMA(pd.DataFrame(data2['temp1'] - data2['temp2']),61,2)
data_temp = pd.concat([temp2_mean,pd.DataFrame(data2['temp2']),sma2], axis = 1 , join = 'inner')
data_temp.columns = ['temp2_mean','temp2','sma2']
alpha = data_temp['temp2_mean'] * data_temp['temp2'] / data_temp['sma2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha146']
return alpha
@timer
def alpha147(self):
close = self.close
close_mean = Mean(close,12)
alpha = RegBeta(0,close_mean,None,12)
alpha.columns = ['alpha147']
return alpha
@timer
def alpha148(self):
Open = self.open
volume = self.volume
volume_mean = Mean(volume,60)
volume_mean_s = Sum(volume_mean,9)
data = pd.concat([Open,volume_mean_s],axis = 1, join = 'inner')
corr = Corr(data,6)
r1 = Rank(corr)
open_min = TsMin(Open,14)
data2 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data2.columns = ['open','open_min']
r2 = Rank(pd.DataFrame(data2['open'] - data2['open_min']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = -1
r['alpha'][r['r1'] > r['r2']] = 0
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha148']
return alpha
@timer
def alpha149(self):
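# 252-day regression beta of the stock's daily returns against the benchmark's daily returns,
# using only days on which the benchmark closed lower than the day before.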
close = self.close
close_index = self.close_index
close_delay = Delay(close,1)
close_index_delay = Delay(close_index,1)
data_index = pd.concat([close_index,close_index_delay], axis = 1, join = 'inner')
data_index.columns = ['close','close_delay']
data_index['delta'] = data_index['close']/data_index['close_delay'] - 1
data_index['judge'] = 1
data_index['judge'][data_index['close'] >= data_index['close_delay']] = 0
data_index['delta'][data_index['judge'] == 0] = np.nan
# index_delta_unstack = index_delta_unstack.dropna()
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['delta'] = data['close'] / data['close_delay'] - 1
df1 = pd.DataFrame(data['delta'])
df2 = pd.DataFrame(data_index['delta'])
alpha = RegBeta(1,df1,df2,252)
alpha.columns = ['alpha149']
return alpha
@timer
def alpha150(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
alpha = (data['Close'] + data['High'] + data['Low'])/3 * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha150']
return alpha
@timer
def alpha151(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close'] - data['close_delay'])
alpha = SMA(temp,20,1)
alpha.columns = ['alpha151']
return alpha
@timer
def alpha152(self):
close = self.close
close_delay = Delay(close,9)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
sma1 = SMA(temp_delay,9,1)
sma1_delay = Delay(sma1,1)
sma1_delay_mean1 = Mean(sma1_delay,12)
sma1_delay_mean2 = Mean(sma1_delay,26)
data_temp = pd.concat([sma1_delay_mean1,sma1_delay_mean2],axis = 1, join = 'inner')
data_temp.columns = ['m1','m2']
alpha = SMA(pd.DataFrame(data_temp['m1'] - data_temp['m2']),9,1)
alpha.columns = ['alpha152']
return alpha
@timer
def alpha153(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close_mean3, close_mean6, close_mean12, close_mean24], axis = 1 ,join ='inner')
alpha = pd.DataFrame(np.mean(data, axis = 1))
alpha.columns = ['alpha153']
return alpha
@timer
def alpha154(self):
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,180)
data = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr = Corr(data,18)
vwap_min = TsMin(vwap,16)
data1 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data1.columns = ['vwap','vwap_min']
temp = pd.DataFrame(data1['vwap'] - data1['vwap_min'])
data_temp = pd.concat([corr,temp], axis = 1, join = 'inner')
data_temp.columns = ['corr','temp']
data_temp['alpha'] = 1
data_temp['alpha'][data_temp['corr'] >= data_temp['temp']] = 0
alpha = pd.DataFrame(data_temp['alpha'])
alpha.columns = ['alpha154']
return alpha
@timer
def alpha155(self):
volume = self.volume
sma1 = SMA(volume,13,2)
sma2 = SMA(volume,26,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3], axis = 1 ,join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(data['temp'] - data['sma'])
alpha.columns = ['alpha155']
return alpha
@timer
def alpha156(self):
vwap = self.vwap
Open = self.open
low = self.low
vwap_delta = Delta(vwap,5)
vwap_delta_decay = DecayLinear(vwap_delta,3)
r1 = Rank(vwap_delta_decay)
data1 = pd.concat([Open,low],axis = 1, join = 'inner')
temp = -1 * Delta(pd.DataFrame(data1['Open'] * 0.15 + data1['Low'] * 0.85),2)
temp_decay = DecayLinear(temp,3)
r2 = Rank(temp_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(- 1 *np.max(r, axis = 1))
alpha.columns = ['alpha156']
return alpha
@timer
def alpha157(self):
close = self.close
ret = self.ret
close_delta = Delta(close,5)
close_delta_r = Rank(Rank(close_delta) * -1)
r1 = TsMin(close_delta_r,2)
ret_delay = Delay(-1 * ret,6)
r2 = TsRank(ret_delay,5)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
temp = pd.DataFrame(r['r1'] + r['r2'])
alpha = TsMin(temp,5)
alpha.columns = ['alpha157']
return alpha
@timer
def alpha158(self):
high = self.high
low = self.low
close = self.close
temp = SMA(close,15,2)
temp.columns = ['temp']
data = pd.concat([high,low,close,temp],axis = 1 , join = 'inner')
alpha =(data['High'] + data['Low'] - 2 * data['temp'] )/data['Close']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha158']
return alpha
@timer
def alpha159(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
data1 = pd.concat([low,close_delay],axis = 1, join = 'inner')
data2 = pd.concat([high, close_delay], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.min(data1,axis = 1))
temp2= pd.DataFrame(np.max(data2,axis = 1))
temp = pd.concat([temp1,temp2], axis = 1 ,join = 'inner')
temp.columns = ['temp1','temp2']
temp1_sum6 = Sum(temp1,6)
temp1_sum12 = Sum(temp1,12)
temp1_sum24 = Sum(temp1,24)
tep = pd.DataFrame(temp['temp2'] - temp['temp1'])
s6 = Sum(tep,6)
s12 = Sum(tep,12)
s24 = Sum(tep,24)
data3 = pd.concat([temp1_sum6,temp1_sum12,temp1_sum24,s6,s12,s24], axis = 1 ,join = 'inner')
data3.columns = ['ts6','ts12','ts24','s6','s12','s24']
temp3 = pd.DataFrame(data3['ts6']/data3['s6'] * 12 * 24 + data3['ts12']/data3['s12'] * 6 * 24 \
+ data3['ts24']/data3['s24'] * 6 * 24)
alpha = temp3 / (6*12 + 6*24 + 12*24) * 100
alpha.columns = ['alpha159']
return alpha
@timer
def alpha160(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_std','close_delay']
data['close_std'][data['close'] >= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data['close_std']),20,1)
alpha.columns = ['alpha160']
return alpha
@timer
def alpha161(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data1 = pd.concat([high,low],axis = 1 , join = 'inner')
diff = pd.DataFrame(data1['High'] - data1['Low'])
data2 = pd.concat([close_delay,high], axis = 1, join ='inner')
abs1 = pd.DataFrame(np.abs(data2['close_delay'] - data2['High']))
data3 = pd.concat([diff,abs1], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.max(data3,axis = 1))
data4 = pd.concat([close_delay,low],axis = 1, join = 'inner')
temp2 = pd.DataFrame(np.abs(data4['close_delay'] -data4['Low']))
data = pd.concat([temp1,temp2],axis =1 , join = 'inner')
data.columns = ['temp1','temp2']
temp = pd.DataFrame(np.max(data, axis = 1))
alpha = Mean(temp,12)
alpha.columns = ['alpha161']
return alpha
@timer
def alpha162(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['max']= data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
temp1 = SMA(pd.DataFrame(data['max']),12,1)
temp2 = SMA(pd.DataFrame(data['abs']),12,1)
data1 = pd.concat([temp1,temp2], axis = 1, join = 'inner')
data1.columns = ['temp1','temp2']
tep = pd.DataFrame(data1['temp1']/data1['temp2'])
temp3 = TsMin(tep,12)
temp4 = TsMax(tep,12)
data_temp = pd.concat([tep,temp3,temp4], axis = 1, join = 'inner')
data_temp.columns = ['tep','temp3','temp4']
alpha = (data_temp['tep'] - data_temp['temp3'])/(data_temp['temp4'] - data_temp['temp3']) * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha162']
return alpha
@timer
def alpha163(self):
low = self.low
high = self.high
volume = self.volume
ret = self.ret
vwap = self.vwap
volume_mean = Mean(volume,20)
data = pd.concat([high,low,vwap,ret,volume_mean],axis = 1, join = 'inner')
data.columns = ['high','low','vwap','ret','volume_mean']
temp = pd.DataFrame(-1 *data['ret'] * data['volume_mean'] *data['vwap'] * \
(data['high'] - data['low']))
alpha = Rank(temp)
alpha.columns = ['alpha163']
return alpha
@timer
def alpha164(self):
close = self.close
high = self.high
low = self.low
close_delay = Delay(close,1)
data = pd.concat([close,high,low,close_delay],axis = 1, join = 'inner')
data.columns = ['close','high','low','close_delay']
data['temp'] = 1/(data['close'] - data['close_delay'])
data_min = TsMin(pd.DataFrame(data['temp']),12)
data_min.columns = ['min']
data2 = pd.concat([data,data_min],axis = 1, join = 'inner')
data2['tep'] = (data2['temp'] - data2['min'])/(data2['high'] - data2['low'])
data2['tep'][data['close'] <= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data2['tep']) * 100,13,2)
alpha.columns = ['alpha164']
return alpha
@timer
def alpha165(self):
close = self.close
close_mean = Mean(close,48)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame(data['close'] - data['close_mean'])
temp_sum = Sum(temp,48)
temp_sum_min = TsMin(temp_sum,48)
temp_sum_max = TsMax(temp_sum,48)
close_std = STD(close,48)
data_temp = pd.concat([temp_sum_min,temp_sum_max,close_std], axis = 1, join = 'inner')
data_temp.columns = ['min','max','std']
alpha = (data_temp['max'] - data_temp['min'])/data_temp['std']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha165']
return alpha
@timer
def alpha166(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_mean = Mean(temp,20)
data1 = pd.concat([temp,temp_mean], axis = 1, join = 'inner')
data1.columns = ['temp','temp_mean']
temp2 = Sum(pd.DataFrame(data1['temp'] - data1['temp_mean']),20) * 20 * 19
temp3 = Sum(temp,20) * 19 * 18
data2 = pd.concat([temp2,temp3], axis = 1, join = 'inner')
data2.columns = ['temp2','temp3']
alpha = np.power(data2['temp2'],1.5)/np.power(data2['temp3'],1.5)
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha166']
return alpha
@timer
def alpha167(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = data['close'] - data['close_delay']
data['temp'][data['close'] <= data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha167']
return alpha
@timer
def alpha168(self):
volume = self.volume
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['volume','volume_mean']
alpha = pd.DataFrame(data['volume']/data['volume_mean'] * -1)
alpha.columns = ['alpha168']
return alpha
@timer
def alpha169(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp1 = pd.DataFrame(data['close'] - data['close_delay'])
sma = SMA(temp1,9,1)
temp2 = Delay(sma,1)
temp2_mean12 = Mean(temp2,12)
temp2_mean26 = Mean(temp2,26)
data2 = pd.concat([temp2_mean12,temp2_mean26], axis = 1, join ='inner')
data2.columns = ['mean1','mean2']
alpha = SMA(pd.DataFrame(data2['mean1'] - data2['mean2']),10,1)
alpha.columns = ['alpha169']
return alpha
@timer
def alpha170(self):
close = self.close
high = self.high
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,20)
data1 = pd.concat([high,close,volume,volume_mean], axis = 1, join = 'inner')
data1.columns =['high','close','volume','volume_mean']
temp1 = pd.DataFrame(data1['high']/data1['close'] * data1['volume']/data1['volume_mean'])
r1 = Rank(temp1)
high_mean = Mean(high,5)
vwap_delay = Delay(vwap,5)
data2 = pd.concat([high,close,high_mean], axis = 1, join = 'inner')
data2.columns = ['high','close','high_mean']
temp2 = pd.DataFrame((data2['high'] - data2['close'])/data2['high_mean'])
temp2_r = Rank(temp2)
data3 = pd.concat([vwap,vwap_delay], axis = 1, join = 'inner')
data3.columns = ['vwap','vwap_delay']
temp3 = pd.DataFrame(data3['vwap'] - data3['vwap_delay'])
temp3_r = Rank(temp3)
rank = pd.concat([temp2_r,temp3_r], axis = 1, join = 'inner')
rank.columns = ['r1','r2']
r2 = pd.DataFrame(rank['r1'] - rank['r2'])
data_temp = pd.concat([r1,r2],axis = 1, join = 'inner')
data_temp.columns = ['r1','r2']
alpha = pd.DataFrame(data_temp['r1'] * data_temp['r2'])
alpha.columns = ['alpha170']
return alpha
@timer
def alpha171(self):
high = self.high
close = self.close
low = self.low
Open = self.open
data = pd.concat([high,close,low,Open],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * (data['Low'] - data['Close']) * np.power(data['Open'],5)/\
((data['Close'] - data['High']) * np.power(data['Close'],5)))
alpha.columns = ['alpha171']
return alpha
@timer
def alpha172(self):
high = self.high
low = self.low
hd = HD(high)
ld = LD(low)
data = pd.concat([hd,ld],axis = 1, join = 'inner')
data.columns = ['hd','ld']
data['temp'] = 0
data['temp'][((data['hd'] > data['ld'])& (data['hd'] > 0)) | \
((data['ld'] > data['hd'])& (data['ld'] > 0))] = 1
alpha = Mean(pd.DataFrame(data['temp']),6)
alpha.columns = ['alpha172']
return alpha
@timer
def alpha173(self):
close = self.close
close_ln = pd.DataFrame(np.log(close))
temp1 = SMA(close,13,2)
temp2 = SMA(close_ln,13,2)
temp3 = SMA(temp1,13,2)
temp4 = SMA(SMA(temp2,13,2),13,2)
data = pd.concat([temp1,temp3,temp4], axis = 1, join = 'inner')
data.columns = ['t1','t2','t3']
alpha = pd.DataFrame(3 * data['t1'] - 2 * data['t2'] + data['t3'])
alpha.columns = ['alpha173']
return alpha
@timer
def alpha174(self):
close = self.close
close_delay = Delay(close,1)
close_std = STD(close,20)
data = pd.concat([close,close_delay,close_std], axis = 1, join = 'inner')
data.columns = ['close','close_delay','close_std']
data['close_std'][data['close'] <= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data['close_std']),12,1)
alpha.columns = ['alpha174']
return alpha
@timer
def alpha175(self):
high = self.high
close = self.close
low = self.low
close_delay = Delay(close,1)
data = pd.concat([high,close,low,close_delay],axis = 1, join = 'inner')
data.columns = ['high','close','low','close_delay']
data_abs = pd.DataFrame(np.abs(data['close_delay'] - data['high']))
h_l = pd.DataFrame(data['high'] - data['low'])
data1 = pd.concat([data_abs,h_l], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.max(data1,axis = 1))
data_abs2 = pd.DataFrame(np.abs(data['close_delay'] - data['low']))
data2 = pd.concat([temp1,data_abs2], axis = 1, join = 'inner')
temp2 = pd.DataFrame(np.max(data2,axis = 1))
data3 = pd.concat([temp1,temp2],axis = 1, join = 'inner')
max_temp = pd.DataFrame(np.max(data3,axis = 1))
alpha = Mean(max_temp,6)
alpha.columns = ['alpha175']
return alpha
@timer
def alpha176(self):
high = self.high
close = self.close
low = self.low
volume = self.volume
low_min = TsMin(low,12)
high_max = TsMax(high,12)
data1 = pd.concat([close,low_min,high_max],axis = 1, join = 'inner')
data1.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data1['close'] - data1['low_min'])\
/(data1['high_max'] - data1['low_min']))
r1 = Rank(temp)
r2 = Rank(volume)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
alpha = Corr(rank,6)
alpha.columns = ['alpha176']
return alpha
@timer
def alpha177(self):
high = self.high
highday = Highday(high,20)
alpha = pd.DataFrame((20 - highday)/20.0 * 100)
alpha.columns = ['alpha177']
return alpha
@timer
def alpha178(self):
close = self.close
close_delay = Delay(close,1)
volume = self.volume
data = pd.concat([close,close_delay,volume], axis = 1, join = 'inner')
data.columns = ['close','close_delay','volume']
alpha = pd.DataFrame((data['close'] - data['close_delay'])\
/data['close_delay'] * data['volume'])
alpha.columns = ['alpha178']
return alpha
@timer
def alpha179(self):
low = self.low
volume = self.volume
vwap = self.vwap
data1 = pd.concat([vwap,volume], axis = 1, join = 'inner')
corr = Corr(data1,4)
r1 = Rank(corr)
volume_mean = Mean(volume,50)
volume_mean_r = Rank(volume_mean)
row_r = Rank(low)
data2 = pd.concat([row_r,volume_mean_r], axis = 1, join = 'inner')
corr2 = Corr(data2,12)
r2 = Rank(corr2)
data = pd.concat([r1,r2], axis = 1, join = 'inner')
data.columns = ['r1','r2']
alpha = pd.DataFrame(data['r1'] * data['r2'])
alpha.columns = ['alpha179']
return alpha
@timer
def alpha180(self):
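# Chooses per day between the signed 60-day time-series rank of the absolute 7-day close change and
# raw volume, depending on how volume compares with its 20-day mean, then negates the result.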
volume = self.volume
close = self.close
close_delta = Delta(close,7)
volume_mean = Mean(volume,20)
close_delta_abs = pd.DataFrame(np.abs(close_delta))
r = TsRank(close_delta_abs,60)
sign = pd.DataFrame(np.sign(close_delta))
temp = pd.concat([r,sign],axis = 1, join = 'inner')
temp.columns = ['r','sign']
temp1 = temp['r'] * temp['sign']
data = pd.concat([volume,volume_mean,temp1], axis = 1, join = 'inner')
data.columns = ['volume','volume_mean','temp1']
data['volume1'] = data['volume']
data['temp1'][data['volume'] >= data['volume_mean']] = 0
data['volume1'][data['volume'] < data['volume_mean']] = 0
alpha = -1 * pd.DataFrame(data['volume1'] + data['temp1'])
alpha.columns = ['alpha180']
return alpha
@timer
def alpha181(self):
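# 20-day rolling sum of the stock's demeaned daily return, minus the rolling sum of squared deviations
# of the benchmark close from its 20-day mean, divided by the rolling sum of the cubed deviations.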
close = self.close
close_index = self.close_index
close_delay = Delay(close,1)
data1 = pd.concat([close,close_delay],axis = 1, join = 'inner')
data1.columns = ['close','close_delay']
temp = pd.DataFrame(data1['close']/data1['close_delay']) - 1
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp,temp_mean], axis = 1, join = 'inner')
data_temp.columns = ['temp','temp_mean']
temp1 = pd.DataFrame(data_temp['temp'] - data_temp['temp_mean'])
close_index_mean = Mean(close_index,20)
data2 = pd.concat([close_index,close_index_mean], axis = 1, join = 'inner')
data2.columns = ['close_index','close_index_mean']
temp2 = pd.DataFrame(np.power(data2['close_index'] - data2['close_index_mean'],2))
temp3 = pd.DataFrame(np.power(data2['close_index'] - data2['close_index_mean'],3))
temp1_unstack = temp1.unstack()
temp2_unstack = temp2.unstack()
temp2_mod = pd.DataFrame(repmat(temp2_unstack,1,np.size(temp1_unstack,1)))
temp3_unstack = temp3.unstack()
temp3_mod = pd.DataFrame(repmat(temp3_unstack,1,np.size(temp1_unstack,1)))
temp1_result = temp1_unstack.rolling(20, min_periods = 20).sum()
temp2_result = temp2_mod.rolling(20, min_periods = 20).sum()
temp2_result.index = temp2_unstack.index.tolist()
temp3_result = temp3_mod.rolling(20, min_periods = 20).sum()
temp3_result.index = temp3_unstack.index.tolist()
result = pd.concat([temp1_result,temp2_result,temp3_result], axis = 1, join = 'inner')
m = np.size(temp1_result,1)
alpha_temp = pd.DataFrame((result.values[:,:m] - result.values[:,m:2*m])/result.values[:,2*m:])
df1 = result.iloc[:,:m]
alpha_temp.columns = df1.columns.tolist()
alpha_temp.index = df1.index.tolist()
alpha = pd.DataFrame(alpha_temp.stack())
alpha.columns = ['alpha181']
return alpha
@timer
def alpha182(self):
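# Share of the last 20 days on which the stock and the benchmark index closed on the same side of
# their respective opens.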
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1['temp'] = 1
data1['temp'][data1['Close'] <= data1['Open']] = 0
data1['temp1'] = 1
data1['temp1'][data1['Close'] > data1['Open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 0
data2['tep'][data2['close'] > data2['open']] = 1
data2['tep1'] = 0
data2['tep1'][data2['close'] < data2['open']] = 1
temp = data1['temp'].unstack()
temp1 = data1['temp1'].unstack()
tep = data2['tep'].unstack()
tep1 = data2['tep1'].unstack()
tep_rep = repmat(tep,1,np.size(temp,1))
tep1_rep = repmat(tep1,1,np.size(temp,1))
data3 = temp * tep_rep + temp1 * tep1_rep - temp * tep_rep * temp1 * tep1_rep
result = data3.rolling(20,min_periods = 20).sum()
alpha_temp = result/20.0
alpha = pd.DataFrame(alpha_temp.stack())
alpha.columns = ['alpha182']
return alpha
@timer
def alpha183(self):
close = self.close
close_mean = Mean(close,24)
close_std = STD(close,24)
data1 = pd.concat([close,close_mean], axis = 1, join = 'inner')
data1.columns = ['close','close_mean']
temp = pd.DataFrame(data1['close'] - data1['close_mean'])
temp_max = TsMax(temp,24)
temp_min = TsMin(temp,24)
data2 = pd.concat([temp_max,temp_min,close_std],axis = 1, join = 'inner')
data2.columns = ['max','min','std']
alpha = pd.DataFrame((data2['max'] - data2['min'])/data2['std'])
alpha.columns = ['alpha183']
return alpha
@timer
def alpha184(self):
close = self.close
Open = self.open
data = pd.concat([close,Open], axis = 1, join = 'inner')
data['diff'] = data['Open'] - data['Close']
diff_delay = Delay(pd.DataFrame(data['diff']),1)
data1 = pd.concat([diff_delay,close],axis = 1, join = 'inner')
corr = Corr(data1,200)
r1 = Rank(corr)
r2 = Rank(pd.DataFrame(data['diff']))
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha184']
return alpha
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import xlrd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import roc_curve, auc, accuracy_score
import matplotlib.pyplot as plt
import xgboost as xgb
import pandas as pd
from xgboost import XGBClassifier
from pandas.core.frame import DataFrame
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
import numpy as np
from sklearn import tree
from sklearn.tree import export_graphviz
import pydotplus
import random
import requests
import json
import base64
import urllib
import sys
import ssl
import graphviz
import time
# In[ ]:
def readxls(root):
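# Read every data row from the first sheet of an .xls workbook; returns the rows plus the header row (column names).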
data_list=[]
data=xlrd.open_workbook(root)
table=data.sheets()[0]
nrows=table.nrows
ncols=table.ncols
for i in range(1,nrows):
data_list.append(table.row_values(i))
rowname=table.row_values(0)
return data_list,rowname
# In[ ]:
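# Build the labelled dataset: negative (healthy) samples get label 0, positive (unhealthy) samples label 1,
# then hold out 25% of the rows for testing.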
healthy,rowname=readxls("negative_data.xls")
unhealthy,rowname=readxls("positive_data.xls")
total_data=healthy+unhealthy
total_data=DataFrame(total_data)
total_data.columns=rowname
#print(total_data)
target=[0]*len(healthy)+[1]*len(unhealthy)
X_train, X_test, y_train, y_test =train_test_split(total_data, target, test_size=0.25, random_state=99999)
# In[ ]:
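# Rank features with an ExtraTrees ensemble and keep only those whose
# importance reaches the 0.03 threshold; the train and test matrices are then
# reduced to the selected columns.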
start = time.perf_counter()
clf = ExtraTreesClassifier()
clf = clf.fit(X_train, y_train)
print(clf.feature_importances_)
model = SelectFromModel(clf, prefit=True,threshold=0.03)
X_train = model.transform(X_train)
X_test=model.transform(X_test)
print(len(X_train[0]))
end = time.perf_counter()
print(str(end-start))
# In[ ]:
select_result=DataFrame(clf.feature_importances_).sort_values(by=0).T
select_result.columns=rowname
select_result.to_csv('feature selection.csv')
print(select_result)
# In[ ]:
sel_rows=np.array(rowname)[clf.feature_importances_>=0.03]
# In[ ]:
X_train=DataFrame(X_train)
X_train.columns=np.array(rowname)[clf.feature_importances_>=0.03]
X_test= | DataFrame(X_test) | pandas.core.frame.DataFrame |
__author__ = "<NAME>"
__license__ = "Apache 2"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__website__ = "https://llp.berkeley.edu/asgari/"
__git__ = "https://github.com/ehsanasgari/"
__email__ = "<EMAIL>"
__project__ = "1000Langs -- Super parallel project at CIS LMU"
import requests
from bs4 import BeautifulSoup
import tqdm
import sys
sys.path.append('../')
from utility.file_utility import FileUtility
import pandas as pd
import numpy as np
from massive_parallelbible_IF.accessbible import AccessBible
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
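# Silence third-party warnings during crawling by replacing warnings.warn with
# a no-op.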
def getMetaFindBible():
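    """Parse the saved find.bible listing into a per-translation metadata frame."""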
#"https://find.bible/bibles/"
base_url = '../meta/bibles_list.html'
f=open(base_url,'r')
soup = BeautifulSoup(f)
table=soup.select('table')[0]
df=pd.read_html(table.prettify(), flavor='bs4',header=0)[0]
res = [[np.where(tag.has_attr('href'),tag.get('href'),"no link") for tag in tagr.find_all('a')] for tagr in table.find_all('tr')]
df['trans_ID'] = ['Not Available' if x==[] else x[0].tolist().split('/')[-1] for x in res[1::]]
df=df.rename(index=str,columns={x:x.strip() for x in df.columns.tolist()})
df=df.rename(index=str,columns={'Name':'Description','Date':'Year','ISO':'language_iso','Language':'language_name'})
df=df[['trans_ID','language_iso','language_name','Description','Year']]
df=df[df['trans_ID']!='Not Available']
df.set_index('trans_ID')
return df
def getMetaEbible():
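    """Scrape the eBible copyright tables (falling back to a bundled HTML copy)
    and return one row per translation with its ID, language ISO code and year."""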
try:
base_url = 'http://ebible.org/Scriptures/copyright.php'
soup = BeautifulSoup(requests.get(base_url).content)
tables=soup.select('table')
dfs=[]
for table in tables:
dfs.append(pd.read_html(table.prettify(), flavor='bs4',header=0)[0])
df=pd.concat(dfs, sort=True)
mask=(df['FCBH/DBS'].str.len() == 6) & (df['FCBH/DBS'].str.isupper())
except:
soup = BeautifulSoup(open("../meta/ebible.html") )
tables=soup.select('table')
dfs=[]
for table in tables:
dfs.append(pd.read_html(table.prettify(), flavor='bs4',header=0)[0])
df=pd.concat(dfs, sort=True)
mask=(df['FCBH/DBS'].str.len() == 6) & (df['FCBH/DBS'].str.isupper())
df = df.loc[mask]
df['iso']=[x[0:3] for x in df['ID'].tolist()]
df=df[['iso','FCBH/DBS','Language in English', 'Year','Short Title']]
df=df.rename(index=str,columns={'iso':'language_iso','FCBH/DBS':'trans_ID','Language in English':'language_name','Short Title':'Description','Date':'Year'})
df=df[['trans_ID','language_iso','language_name','Description','Year']]
df.set_index('trans_ID')
return df
def getMetaMerged():
# read and merge two bibles
df_ebible=getMetaEbible()
df_main=getMetaFindBible()
df=pd.concat([df_main,df_ebible])
df.drop_duplicates(subset=['trans_ID'], keep='last', inplace=True)
df.set_index('trans_ID')
return df
def getMassiveparallel_meta(update=False):
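    """Return per-language translation counts and verse statistics.

    With update=True the statistics are recomputed from the parallel Bible
    corpus and cached to ../meta/massive_par_stat.tsv; otherwise the cached
    table is read back.
    """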
errors=[]
if update:
# Get michael's corpora
AccBible = AccessBible(AccessBible.path)
massive_par_corpora=AccBible.get_list_of_all_lang_translations()
massive_par_corpora_length=dict()
for lang, trnsls in tqdm.tqdm(massive_par_corpora.items()):
length_list=[]
for trns in trnsls:
try:
l=len(AccBible.get_subcorpus_bible_by_lang_trans_filtered(lang,trns))
length_list.append(l)
except:
errors.append((lang,trns))
if length_list!=[]:
massive_par_corpora_length[lang]=(len(length_list),max(length_list),np.mean(length_list))
rows=[]
for iso, scores in massive_par_corpora_length.items():
rows.append([iso, scores[0], scores[1], scores[2]])
df=pd.DataFrame(rows)
df=df.rename(index=str, columns={0:'language_iso',1:'#trans-massivepar', 2:'max-verse-massivepar',3:'mean-verse-massivepar'})
df=df.set_index('language_iso')
df.to_csv('../meta/massive_par_stat.tsv', sep='\t', index=True)
return | pd.read_table('../meta/massive_par_stat.tsv', sep='\t') | pandas.read_table |
# -*- coding: utf-8 -*-
"""Creates folders and files with simulated data for various characterization techniques
Notes
-----
All data is made up and does not correspond to the materials listed.
The data is meant to simply emulate real data and allow for basic analysis.
@author: <NAME>
Created on Jun 15, 2020
"""
from pathlib import Path
from lmfit import lineshapes
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import PySimpleGUI as sg
from . import utils
__all__ = ['generate_raw_data']
_PARAMETER_FILE = 'raw data parameters.txt'
def _generate_peaks(x, y, peak_type, params, param_var, **func_kwargs):
"""
Used to generate peaks with a given variability.
Parameters
----------
x : array-like
The x data.
y : array-like
The y data (can contain the background and noise).
peak_type : function
A peak generating function, which uses x and the items from
params as the inputs.
params : list or tuple
The parameters to create the given peak_type; len(params) is
how many peaks will be made by the function.
param_var : list or tuple
Random variability to add to the parameter; the sigma associated
with the normal distribution of the random number generator.
func_kwargs : dict
Additional keyword arguments to pass to the peak generating function.
Returns
-------
y : array-like
The y data with the peaks added.
new_params : list
The actual parameter values after applying the given variability.
Notes
-----
Uses np.random.rand to only use random values in the range [0, 1). This
was a mistake, and np.random.randn was supposed to be used, but the parameter
values and variability are already optimized for use with np.random.rand, so
there is no reason to change anything since it works.
"""
# to prevent overwriting the input collection objects
new_params = [param.copy() for param in params]
for param in new_params:
for i, value in enumerate(param):
param[i] = value + param_var[i] * np.random.rand(1)
y += peak_type(x, *param, **func_kwargs)
return y, new_params
def _generate_XRD_data(directory, num_data=6, show_plots=True):
"""
Generates the folders and files containing example XRD data.
Parameters
----------
directory : Path
The file path to the Raw Data folder.
num_data : int, optional
The number of files to create.
show_plots : bool, optional
If True, will show a plot of the data.
Notes
-----
This function will create two folders containing the same data
with different file names, simply to create more files.
The background is a second order polynomial.
Peaks are all pseudovoigt.
"""
fe_path = Path(directory, 'XRD/Fe')
ti_path = Path(directory, 'XRD/Ti')
fe_path.mkdir(parents=True, exist_ok=True)
ti_path.mkdir(parents=True, exist_ok=True)
x = np.linspace(10, 90, 500)
background = 0.4 * ((75 - x)**2) # equivalent to 0.4*x^2 - 60*x + 2250
# [amplitude, center, sigma, fraction]
params = [
[3000, 18, 0.3, 0.5],
[5000, 32, 0.2, 0.5],
[5000, 36, 1, 0.5],
[1000, 51, 0.5, 0.5],
[1500, 65, 0.5, 0.5],
[600, 80, 0.5, 0.5]
]
param_var = [1000, 1, 1, 0.1]
plt.figure(num='xrd')
param_dict = {}
data = {'x': x}
for i in range(num_data if not num_data % 2 else num_data + 1):
if i < num_data / 2:
sample_name = f'Ti-{i}W-700'
sample_name_2 = f'Fe-{i}W-700'
else:
sample_name = f'Ti-{(i - int(np.ceil(num_data / 2)))}W-800'
sample_name_2 = f'Fe-{(i - int(np.ceil(num_data / 2)))}W-800'
noise = 10 * np.random.randn(x.size)
data['y'], new_params = _generate_peaks(
x, background + noise, lineshapes.pvoigt, params, param_var
)
param_dict[sample_name] = new_params
param_dict[sample_name_2] = new_params
data_df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
import itertools
from dateutil.relativedelta import relativedelta
import sklearn.tree as tree
from sklearn.neural_network import MLPClassifier
from imblearn.over_sampling import SMOTE, ADASYN
from imblearn.combine import SMOTEENN
from imblearn.under_sampling import RandomUnderSampler, EditedNearestNeighbours
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, precision_score, recall_score
from imblearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import label_binarize
from sklearn.preprocessing import scale
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import StandardScaler
import xgboost as xgb
from sklearn.ensemble import GradientBoostingClassifier
from joblib import Parallel, delayed
pd.set_option('display.max_columns', None)
def get_all_data():
#dir = 'D:\\Backups\\StemData\\'
files = ['sample_orig_2016.txt', 'sample_orig_2015.txt', 'sample_orig_2014.txt', 'sample_orig_2013.txt',
'sample_orig_2012.txt', 'sample_orig_2011.txt',
'sample_orig_2010.txt', 'sample_orig_2009.txt', 'sample_orig_2008.txt', 'sample_orig_2007.txt']
files1 = ['sample_svcg_2016.txt', 'sample_svcg_2015.txt', 'sample_svcg_2014.txt', 'sample_svcg_2013.txt',
'sample_svcg_2012.txt', 'sample_svcg_2011.txt',
'sample_svcg_2010.txt', 'sample_svcg_2009.txt', 'sample_svcg_2008.txt', 'sample_svcg_2008.txt']
merged_data = | pd.DataFrame() | pandas.DataFrame |
from __future__ import annotations
from typing import Optional, List, Dict, Tuple
import logging
import textwrap
import pandas as pd
import numpy as np
import h5py
from tqdm import tqdm
from .catmaid_interface import Catmaid, Bbox, ConnectorDetail
from .utils import CoordZYX
logger = logging.getLogger(__name__)
def trim_cols(df, required: List[str], name=None):
try:
return df[required]
except KeyError:
name = f"{name} " if name else ""
msg = f"Invalid {name}dataframe columns.\n\tRequired: {required}\n\t Got: {list(df.columns)}"
raise ValueError(msg)
class TransformerMixin:
_transformer_attr: Optional[str] = None
def world_to_px(self, world_coords, as_int=False, round_z=False):
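        """Convert world coordinates to (fractional) pixel indices.

        Coordinates are shifted by the image offset and divided by the per-axis
        resolution; the z component can optionally be rounded and the result
        cast to integers.
        """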
if self._transformer_attr is None:
out = (np.asarray(world_coords) - self.offset) / self.resolution
if round_z:
out[..., 0] = np.round(out[..., 0]).astype(out.dtype)
if as_int:
out = out.astype(np.uint64)
return out
else:
return getattr(self, self._transformer_attr).world_to_px(
world_coords, as_int, round_z
)
def px_to_world(self, px_coords):
if self._transformer_attr is None:
return (
np.asarray(px_coords, dtype=np.float64) * self.resolution + self.offset
)
else:
return getattr(self, self._transformer_attr).px_to_world(px_coords)
class Image(TransformerMixin):
def __init__(
self, array, resolution=(1, 1, 1), offset=(0, 0, 0), dims=("z", "y", "x")
):
self.array = np.asarray(array)
self.resolution = np.asarray(resolution, dtype=float)
self.offset = np.asarray(offset, dtype=float)
if list(dims) != ["z", "y", "x"]:
raise NotImplementedError("Non-ZYX orientations are not supported")
self.dims = dims
def extents(self):
"""[[mins], [maxes]]"""
return np.array([self.offset, self.offset + self.resolution * self.array.shape])
def to_hdf5(self, f, name, mode="a", attrs=None):
if not isinstance(f, h5py.Group):
with h5py.File(f, mode) as f2:
return self.to_hdf5(f2, name)
ds = f.create_dataset(name, data=self.array, compression="gzip")
ds.attrs["resolution"] = self.resolution
ds.attrs["offset"] = self.offset
ds.attrs["dims"] = self.dims
if attrs is not None:
ds.attrs.update(attrs)
return ds
def is_compatible(self, other: Image):
try:
self.raise_on_incompatible(other)
except ValueError:
return False
return True
def raise_on_incompatible(self, other: Image, names=("left", "right")):
features = {}
if not isinstance(self, Image) or not isinstance(other, Image):
features["class"] = (type(self), type(other))
if self.array.shape != other.array.shape:
features["shape"] = (self.array.shape, other.array.shape)
if tuple(self.resolution) != tuple(other.resolution):
features["resolution"] = (tuple(self.resolution), tuple(other.resolution))
if tuple(self.offset) != tuple(other.offset):
features["offset"] = (tuple(self.offset), tuple(other.offset))
if tuple(self.dims) != tuple(other.dims):
features["dims"] = (tuple(self.dims), tuple(other.dims))
if not features:
return
left_name, right_name = pad_strs(names)
lines = []
for k, (l_val, r_val) in features.items():
lines.append(k)
lines.append(f" {left_name}: {l_val}")
lines.append(f" {right_name}: {r_val}")
msg = textwrap.indent("\n".join(lines), " ")
raise ValueError("Images not compatible.\n" + msg)
@classmethod
def from_hdf5(cls, f, name=None):
if isinstance(f, h5py.Dataset):
return cls(f[:], f.attrs["resolution"], f.attrs["offset"], f.attrs["dims"])
elif isinstance(f, h5py.Group):
return cls.from_hdf5(f[name])
else:
with h5py.File(f, "r") as f2:
return cls.from_hdf5(f2[name])
def max_plus_one(self):
        if not np.issubdtype(self.array.dtype, np.integer):
            raise TypeError("Array is not of integer subtype")
        return self.array.max() + 1
def contains(self, coord: Tuple[float, float, float]) -> bool:
"""Whether a real-world coordinate tuple is inside the array"""
        diffs = self.extents() - coord
return np.all(diffs[0] <= 0) and np.all(diffs[1] >= 0)
def sub_image_px(
self, internal_offset: Tuple[int, int, int], shape: Tuple[int, int, int]
) -> Image:
int_off = np.asarray(internal_offset, int)
if np.any(int_off < 0):
raise ValueError("internal_offset must be positive")
if np.any(int_off + shape > self.array.shape):
raise ValueError("sub-image extends beyond image")
slicing = tuple(slice(o, o + s) for o, s in zip(int_off, shape))
arr = self.array[slicing]
return type(self)(
arr, self.resolution, self.offset + int_off * self.resolution, self.dims
)
def sub_image(
self,
internal_offset: Tuple[float, float, float],
shape: Tuple[float, float, float],
) -> Image:
"""Start and stop points are found in world coordinates; then rounded to pixels"""
        # Convert the world-space offset and shape (relative to this image) into
        # pixel units, then reuse the pixel-based bounds checking.
        start_px = np.round(np.asarray(internal_offset) / self.resolution).astype(int)
        stop_px = np.round(
            (np.asarray(internal_offset) + np.asarray(shape)) / self.resolution
        ).astype(int)
        return self.sub_image_px(start_px, stop_px - start_px)
def pad_strs(strs, prefix=True, pad=" "):
if len(pad) != 1:
raise ValueError("Pad string must be 1 character long")
length = max(len(s) for s in strs)
return [s + pad * (length - len(s)) for s in strs]
def serialize_treenodes(tns: pd.DataFrame):
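    """Make the treenode table HDF5-friendly by storing missing parent IDs as 0;
    deserialize_treenodes reverses this."""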
tns = tns.copy()
tns["parent_id"] = np.array(tns["parent_id"].fillna(0), dtype=int)
return tns
def deserialize_treenodes(tns: pd.DataFrame):
tns = tns.copy()
ids = pd.array(tns["parent_id"], dtype="UInt64")
ids[ids == 0] = pd.NA
tns["parent_id"] = ids
return tns
def remove_single_nodes(treenodes: pd.DataFrame):
"""Remove all nodes belonging to skeletons with only 1 treenode in the dataframe"""
skids, counts = np.unique(treenodes["skeleton_id"], return_counts=True)
single_tns = skids[counts == 1]
to_drop = np.zeros(len(treenodes), bool)
for skid in single_tns:
to_drop |= treenodes["skeleton_id"] == skid
return treenodes.loc[~to_drop].copy()
class CatnapIO(TransformerMixin):
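    """Container for a raw image, treenode/connector/partner tables and an optional
    label image, with helpers to round-trip the whole bundle through HDF5."""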
_transformer_attr = "raw"
def __init__(
self,
raw: Image,
treenodes: pd.DataFrame,
connectors: pd.DataFrame,
partners: pd.DataFrame,
labels: Optional[Image] = None,
):
self.raw: Image = raw
self.treenodes = remove_single_nodes(
trim_cols(
treenodes,
["treenode_id", "parent_id", "skeleton_id", "z", "y", "x"],
"treenode",
)
)
self.connectors = trim_cols(
connectors, ["connector_id", "z", "y", "x"], "connector"
)
self.partners = trim_cols(
partners,
["skeleton_id", "treenode_id", "connector_id", "is_presynaptic"],
"partners",
)
self.labels: Optional[Image] = None
self.set_labels(labels)
def to_hdf5(self, fpath, gname=""):
gname = gname.rstrip("/") if gname else ""
prefix = f"{gname}/tables"
with pd.HDFStore(fpath, "w") as f:
serialize_treenodes(self.treenodes).to_hdf(f, f"{prefix}/treenodes")
self.connectors.to_hdf(f, f"{prefix}/connectors")
self.partners.to_hdf(f, f"{prefix}/partners")
prefix = f"{gname}/volumes"
with h5py.File(fpath, "a") as f:
self.raw.to_hdf5(f, f"{prefix}/raw")
if self.labels is not None:
self.labels.to_hdf5(f, f"{prefix}/labels")
@classmethod
def from_hdf5(cls, fpath, gname="", ignore_labels=False):
prefix = f"{gname}/tables"
with | pd.HDFStore(fpath, "r") | pandas.HDFStore |
import pandas as pd
from collections import Counter
import sklearn.preprocessing as preprocessing
import numpy as np
import os
from pandas import Series
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import SelectKBest
column_names = ['age','workclass','fnlwgt','education','education-num','marital-status','occupation','relationship','race','sex','capital-gain','capital-loss','hours-per-week','native-country','class']
cur_path = os.path.dirname(__file__)
'''
Read the data from csv
'''
def read_data(path):
data = open(path)
#column_names = list(data.columns.values)
data = | pd.read_csv(data,sep='\s*,\s*',encoding='ascii',names = column_names,engine='python') | pandas.read_csv |
"""
Functions and objects to work with mzML data and tabular data obtained from
third party software used to process Mass Spectrometry data.
Objects
-------
MSData: reads raw MS data in the mzML format. Manages Chromatograms and
MSSpectrum creation. Performs feature detection on centroid data.
Functions
---------
read_pickle(path): Reads a DataContainer stored as a pickle.
read_progenesis(path): Reads data matrix in a csv file generated with
Progenesis software.
read_data_matrix(path, mode): Reads data matrix in several formats. Calls other
read functions.
functions.
See Also
--------
Chromatogram
MSSpectrum
DataContainer
Roi
"""
import numpy as np
import pandas as pd
import os
from typing import Optional, Iterable, Callable, Union, List, BinaryIO, TextIO
from .container import DataContainer
from ._names import *
from . import lcms
from . import validation
import pickle
import requests
import warnings
import pyopenms
def read_pickle(path: Union[str, BinaryIO]) -> DataContainer:
"""
read a DataContainer stored as a pickle
Parameters
----------
path: str or file
path to read DataContainer
Returns
-------
DataContainer
"""
if hasattr(path, "read"):
with path as fin:
result = pickle.load(fin)
else:
with open(path, "rb") as fin:
result = pickle.load(fin)
return result
def read_progenesis(path: Union[str, TextIO]):
"""
Read a progenesis file into a DataContainer
Parameters
----------
path : str or file
Returns
-------
dc : DataContainer
"""
df_header = pd.read_csv(path, low_memory=False)
df = df_header.iloc[2:].copy()
col_names = df_header.iloc[1].values
df.columns = col_names
df = df.set_index("Compound")
df_header = df_header.iloc[:1].copy()
df_header = df_header.fillna(axis=1, method="ffill")
norm_index = df_header.columns.get_loc("Normalised abundance") - 1
raw_index = df_header.columns.get_loc("Raw abundance") - 1
ft_def = df.iloc[:, 0:norm_index].copy()
data = df.iloc[:, raw_index:(2 * raw_index - norm_index)].T
sample_info = \
df_header.iloc[:, (raw_index + 1):(2 * raw_index - norm_index + 1)].T
# rename data matrix
data.index.rename("sample", inplace=True)
data.columns.rename("feature", inplace=True)
data = data.astype(float)
# rename sample info
sample_info.index = data.index
sample_info.rename({sample_info.columns[0]: _sample_class},
axis="columns", inplace=True)
# rename features def
ft_def.index.rename("feature", inplace=True)
ft_def.rename({"m/z": "mz", "Retention time (min)": "rt"},
axis="columns",
inplace=True)
ft_def = ft_def.astype({"rt": float, "mz": float})
ft_def["rt"] = ft_def["rt"] * 60
validation.validate_data_container(data, ft_def, sample_info)
dc = DataContainer(data, ft_def, sample_info)
return dc
def read_mzmine(data: Union[str, TextIO],
sample_metadata: Union[str, TextIO]) -> DataContainer:
"""
read a MZMine2 csv file into a DataContainer.
Parameters
----------
data : str or file
csv file generated with MZMine.
sample_metadata : str, file or DataFrame
csv file with sample metadata. The following columns are required:
* sample : the same sample names used in `data`
* class : the sample classes
Columns with run order and analytical batch information are optional.
Must be names "order" and "batch"
Returns
-------
DataContainer
"""
df = pd.read_csv(data)
col_names = | pd.Series(df.columns) | pandas.Series |
import os
import argparse
import itertools
import numpy as np
import pandas as pd
import scipy.sparse as sp
from tqdm import tqdm
from shqod import (
read_trajec_csv,
trajec,
read_level_grid,
od_matrix,
calculate_field,
field_to_dict,
mobility_functional,
fractalD,
trajectory_length,
)
project_dir = os.environ["dementia"] # set in the shell
grids_dir = os.path.join(project_dir, "data", "grids", "")
processed_dir = os.path.join(project_dir, "data", "processed", "")
apoe_dir = os.path.join(project_dir, "data", "apoe_dataframes")
ad_dir = os.path.join(project_dir, "data", "ad_dataframe")
# e3e4_dir = os.path.join(apoe_dir, 'e3e4')
# e3e3_dir = os.path.join(apoe_dir, 'e3e3')
# e4e4_dir = os.path.join(apoe_dir, 'e4e4')
class NormativeBenchmark(object):
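    """Normative origin-destination benchmark for one level, age range and gender;
    test_trajectory() scores an individual trajectory against it on six metrics."""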
def __init__(
self,
age_range: str,
gender: str,
od_matrix: sp.csr.csr_matrix,
nb_trajecs: int,
grid_width: int,
grid_length: int,
level: int = None,
flags: np.array = None,
):
self.age_range = age_range
self.gender = gender
self.normative_mat = od_matrix
self.N = nb_trajecs
self.width = grid_width
self.length = grid_length
if level:
self.level = level
if flags:
self.flags = flags
self.test_names = [
"Frob. norm",
"Inf. norm",
"Restrict. sum",
"Mobty functional",
"Fractal dim.",
"Tot. length",
]
def __str__(self):
return f"Normative bechmark - level {self.level} - {self.age_range} - {self.gender} (N={self.N})"
def test_trajectory(self, trajectory_data: str):
"""Run the battery of 6 tests."""
t = list(trajec(trajectory_data, lexico=False))
lex = trajec(trajectory_data, lexico=True, grid_width=self.width)
od_mat = od_matrix([lex], self.width * self.length)
norm_mat = self.normative_mat / self.N
fro = np.linalg.norm((norm_mat - od_mat).toarray(), "fro")
inf = np.linalg.norm((norm_mat - od_mat).toarray(), np.inf)
# Sum of matching entries
r, s = od_mat.nonzero()
match = norm_mat[r, s].sum() / len(r)
# Mobility functional
mob = mobility_functional(t, self.normative_mat, self.width, self.N)
# Fractal dimension
dim = fractalD(t, self.width, self.length)
# Total length
lgt = trajectory_length(t)
return [fro, inf, match, mob, dim, lgt]
def load_environments(levels, Rs):
"""Load the normative benchmark environments."""
counts_df = pd.read_csv(processed_dir + "uk_counts.csv")
out = dict()
for lvl in levels:
# Level width and length
filename_grid = grids_dir + f"level{lvl:02}.json"
_, _, width, length = read_level_grid(filename_grid)
# The table containing counts per level, age group and gender
lvl_counts_df = counts_df.loc[counts_df.level == lvl]
for age_range, gender in itertools.product(Rs, ["f", "m"]):
# The processed normative matrix
filename = os.path.join(
processed_dir, f"level_{lvl}_uk_{age_range}_{gender}.npz"
)
norm_mat = sp.load_npz(filename)
# The nbr of entries for that age range and gender
age_counts = (
lvl_counts_df.loc[counts_df.age_range == age_range]
.set_index("gender")["counts"]
.to_dict()
)
N = age_counts[gender]
out[(lvl, age_range, gender)] = NormativeBenchmark(
age_range, gender, norm_mat, N, width, length, level=lvl
)
return out
def group_tidy_df(filename, envs, Rs):
"""Load level data for several groups and run the tests."""
df = read_trajec_csv(filename)
levels = df.level.unique()
# We append the age range
df["age_range"] = (
df["age"]
.apply(lambda x: x // 10)
.replace(dict(zip([int(x[0]) for x in Rs], Rs)))
)
# The test names are defined inside the environment object
test_names = envs[list(envs)[0]].test_names
dfs = []
for lvl in tqdm(levels):
lvl_df = df.loc[df.level == lvl].reset_index(drop=True)
results = np.ones((len(lvl_df), 6))
# Load the trajectories and calculate the results
for k, row in lvl_df.iterrows():
ar, g, data = row[["age_range", "gender", "trajectory_data"]]
results[k] = envs[(lvl, ar, g)].test_trajectory(data)
res_df = pd.DataFrame(results, columns=test_names)
res_df["level"] = lvl
dfs.append(res_df)
# We keep original ids in the results df
out_df = | pd.concat(dfs) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 15:16:47 2017
@author: wasifaahmed
"""
from flask import Flask, flash,render_template, request, Response, redirect, url_for, send_from_directory,jsonify,session
import json as json
from datetime import datetime,timedelta,date
from sklearn.cluster import KMeans
import numpy as np
from PIL import Image
from flask_sqlalchemy import SQLAlchemy
import matplotlib.image as mpimg
from io import StringIO
from skimage import data, exposure, img_as_float ,io,color
import scipy
from scipy import ndimage
import time
import tensorflow as tf
import os , sys
import shutil
import pandas as pd
from model import *
from sqlalchemy.sql import text
from sqlalchemy import *
from forms import *
import math
import csv
from sqlalchemy.orm import load_only
from numpy import genfromtxt
from sqlalchemy.ext.serializer import loads, dumps
from sqlalchemy.orm import sessionmaker, scoped_session
from flask_bootstrap import Bootstrap
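# A TensorFlow 1.x checkpoint is restored into a module-level session at start-up
# (see load_model below); the image-processing routes appear to reuse this
# session for target/shot recognition.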
graph = tf.Graph()
with graph.as_default():
sess = tf.Session(graph=graph)
init_op = tf.global_variables_initializer()
pointsarray=[]
def load_model():
sess.run(init_op)
saver = tf.train.import_meta_graph('E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
#saver = tf.train.import_meta_graph('/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
print('The model is loading...')
#saver.restore(sess, "/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727")
saver.restore(sess, 'E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727')
print('loaded...')
pass
engine =create_engine('postgresql://postgres:user@localhost/postgres')
Session = scoped_session(sessionmaker(bind=engine))
mysession = Session()
app = Flask(__name__)
app.config.update(
DEBUG=True,
SECRET_KEY='\<KEY>')
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:user@localhost/fras_production'
db.init_app(app)
Bootstrap(app)
@app.after_request
def add_header(response):
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'public, max-age=0'
return response
@app.route('/',methods=['GET', 'POST'])
def login():
form = LoginForm()
return render_template('forms/login.html', form=form)
@app.route('/home',methods=['GET', 'POST'])
def index():
return render_template('pages/home.html')
@app.route('/detail_setup/')
def Detail_Setup():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/detail_setup.html',
data=selection,
firer_1=firer_1)
@app.route('/auto_setup/')
def auto_setup():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).filter(TGroup.date==curdate).all()
return render_template('pages/auto_setup.html',
data=selection, data_2=selection_2,form=form)
@app.route('/auto_setup_1/')
def auto_setup_1():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).all()
return render_template('pages/auto_setup_1.html',
data=selection, data_2=selection_2,form=form)
@app.route('/group_gen/',methods=['GET', 'POST'])
def group_gen():
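    """AJAX helper: remember the chosen group in the session and return the eight
    service IDs registered for that group."""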
da_1=None
da_2=None
da_3=None
da_4=None
da_5=None
da_6=None
da_7=None
da_8=None
if request.method == "POST":
data = request.get_json()
group=data['data']
session['group']=group
data=TGroup.query.filter(TGroup.group_no==group).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
return jsonify(data1=da_1,
data2=da_2,
data3=da_3,
data4=da_4,
data5=da_5,
data6=da_6,
data7=da_7,
data8=da_8
)
@app.route('/detail_exitence_1/',methods=['GET', 'POST'])
def detail_exitence_1():
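    """AJAX helper: copy the chosen detail into the live TShooting table and return
    firer #1's identity plus their recent grouping results, tendencies and
    grouping lengths."""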
ra_1=None
da_1=None
detail=None
service_id_1=None
session=None
paper=None
set_no=None
cant=None
if request.method == "POST":
data = request.get_json()
detail=data['data']
dt=time.strftime("%Y-%m-%d")
data=db.session.query(Session_Detail).filter(Session_Detail.detail_no==detail).scalar()
db.session.query(TShooting).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=data.session_id,
detail_no=data.detail_no,
target_1_id=data.target_1_id,
target_2_id=data.target_2_id,
target_3_id=data.target_3_id,
target_4_id=data.target_4_id,
target_5_id=data.target_5_id,
target_6_id=data.target_6_id,
target_7_id=data.target_7_id,
target_8_id=data.target_8_id,
paper_ref=data.paper_ref,
set_no=data.set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
res=[]
ten=[]
gp_len=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==data.target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
da_1=db.session.query(Shooter.name).filter(Shooter.id==data.target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==data.target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==data.target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
session=db.session.query(TShooting.session_id).scalar()
paper=db.session.query(TShooting.paper_ref).scalar()
set_no=db.session.query(TShooting.set_no).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==data.target_1_id).scalar()
return jsonify(
data1=da_1,
ra_1=ra_1,
detail=detail,
service_id_1=service_id_1,
session=session,
paper=paper,
set_no=set_no,
cant=cant,
res=res,
ten=ten,
gp_len=gp_len
)
@app.route('/generate_ref/' ,methods=['GET', 'POST'])
def generate_ref():
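    """Return 0 when a new target paper is requested, otherwise the reference of
    the paper currently held in TPaper_ref."""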
g=None
if request.method == "POST":
data = request.get_json()
paper_ref =data['data']
if (paper_ref == 'New'):
g=0
else:
obj=TPaper_ref.query.scalar()
g= obj.paper_ref
return jsonify(gen=int(g))
@app.route('/create_detail_target_2/', methods=['GET', 'POST'])
def create_detail_target_2():
curdate=time.strftime("%Y-%m-%d")
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=TShooting.query.scalar()
return render_template('pages/create_detail_target_2.html',
detail_data=detail_data,
firer_1=firer_1
)
@app.route('/save_target_2/', methods=['GET', 'POST'])
def save_target_2():
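    """Assign the submitted firer to target 2 of the current detail in both the
    stored Session_Detail row and the live TShooting row."""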
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
ses=Session_Detail.query.first()
ses.target_2_id=r_id
db.session.commit()
temp =TShooting.query.first()
temp.target_2_id=r_id
db.session.commit()
return redirect(url_for('individual_score_target_2'))
@app.route('/create_detail_target_1/', methods=['GET', 'POST'])
def create_detail_target_1():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date==curdate).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/create_detail_target_1.html',
data=selection,
firer_1=firer_1
)
@app.route('/create_session/', methods=['GET', 'POST'])
def create_session():
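    """Create a Shooting_Session from the form; range, firearm and ammunition fall
    back to the sentinel id 999 when nothing has been selected."""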
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('create_detail_target_1'))
except Exception as e:
return redirect(url_for('error5_505.html'))
return render_template('forms/shooting_form.html', form=form, data =data ,rang=rang , firearmns=firearms, ammunation = ammunation)
@app.route('/monthly_report/',methods=['GET','POST'])
def monthly_report():
    # Each month maps to the first/last day suffixes used to build the report
    # window for the current year (February is treated as 28 days).
    month_bounds = {
        'January': ('-01-01', '-01-31'),
        'February': ('-02-01', '-02-28'),
        'March': ('-03-01', '-03-31'),
        'April': ('-04-01', '-04-30'),
        'May': ('-05-01', '-05-31'),
        'June': ('-06-01', '-06-30'),
        'July': ('-07-01', '-07-31'),
        'August': ('-08-01', '-08-31'),
        'September': ('-09-01', '-09-30'),
        'October': ('-10-01', '-10-31'),
        'November': ('-11-01', '-11-30'),
        'December': ('-12-01', '-12-31'),
    }
    try:
        if request.method == 'POST':
            month = request.form.get('comp_select')
            year = datetime.now().year
            dt_start, dt_end = month_bounds.get(month, month_bounds['December'])
            date_start = datetime.strptime(str(year) + dt_start, "%Y-%m-%d")
            date_end = datetime.strptime(str(year) + dt_end, "%Y-%m-%d")
            # One query serves every month: firing results joined to the firer,
            # rank and MPI records for the selected date window, with every join
            # condition passed as its own filter clause.
            dat1 = db.session.query(
                Grouping.date,
                Shooter.service_id,
                Rank.name,
                Shooter.name.label('firer'),
                Shooter.unit,
                Shooter.brigade,
                Grouping.detail_no,
                Grouping.result,
                Grouping.grouping_length_f,
                MPI.tendency_text
            ).filter(
                Grouping.date.between(date_start, date_end),
                Grouping.firer_id == Shooter.id,
                Shooter.rank_id == Rank.id,
                Grouping.date == MPI.date,
                Grouping.session_id == MPI.session_id,
                Grouping.firer_id == MPI.firer_id,
                Grouping.detail_no == MPI.detail_no,
                Grouping.target_no == MPI.target_no,
                Grouping.spell_no == MPI.spell_no,
                Grouping.paper_ref == MPI.paper_ref
            ).all()
            return render_template('pages/monthly_report.html', dat1=dat1, month=month)
    except Exception as e:
        return render_template('errors/month_session.html')
    return render_template('pages/monthly_report.html')
@app.route('/save_target_1/', methods=['GET', 'POST'])
def save_target_1():
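    """Register firer #1 for a detail: record the paper reference in TPaper_ref and
    rebuild the Session_Detail and TShooting rows, leaving the other seven targets
    at the sentinel id 999."""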
ref_1=None
try:
if request.method == 'POST':
detail_no = request.form['game_id_1']
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r2_id=999
r3_id=999
r4_id=999
r5_id=999
r6_id=999
r7_id=999
r8_id=999
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
ref_1 = None
paper=db.session.query(TPaper_ref).scalar()
if(ref == ""):
ref_1=paper.paper_ref
else:
ref_1=ref
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).delete()
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_target_1'))
return redirect(url_for('individual_score_target_1'))
@app.route('/FRAS/', methods=['GET', 'POST'])
def load ():
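    """Register a full eight-target detail from the manual entry form.

    Empty target boxes become the sentinel id 999, duplicate firers and more than
    five sets per paper are rejected, and the detail is written to both
    Session_Detail and the live TShooting table before moving on to image
    processing.
    """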
try:
ref_1=None
if request.method == 'POST':
detail_no = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
print("Inside ref _4 else")
ref_1=ref
print(ref_1)
print("Inside ref _4 else 1")
if(int(set_no)>5):
print("Inside ref _5 else")
return redirect(url_for('paper_duplicate_error'))
else:
print("Inside TPaper_ref")
db.session.query(TPaper_ref).delete()
print("Inside TPaper_ref")
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
print("Inside load 3")
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
print("temp1")
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
print("temp")
temp=db.session.query(TShooting.save_flag).scalar()
print(temp)
if(temp is None):
print("Inside the temp if")
print(sess)
print(detail_no)
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
print(Tdetail_shots)
print("Tdetail_shots")
db.session.add(Tdetail_shots)
db.session.commit()
print(""
)
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error_2'))
return redirect(url_for('image_process'))
@app.route('/FRAS_1/', methods=['GET', 'POST'])
def load_1 ():
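    """Same registration flow as load(), but the eight firers come from the group
    selected earlier (stored in the session) instead of the form fields; ends at
    the detail list view."""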
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_102'))
return redirect(url_for('detail_view'))
@app.route('/FRAS_2/', methods=['GET', 'POST'])
def load_2 ():
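    """Group-based registration identical to load_1(), except that on success it
    continues straight to image processing."""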
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
                for i in range(len(tmp_list)):
                    for j in range(len(tmp_list)):
                        # 999 is the placeholder for an empty lane, so repeated 999s are not duplicates.
                        if i != j and tmp_list[i] == tmp_list[j] and tmp_list[i] != 999:
                            duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error'))
return redirect(url_for('image_process'))
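# /detail_view/: lists all saved details with the Shooter record resolved for each of the eight targets.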
@app.route('/detail_view/', methods=['GET', 'POST'])
def detail_view():
detail = Session_Detail.query.all()
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view.html',detail=detail)
@app.route('/detail_view/detail/<id>', methods=['GET', 'POST'])
def view_detail(id):
detail=Session_Detail.query.filter(Session_Detail.id == id)
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view_id.html',data=detail)
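# /detail_view/edit/<id>: edits a saved detail; duplicate firers are rejected, TPaper_ref is rewritten,
# and TShooting is reloaded unless the current temporary detail was already saved (save_flag == 1).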
@app.route('/detail_view/edit/<id>', methods=['GET', 'POST'])
def view_detail_edit(id):
try:
detail=Session_Detail.query.filter(Session_Detail.id == id).first()
form=DetailEditForm(obj=detail)
if form.validate_on_submit():
tmp_list = []
target_1=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
tmp_list.append(target_1.id)
target_2=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
tmp_list.append(target_2.id)
target_3=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
tmp_list.append(target_3.id)
target_4=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
tmp_list.append(target_4.id)
target_5=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
tmp_list.append(target_5.id)
target_6=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
tmp_list.append(target_6.id)
target_7=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
tmp_list.append(target_7.id)
target_8=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
tmp_list.append(target_8.id)
duplicate = False
            for i in range(len(tmp_list)):
                for j in range(len(tmp_list)):
                    # 999 is the placeholder for an empty lane, so repeated 999s are not duplicates.
                    if i != j and tmp_list[i] == tmp_list[j] and tmp_list[i] != 999:
                        duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
detail.date=form.date.data
detail.session_id=form.session_id.data
detail.detail_no=form.detail_no.data
detail.paper_ref=form.paper_ref.data
detail.set_no=form.set_no.data
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
detail.target_1_id=target_1_obj.id
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
detail.target_2_id=target_2_obj.id
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
detail.target_3_id=target_3_obj.id
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
detail.target_4_id=target_4_obj.id
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
detail.target_5_id=target_5_obj.id
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
detail.target_6_id=target_6_obj.id
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
detail.target_7_id=target_7_obj.id
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
detail.target_8_id=target_8_obj.id
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_edit = TPaper_ref(
paper_ref=form.paper_ref.data,
detail_no=form.detail_no.data,
session_no=form.session_id.data
)
db.session.add(ref_edit)
db.session.commit()
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting.save_flag==1):
return redirect(url_for('data_save'))
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_edit =TShooting(
date=form.date.data,
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=form.session_id.data,
detail_no=form.detail_no.data,
target_1_id=target_1_obj.id,
target_2_id=target_2_obj.id,
target_3_id=target_3_obj.id,
target_4_id=target_4_obj.id,
target_5_id=target_5_obj.id,
target_6_id=target_6_obj.id,
target_7_id=target_7_obj.id,
target_8_id=target_8_obj.id,
paper_ref=form.paper_ref.data,
set_no=form.set_no.data,
save_flag=0
)
db.session.add(Tdetail_edit)
db.session.commit()
return redirect(url_for('detail_view'))
form.date.data=detail.date
form.session_id.data=detail.session_id
form.detail_no.data=detail.detail_no
form.paper_ref.data=detail.paper_ref
form.set_no.data=detail.set_no
        # Pre-populate the target fields with the service id of the shooter on each lane.
        name_1 = Shooter.query.filter(Shooter.id==detail.target_1_id).scalar()
        form.target_1_service.data = name_1.service_id
        name_2 = Shooter.query.filter(Shooter.id==detail.target_2_id).scalar()
        form.target_2_service.data = name_2.service_id
        name_3 = Shooter.query.filter(Shooter.id==detail.target_3_id).scalar()
        form.target_3_service.data = name_3.service_id
        name_4 = Shooter.query.filter(Shooter.id==detail.target_4_id).scalar()
        form.target_4_service.data = name_4.service_id
        name_5 = Shooter.query.filter(Shooter.id==detail.target_5_id).scalar()
        form.target_5_service.data = name_5.service_id
        name_6 = Shooter.query.filter(Shooter.id==detail.target_6_id).scalar()
        form.target_6_service.data = name_6.service_id
        name_7 = Shooter.query.filter(Shooter.id==detail.target_7_id).scalar()
        form.target_7_service.data = name_7.service_id
        name_8 = Shooter.query.filter(Shooter.id==detail.target_8_id).scalar()
        form.target_8_service.data = name_8.service_id
except Exception as e:
return render_template('errors/detail_view.html')
return render_template('pages/detail_view_edit.html' , detail=detail,form=form)
@app.route('/data_save', methods=['GET', 'POST'])
def data_save():
return render_template('pages/data_save.html')
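# /target_registration/: AJAX registration endpoint; expects a JSON payload with the keys
# cant, div, rank, gender, date, name, service, unit and brig.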
@app.route('/target_registration/', methods=['GET', 'POST'])
def target_registration():
result=None
if request.method=="POST":
data1 = request.get_json()
print(data1)
cant=data1['cant']
div=data1['div']
rank=data1['rank']
gen=data1['gender']
dt=data1['date']
name=data1['name']
army_no=data1['service']
unit=data1['unit']
brigade=data1['brig']
gender_id=db.session.query(Gender.id).filter(Gender.name==gen).scalar()
rank_id=db.session.query(Rank.id).filter(Rank.name==rank).scalar()
cant_id=db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant ,Cantonment.division==div).scalar()
print("cant_id")
print(cant_id)
shooter = Shooter(
name=name,
service_id = army_no,
registration_date = dt,
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=unit,
brigade=brigade
)
db.session.add(shooter)
db.session.commit()
result="Data Saved Sucessfully"
return jsonify(result=result)
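# /shooter_registration/: form-based registration; the selected rank, cantonment/division and
# gender are resolved to their ids before the Shooter row is inserted.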
@app.route('/shooter_registration/', methods=['GET', 'POST'])
def registration():
try:
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
gender =Gender.query.all()
rank = Rank.query.all()
ran = request.form.get('comp_select4')
cant = request.form.get('comp_select')
gen = request.form.get('comp_select5')
brig = request.form.get('comp_select1')
form = RegistrationForm(request.form)
if(ran is None):
pass
else:
ran_object=Rank.query.filter(Rank.name==ran).scalar()
rank_id = ran_object.id
cant_object = Cantonment.query.filter(Cantonment.cantonment==cant,Cantonment.division==brig).scalar()
cant_id = cant_object.id
gen_obj=Gender.query.filter(Gender.name==gen).scalar()
gender_id = gen_obj.id
if form.validate_on_submit():
shooter = Shooter(
name=form.name.data,
service_id = form.service_id.data,
registration_date = form.dt.data.strftime('%Y-%m-%d'),
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=form.unit.data,
brigade=form.brig.data
)
db.session.add(shooter)
db.session.commit()
new_form = RegistrationForm(request.form)
return redirect(url_for('firer_details'))
except Exception as e:
return redirect(url_for('error_4'))
return render_template('forms/registration.html',
cantonment = cantonment ,
form=form ,
rank = rank,
gender=gender)
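# /get_brigade/: AJAX helper returning the divisions of the cantonment given in the 'customer'
# query parameter as a JSON list of {"name": ...} objects.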
@app.route('/get_brigade/')
def get_brigade():
cant = request.args.get('customer')
    da = Cantonment.query.filter(Cantonment.cantonment==cant).distinct(Cantonment.division)
data = [{"name": x.division} for x in da]
return jsonify(data)
@app.route('/firer_details/', methods=['GET', 'POST'])
def firer_details():
firer = Shooter.query.all()
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_details.html' , firer = firer)
@app.route('/bulk_registration_group')
def bulk_registration_group():
form=BulkRegistrationForm(request.form)
return render_template('pages/bulk_registration_group.html',form=form)
@app.route('/bulk_registration')
def bulk_registration():
cantonment=db.session.query(Cantonment).distinct(Cantonment.cantonment)
form=RegistrationForm(request.form)
return render_template('pages/bulk_registration.html',cantonment=cantonment,form=form)
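# /upload: bulk shooter registration from a CSV file. The header row is skipped and the columns
# are read as: name, rank, gender, service id; unit, brigade and cantonment come from the form.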
@app.route('/upload', methods=['POST'])
def upload():
try:
f = request.files['data_file']
cant = request.form.get('comp_select')
div = request.form.get('comp_select1')
form=RegistrationForm(request.form)
unit = request.form['game_id_1']
brig = request.form['game_id_2']
cant_id = db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant,
Cantonment.division==div
).scalar()
if form.is_submitted():
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
shooters = Shooter(
name = lis[i][0],
service_id=lis[i][3],
registration_date=datetime.now(),
gender_id=db.session.query(Gender.id).filter(Gender.name==lis[i][2]).scalar(),
cantonment_id = cant_id,
rank_id = db.session.query(Rank.id).filter(Rank.name==lis[i][1]).scalar(),
unit=unit,
brigade=brig
)
db.session.add(shooters)
db.session.commit()
except Exception as e:
return redirect(url_for('error_3'))
return redirect(url_for('firer_details'))
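# /uploadgroup: bulk upload of firing groups from a CSV file (header row skipped); columns are the
# group number followed by the service ids for targets 1-8. If any stored group is dated before
# today, the TGroup table is cleared before the new rows are inserted.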
@app.route('/uploadgroup', methods=['POST'])
def uploadgroup():
try:
f = request.files['data_file']
form=BulkRegistrationForm(request.form)
if form.is_submitted():
curdate_p=(date.today())- timedelta(1)
if(db.session.query(db.exists().where(TGroup.date <= curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
else:
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
except Exception as e:
return redirect(url_for('error_duplicate'))
return redirect(url_for('group_view'))
@app.route('/new_group')
def new_group():
firer = [row.service_id for row in Shooter.query.all()]
return render_template('pages/new_group.html',firer_1=firer)
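# /individual_group/: manual entry of a single firing group; blank lanes resolve to the placeholder
# id 999 for the duplicate check, and groups left over from the previous day are purged before saving.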
@app.route('/individual_group/', methods=['GET', 'POST'])
def individual_group():
try:
curdate_p=(date.today())- timedelta(1)
#check=mysession.query(TGroup).filter(date==curdate_p).all()
if request.method=="POST":
grp = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
            for i in range(len(tmp_list)):
                for j in range(len(tmp_list)):
                    # 999 is the placeholder for an empty lane, so repeated 999s are not duplicates.
                    if i != j and tmp_list[i] == tmp_list[j] and tmp_list[i] != 999:
                        duplicate = True
if(db.session.query(db.exists().where(TGroup.date == curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
else:
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
except Exception as e:
return render_template('errors/group_view_error.html')
return redirect(url_for('group_view'))
@app.route('/group_view/', methods=['GET', 'POST'])
def group_view():
detail = TGroup.query.all()
return render_template('pages/group_detail_view.html',detail=detail)
@app.route('/group_view/detail/<id>', methods=['GET', 'POST'])
def group_detail_view(id):
view = TGroup.query.filter(TGroup.group_no == id)
return render_template('pages/group_detail_view_id.html' , data = view)
@app.route('/group_details/edit/<id>', methods=['GET', 'POST'])
def group_detail_edit(id):
firer = TGroup.query.filter(TGroup.group_no == id).first()
form=GroupEditForm(obj=firer)
if form.validate_on_submit():
firer.date=form.date.data
firer.target_1_no=form.target_1_army.data
firer.target_2_no=form.target_2_army.data
firer.target_3_no=form.target_3_army.data
firer.target_4_no=form.target_4_army.data
firer.target_5_no=form.target_5_army.data
firer.target_6_no=form.target_6_army.data
firer.target_7_no=form.target_7_army.data
firer.target_8_no=form.target_8_army.data
firer.group_no=form.group_no.data
db.session.commit()
return redirect(url_for('group_view'))
form.group_no.data=firer.group_no
form.target_1_army.data=firer.target_1_no
form.target_2_army.data=firer.target_2_no
form.target_3_army.data=firer.target_3_no
form.target_4_army.data=firer.target_4_no
form.target_5_army.data=firer.target_5_no
form.target_6_army.data=firer.target_6_no
form.target_7_army.data=firer.target_7_no
form.target_8_army.data=firer.target_8_no
return render_template('pages/group_edit.html' , firer = firer , form=form)
@app.route('/firer_details/detail/<id>', methods=['GET', 'POST'])
def firer_detail_view(id):
firer = Shooter.query.filter(Shooter.service_id == id)
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_detail_view.html' , data = firer)
@app.route('/firer_details/edit/<id>', methods=['GET', 'POST'])
def firer_detail_edit(id):
firer = Shooter.query.filter(Shooter.service_id == id).first()
form=RegistrationEditForm(obj=firer)
try:
if form.validate_on_submit():
firer.name = form.name.data
firer.service_id=form.service_id.data
firer.registration_date=form.date.data
gender_obj=Gender.query.filter(Gender.name==form.gender.data).scalar()
firer.gender_id=gender_obj.id
cantonment_obj=Cantonment.query.filter(Cantonment.cantonment==form.cantonment.data ,Cantonment.division==form.div.data).scalar()
firer.cantonment_id=cantonment_obj.id
            rank_obj = Rank.query.filter(Rank.name==form.rank.data).distinct(Rank.id).scalar()
firer.rank_id=rank_obj.id
firer.unit=form.unit.data
firer.brigade=form.brigade.data
db.session.commit()
return redirect(url_for('firer_details'))
form.name.data=firer.name
form.service_id.data=firer.service_id
form.date.data=firer.registration_date
gender_name=Gender.query.filter(Gender.id==firer.gender_id).scalar()
form.gender.data=gender_name.name
cantonment_name=Cantonment.query.filter(Cantonment.id==firer.cantonment_id).scalar()
form.cantonment.data=cantonment_name.cantonment
form.div.data=cantonment_name.division
unit_data=Shooter.query.filter(Shooter.service_id==firer.service_id).scalar()
form.unit.data=unit_data.unit
form.brigade.data=unit_data.brigade
rank_name=Rank.query.filter(Rank.id==firer.rank_id).distinct(Rank.name).scalar()
form.rank.data=rank_name.name
except Exception as e:
return redirect(url_for('error_7'))
return render_template('pages/firer_detail_edit.html' , firer = firer , form=form)
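# /live/: shows the name, service id and rank of the firer currently assigned to each of the eight
# targets in the temporary shooting detail (TShooting).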
@app.route('/live/')
def live():
T1_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_rank = mysession.query(Rank.name).filter(Rank.id==T1_r_id).scalar()
T2_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_rank = mysession.query(Rank.name).filter(Rank.id==T2_r_id).scalar()
T3_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_rank = mysession.query(Rank.name).filter(Rank.id==T3_r_id).scalar()
T4_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_rank = mysession.query(Rank.name).filter(Rank.id==T4_r_id).scalar()
T5_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_rank = mysession.query(Rank.name).filter(Rank.id==T5_r_id).scalar()
T6_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_rank = mysession.query(Rank.name).filter(Rank.id==T6_r_id).scalar()
T7_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_rank = mysession.query(Rank.name).filter(Rank.id==T7_r_id).scalar()
T8_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_rank = mysession.query(Rank.name).filter(Rank.id==T8_r_id).scalar()
return render_template('pages/live.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
@app.route('/cam_detail_2/', methods=['GET', 'POST'])
def cam_detail_2():
return render_template('pages/cam_detail_1.html')
@app.route('/cam_detail_4/', methods=['GET', 'POST'])
def cam_detail_4():
return render_template('pages/cam_detail_2.html')
@app.route('/cam_detail_1/', methods=['GET', 'POST'])
def cam_detail_1():
return render_template('pages/cam_detail_3.html')
@app.route('/cam_detail_3/', methods=['GET', 'POST'])
def cam_detail_3():
return render_template('pages/cam_detail_4.html')
@app.route('/cam_detail_6/', methods=['GET', 'POST'])
def cam_detail_6():
return render_template('pages/cam_detail_5.html')
@app.route('/cam_detail_8/', methods=['GET', 'POST'])
def cam_detail_8():
return render_template('pages/cam_detail_6.html')
@app.route('/cam_detail_7/', methods=['GET', 'POST'])
def cam_detail_7():
return render_template('pages/cam_detail_7.html')
@app.route('/cam_detail_5/', methods=['GET', 'POST'])
def cam_detail_5():
return render_template('pages/cam_detail_8.html')
@app.route('/session_setup/', methods=['GET', 'POST'])
def session_setup():
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('session_config'))
except Exception as e:
        # NOTE: url_for() expects an endpoint name, not a template filename; 'error5_505' is assumed here.
        return redirect(url_for('error5_505'))
return render_template('forms/shooting_form.html', form=form, data =data ,rang=rang , firearmns=firearms, ammunation = ammunation)
@app.route('/configuration/', methods=['GET', 'POST'])
def session_config():
config = Shooting_Session.query.all()
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail.html',con=config)
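# /image_process/: dashboard for the current detail; unassigned lanes fall back to "NA", and
# TShooting is cleared when the previous detail has already been saved (save_flag == 1).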
@app.route('/image_process/')
def image_process():
dt=time.strftime("%Y-%m-%d")
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
data =TShooting.query.scalar()
if(data is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
elif(data.save_flag == 1 ):
db.session.query(TShooting).delete()
db.session.commit()
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
if(T1 is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
else:
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
if(T2 is None):
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
else:
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id,TShooting.target_3_id!=999).scalar()
if(T3 is None):
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
else:
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id,TShooting.target_4_id!=999).scalar()
if(T4 is None):
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
else:
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
if(T5 is None):
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
else:
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
if(T6 is None):
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
else:
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
if(T7 is None):
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
else:
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
if(T8 is None):
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/image_process.html' ,
T1_name=T1_name,
detail_data=detail_data,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
@app.route('/image_edit_1/', methods=['GET', 'POST'])
def image_edit_1():
return render_template('pages/image_edit_1.html')
@app.route('/image_edit_2/', methods=['GET', 'POST'])
def image_edit_2():
return render_template('pages/image_edit_2.html')
@app.route('/image_edit_3/', methods=['GET', 'POST'])
def image_edit_3():
return render_template('pages/image_edit_3.html')
@app.route('/image_edit_4/', methods=['GET', 'POST'])
def image_edit_4():
return render_template('pages/image_edit_4.html')
@app.route('/image_edit_5/', methods=['GET', 'POST'])
def image_edit_5():
return render_template('pages/image_edit_5.html')
@app.route('/image_edit_6/', methods=['GET', 'POST'])
def image_edit_6():
return render_template('pages/image_edit_6.html')
@app.route('/image_edit_7/', methods=['GET', 'POST'])
def image_edit_7():
return render_template('pages/image_edit_7.html')
@app.route('/image_edit_8/', methods=['GET', 'POST'])
def image_edit_8():
return render_template('pages/image_edit_8.html')
@app.route('/configuration/detail/<id>', methods=['GET', 'POST'])
def session_config_detail(id):
config = Shooting_Session.query.filter(Shooting_Session.id == id)
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail_view.html',con=config)
@app.route('/configuration/edit/<id>', methods=['GET', 'POST'])
def shooting_config_edit(id):
edit = Shooting_Session.query.get_or_404(id)
form = SessionEditForm(obj=edit)
if form.validate_on_submit():
edit.session_no = form.session_no.data
edit.date = form.date.data
edit.occasion=form.occ.data
edit.target_distance = form.target_distance.data
ammunation_id=Ammunation.query.filter(Ammunation.name==form.ammunation_name.data).scalar()
edit.ammunation_id=ammunation_id.id
firearms_id=Firearms.query.filter(Firearms.name==form.firerarms_name.data).scalar()
edit.firearms_id=firearms_id.id
range_id=Range.query.filter(Range.name==form.range_name.data).scalar()
edit.shooting_range_id=range_id.id
edit.weather_notes=form.weather_notes.data
edit.comments=form.comments.data
db.session.commit()
return redirect(url_for('session_config'))
form.session_no.data=edit.session_no
form.date.data=edit.date
form.occ.data=edit.occasion
ammunation_name=Ammunation.query.filter(Ammunation.id==edit.ammunation_id).scalar()
form.ammunation_name.data=ammunation_name.name
firerarms_name=Firearms.query.filter(Firearms.id==edit.firearms_id).scalar()
form.firerarms_name.data=firerarms_name.name
range_name=Range.query.filter(Range.id==edit.shooting_range_id).scalar()
form.range_name.data=range_name.name
form.weather_notes.data=edit.weather_notes
form.comments.data=edit.comments
return render_template('pages/shooting_configuration_edit.html',form=form,edit=edit)
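# /detail_dashboard/: read-only view of the current TShooting detail, with "NA" placeholders when
# no detail is loaded.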
@app.route('/detail_dashboard/')
def detail_dashboard():
tshoot=db.session.query(TShooting).scalar()
if(tshoot is None):
T1_name = "NA"
T1_service="NA"
T1_rank ="NA"
T2_name = "NA"
T2_service="NA"
T2_rank ="NA"
T3_name = "NA"
T3_service="NA"
T3_rank ="NA"
T4_name = "NA"
T4_service="NA"
T4_rank ="NA"
T5_name = "NA"
T5_service="NA"
T5_rank ="NA"
T6_name = "NA"
T6_service="NA"
T6_rank ="NA"
T7_name = "NA"
T7_service="NA"
T7_rank ="NA"
T8_name = "NA"
T8_service="NA"
T8_rank ="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id).scalar()
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id).scalar()
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/detail_dashboard.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
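# /adhoc_detail_1/: AJAX lookup by army number (JSON key 'usr'); returns the firer's details, the
# last five grouping results, tendencies and grouping lengths, and today's set 1-4 firer assignments.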
@app.route('/adhoc_detail_1/', methods=['GET', 'POST'])
def adhoc_detail_1():
name_1=None
army=None
rank=None
cant=None
set_1_name=None
set_1_army=None
set_2_name=None
set_2_army=None
set_3_name=None
set_3_army=None
set_4_name=None
set_4_army=None
res=[]
ten=[]
gp_len=[]
if request.method == "POST":
data1 = request.get_json()
army=data1['usr']
curdate=time.strftime("%Y-%m-%d")
name_1=db.session.query(Shooter.name).filter(Shooter.service_id==army).scalar()
target_1_id=db.session.query(Shooter.id).filter(Shooter.service_id==army).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.service_id==army).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.service_id==army).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
set_1_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(Shooter.id==set_1_id).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==2,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(Shooter.id==set_2_id).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==3,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(Shooter.id==set_3_id).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==4,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(Shooter.id==set_4_id).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
return jsonify(name_1=name_1,army=army,rank=rank,cant=cant,
set_1_name=set_1_name,
set_2_name=set_2_name,
set_3_name=set_3_name,
set_4_name=set_4_name,
set_1_army=set_1_army,
set_2_army=set_2_army,
set_3_army=set_3_army,
set_4_army=set_4_army,
gp_len=gp_len,
res=res,
ten=ten
)
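# /individual_score/target_1: prediction page for target 1. The Flask session is cleared, and once
# five sets have been recorded for target 1 its stored Firer_Details rows are deleted (apparently
# so that a new series can start).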
@app.route('/individual_score/target_1', methods=['GET', 'POST'])
def individual_score_target_1():
session.clear()
data=TShooting.query.scalar()
firing_set_arr=[]
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
gender =Gender.query.all()
rank_s = Rank.query.all()
firing_set=db.session.query(Firer_Details.set_no).filter(Firer_Details.target_no==1).distinct().all()
for ele in firing_set:
for ele2 in ele:
firing_set_arr.append(ele2)
if(len(firing_set_arr)<1):
pass
else:
i=len(firing_set_arr)-1
if(firing_set_arr[i]==5):
db.session.query(Firer_Details).filter(Firer_Details.target_no==1).delete()
db.session.commit()
else:
pass
dt=time.strftime("%Y-%m-%d")
curdatetime=datetime.now()
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
name = "NA"
detail_no ="NA"
rank ="NA"
target_no = 1
service_id ="NA"
ten = []
res = []
selection=Shooting_Session.query.filter(Shooting_Session.date>=dt).order_by(Shooting_Session.datetimestamp.desc()).all()
firearms = Firearms.query.all()
rang= Range.query.all()
ammunation = Ammunation.query.all()
return render_template('pages/prediction_target_1.html',
curdatetime=curdatetime,
name = name,
firer_1=firer_1,
rank=rank,
detail_data=detail_data,
detail_no=detail_no,
target_no=target_no,
service_id=service_id,
firearms=firearms,
ammunation=ammunation,
data=selection,
rang=rang,
res=res,
date=dt,
ten=ten,
cantonment=cantonment,
gender=gender,
rank_s=rank_s)
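# /session_target_1/: AJAX endpoint that creates a Shooting_Session from a JSON payload with the
# keys session, range, arms, dis, occ, ammu, weather and comment.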
@app.route('/session_target_1/', methods=['GET', 'POST'])
def session_target_1():
if request.method == "POST":
data1 = request.get_json()
session=data1["session"]
ran=data1["range"]
arms=data1["arms"]
distance=data1["dis"]
occ=data1["occ"]
ammu=data1["ammu"]
weather=data1["weather"]
comment=data1["comment"]
range_id=db.session.query(Range.id).filter(Range.name==ran).scalar()
arms_id=db.session.query(Firearms.id).filter(Firearms.name==arms).scalar()
ammu_id=db.session.query(Ammunation.id).filter(Ammunation.name==ammu).scalar()
shooting=Shooting_Session(
date=time.strftime("%Y-%m-%d"),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=arms_id,
ammunation_id=ammu_id,
target_distance=distance,
weather_notes =weather,
comments =comment,
session_no=session,
occasion=occ
)
db.session.add(shooting)
db.session.commit()
result="This is Successfully Saved"
return jsonify(result=result ,session=session)
@app.route('/target_1_populate/', methods=['GET', 'POST'])
def target_1_populate():
if request.method == 'POST':
session_id=db.session.query(TShooting.session_id).scalar()
return jsonify(session_id=session_id)
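# /load_detail_1/: AJAX endpoint that loads a detail for live firing. The JSON payload carries
# detail, session, paper, shot, set and the lane assignments r1..r8 (blank means an empty lane,
# stored as the placeholder id 999). On success TPaper_ref, TShooting and Session_Detail are
# refreshed and the target-1 firer's history is returned via get_information().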
@app.route('/load_detail_1/', methods=['GET', 'POST'])
def load_detail_1():
result_1="Done"
if request.method == 'POST':
curdate=time.strftime("%Y-%m-%d")
r8=None
data=request.get_json()
tmp_list = []
duplicate = False
detail =data["detail"]
sess=data["session"]
paper=data["paper"]
shot=data["shot"]
set=data["set"]
if(data["r1"]==""):
r1_id=999
else:
r1=data["r1"]
r1_id=db.session.query(Shooter.id).filter(Shooter.service_id==r1).scalar()
if(data["r2"]==""):
r2_id=999
else:
r2=data["r2"]
r2_id=db.session.query(Shooter.id).filter(Shooter.service_id==r2).scalar()
if(data["r3"]==""):
r3_id=999
else:
r3=data["r3"]
r3_id=db.session.query(Shooter.id).filter(Shooter.service_id==r3).scalar()
if(data["r4"]==""):
r4_id=999
else:
r4=data["r4"]
r4_id=db.session.query(Shooter.id).filter(Shooter.service_id==r4).scalar()
if(data["r5"]==""):
r5_id=999
else:
r5=data["r5"]
r5_id=db.session.query(Shooter.id).filter(Shooter.service_id==r5).scalar()
if(data["r6"]==""):
r6_id=999
else:
r6=data["r6"]
r6_id=db.session.query(Shooter.id).filter(Shooter.service_id==r6).scalar()
if(data["r7"]==""):
r7_id=999
else:
r7=data["r7"]
r7_id=db.session.query(Shooter.id).filter(Shooter.service_id==r7).scalar()
if(data["r8"]==""):
r8_id=999
else:
r8=data["r8"]
r8_id=db.session.query(Shooter.id).filter(Shooter.service_id==r8).scalar()
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
tmp_list.append(r8_id)
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
date=time.strftime("%Y-%m-%d"),
paper_ref=paper,
detail_no=detail,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
        # Flag the detail only when two real firers share a lane; 999 is the placeholder
        # for an empty lane and is ignored.
        for i in range(len(tmp_list)):
            for j in range(len(tmp_list)):
                if i != j and tmp_list[i] == tmp_list[j] and tmp_list[i] != 999:
                    duplicate = True
if(duplicate):
print("inside dup")
error="dup"
else:
db.session.query(TShooting).delete()
db.session.commit()
tshoot=TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
set_no=set,
save_flag=0
)
db.session.add(tshoot)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
set_no=set,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
error="ok"
firer_name,cant,rank,service_id,res,tenden,gp_len,set_4_name,set_4_army,set_4_session_no,set_4_detail_no,set_3_name,set_3_army,set_3_session_no,set_3_detail_no,set_2_name,set_2_army,set_2_session_no,set_2_detail_no,set_1_name,set_1_army,set_1_session_no,set_1_detail_no,current_firer_name,current_army_no,current_session_no,current_detail_no=get_information(r1_id,sess,paper)
result="The Detail is Saved Successfully"
return jsonify(result=result,data1=firer_name,ra_1=rank,detail=detail,
service_id_1=service_id,
session=sess,
paper=paper,
set_no=set,
cant=cant,
gp_len=gp_len,
res=res,
ten=tenden,
set_4_name=set_4_name,
set_3_name=set_3_name,
set_2_name=set_2_name,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_4_army=set_4_army,
set_3_army=set_3_army,
set_2_army=set_2_army,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_4_session_no=set_4_session_no,
set_3_session_no=set_3_session_no,
set_2_session_no=set_2_session_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_4_detail_no=set_4_detail_no,
set_3_detail_no=set_3_detail_no,
set_2_detail_no=set_2_detail_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no
)
return jsonify(result_1=result_1)
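# Gathers everything the target-1 prediction page needs for one firer: personal details, the last
# five results, tendencies and grouping lengths, today's set 1-4 assignments on target 1, and the
# current session and detail numbers.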
def get_information(target_1_id,sess,paper_ref):
res=[]
ten=[]
gp_len=[]
curdate=time.strftime("%Y-%m-%d")
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(int(ele6))
da_1=db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==target_1_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==target_1_id).scalar()
return(da_1,cant,ra_1,service_id_1,res,ten,gp_len,
set_4_name,set_4_army,set_4_session_no,set_4_detail_no,
set_3_name,set_3_army,set_3_session_no,set_3_detail_no,
set_2_name,set_2_army,set_2_session_no,set_2_detail_no,
set_1_name,set_1_army,set_1_session_no,set_1_detail_no,
current_firer_name,current_army_no,current_session_no,current_detail_no
)
@app.route('/individual_score/target_2', methods=['GET', 'POST'])
def individual_score_target_2():
firer_id =db.session.query(TShooting.target_2_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 2
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres,)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
if request.method == 'POST':
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print("paper_ref")
print(paper_ref)
return render_template('pages/prediction_target_2.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_3', methods=['GET', 'POST'])
def individual_score_target_3():
firer_id =db.session.query(TShooting.target_3_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 3
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_3.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_4', methods=['GET', 'POST'])
def individual_score_target_4():
firer_id =db.session.query(TShooting.target_4_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 4
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_4.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_5', methods=['GET', 'POST'])
def individual_score_target_5():
firer_id =db.session.query(TShooting.target_5_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 5
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_5.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_6', methods=['GET', 'POST'])
def individual_score_target_6():
firer_id =db.session.query(TShooting.target_6_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 6
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_6.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_7', methods=['GET', 'POST'])
def individual_score_target_7():
firer_id =db.session.query(TShooting.target_7_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 7
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_7.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_8', methods=['GET', 'POST'])
def individual_score_target_8():
firer_id =db.session.query(TShooting.target_8_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
    target_no = 8
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_8.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/prediction_target_1/', methods=['GET', 'POST'])
def prediction_target_1():
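    """AJAX endpoint behind the target 1 prediction view.

    On POST it runs prediction_calculation_1() for the current firer, collects the set 2-4 shot
    coordinates recorded today for this session, converts the MPI from pixels to inches, and
    returns the firer/set metadata plus all coordinates as JSON for the chart."""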
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,detail,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_1()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 ,Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
set_2_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
set_3_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
print(set_3_x_arr)
set_4_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
set_4_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
print("set_2_detail_no")
print(set_2_detail_no)
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
@app.route('/prediction_target_2/', methods=['GET', 'POST'])
def prediction_target_2():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
        current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_2_id==firer_id).scalar()
        current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_2_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
fin_x_arr_1=[]
fin_y_arr_1=[]
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
@app.route('/prediction_target_3/', methods=['GET', 'POST'])
def prediction_target_3():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_3()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
            for y_3 in set_3_y:
                set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
            for y_4 in set_4_y:
                set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
        current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_3_id==firer_id).scalar()
        current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_3_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
fin_x_arr_1=[]
fin_y_arr_1=[]
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
@app.route('/prediction_target_4/', methods=['GET', 'POST'])
def prediction_target_4():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_4()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
        current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_4_id==firer_id).scalar()
        current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_4_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
@app.route('/prediction_target_5/', methods=['GET', 'POST'])
def prediction_target_5():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_5()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
        current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_5_id==firer_id).scalar()
        current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_5_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
@app.route('/prediction_target_6/', methods=['GET', 'POST'])
def prediction_target_6():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_6()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
        current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_6_id==firer_id).scalar()
        current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_6_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j = | pd.Series(Tfirt_x) | pandas.Series |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # P1 REST API
#
# - This Jupyter notebook is an example of how to access the REST API, described at:
# https://doc.particle.one/
# %% [markdown]
# # Credentials / Settings
# %%
# %load_ext autoreload
# %autoreload 2
import json
import os
import pandas as pd
import requests
# %%
# Enter your token here.
# You can get your token by signing up at `www.particle.one`.
# P1_API_TOKEN = "YOUR_TOKEN_HERE"
# An example token is like:
# P1_API_TOKEN = "e<PASSWORD>"
P1_API_TOKEN = os.environ["P1_API_TOKEN"]
print("P1_API_TOKEN=", P1_API_TOKEN)
HEADERS = {
"Authorization": f"Token {P1_API_TOKEN}",
"Content-Type": "application/json",
}
# %% [markdown]
# # Search query structure
#
# Search query is a Python `dict` with the following structure:
# ```python
# query = {
# "text": "",
# "commodity": [],
# "business_category": "",
# "country": [],
# "frequency": []
# }
# ```
# The fields are:
# - `text`: string. Works as a filter. Free text. Everything that has no match with this phrase will be filtered out.
# - `commodity`: list of strings. Works as a filter. You can find valid values in paragraph 7.1 of this notebook.
# - `business_category`: string. Works as a filter. You can find valid values in paragraph 7.2 of this notebook.
# - `country`: list of strings. Works as a filter. You can find valid values in paragraph 7.3 of this notebook.
# - `frequency`: list of strings. Works as a filter. You can find valid values in paragraph 7.4 of this notebook.
#
# Combination of fields work with logical operator AND.
# E.g. you will get all records that satisfy all filters.
#
# `text` **AND** `commodity` **AND** `business_category` **AND** `country` **AND** `frequency`
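# As a quick illustration of the AND semantics above, the sketch below builds a query that applies
# several filters at once. The country and frequency values are only assumptions for the example;
# the valid vocabularies are listed later in this notebook.
# %%
# Example only: every filter must match for a record to be returned.
example_query = {
    "text": "export",
    "commodity": ["Corn"],
    "business_category": "",
    "country": ["United States"],
    "frequency": ["Monthly"],
}
print(json.dumps(example_query, indent=2))
# %% [markdown]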
# %% [markdown]
# # Imports
# %% [markdown]
# # POST data-api/v1/search-count/
# Returns count for the given query.
# %%
# Build entrypoint url.
base_url = "https://data.particle.one"
count_url = os.path.join(base_url, "data-api/v1/search-count/")
print("count_url=", count_url)
# %%
# Prepare query.
query = {
"text": "",
"commodity": ["Corn"],
"business_category": "",
"country": [],
"frequency": [],
}
payload = json.dumps(query)
# %%
# Perform query.
response = requests.request("POST", count_url, headers=HEADERS, data=payload)
data = json.loads(response.text.encode("utf8"))
print("data=", data)
# %% [markdown]
# # POST data-api/v1/search/
#
# - It returns the first chunk of the payload metadata for the given query, where a chunk is 1000 records.
# - It also returns `scroll_id` to get the next portion of the data.
# %%
search_url = os.path.join(base_url, "data-api/v1/search/")
print("search_url=", search_url)
# %%
# Prepare query.
query = {
"text": "Gas",
"commodity": [],
"business_category": "",
"country": [],
"frequency": [],
}
payload = json.dumps(query)
# %%
# Perform query.
response = requests.request("POST", search_url, headers=HEADERS, data=payload)
data = json.loads(response.text.encode("utf8"))
print("data.keys()=", list(data.keys()))
assert "detail" not in data, data
print("total_count=", data["total_count"])
# Saving scroll_id for the next query.
scroll_id = data["scroll_id"]
print("scroll_id=", scroll_id)
df = pd.DataFrame.from_records(data["rows"])
print("df.shape=", df.shape)
print("df.head()=")
display(df.head())
# %% [markdown]
# # GET data-api/v1/search-scroll/?scroll_id=
# %%
# Build entrypoint url.
# We use scroll id from the previous query.
search_scroll_url = os.path.join(
base_url, f"data-api/v1/search-scroll/?scroll_id={scroll_id}"
)
print("search_scroll_url=", search_scroll_url)
# %%
# Perform query.
response = requests.request("GET", search_scroll_url, headers=HEADERS)
data = json.loads(response.text.encode("utf8"))
print("data.keys()=", list(data.keys()))
print("data['rows'][0]=", data["rows"][0])
df = pd.DataFrame.from_records(data["rows"])
print("df.shape=", df.shape)
print("df.head()=")
display(df.head())
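# %% [markdown]
# A minimal pagination sketch: keep calling the scroll endpoint until no more rows come back.
# The stopping condition (an empty `rows` list) and the presence of a fresh `scroll_id` in each
# response are assumptions; adjust them to whatever the API actually returns at the end of the
# result set.
# %%
def fetch_all_rows(scroll_id, max_pages=5):
    # Accumulate rows page by page using the scroll id returned by the search endpoint.
    frames = []
    for _ in range(max_pages):
        url = os.path.join(base_url, f"data-api/v1/search-scroll/?scroll_id={scroll_id}")
        resp = requests.request("GET", url, headers=HEADERS)
        chunk = json.loads(resp.text.encode("utf8"))
        rows = chunk.get("rows", [])
        if not rows:
            break
        frames.append(pd.DataFrame.from_records(rows))
        # Assumption: each response carries the scroll_id to use for the next page.
        scroll_id = chunk.get("scroll_id", scroll_id)
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
# Example usage (commented out to avoid extra API calls on a plain top-to-bottom run):
# all_rows_df = fetch_all_rows(scroll_id)
# print(all_rows_df.shape)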
# %% [markdown]
# # GET data-api/v1/payload/?payload_id=
# Returns payload for the given `payload_id`
# %%
# Build entrypoint url.
# We use one of the `payload_id` from one of the previous queries.
payload_id = "8f26ba4734df3a62352cce9d64987d64da54b400"
payload_url = os.path.join(
base_url, f"data-api/v1/payload/?payload_id={payload_id}"
)
print("payload_url=", payload_url)
# %%
# Perform query.
response = requests.request("GET", payload_url, headers=HEADERS)
data = json.loads(response.text.encode("utf8"))
print("data.keys()=", list(data.keys()))
df = | pd.DataFrame.from_records(data["payload_data"]) | pandas.DataFrame.from_records |
from six import string_types, text_type, PY2
from docassemble.webapp.core.models import MachineLearning
from docassemble.base.core import DAObject, DAList, DADict
from docassemble.webapp.db_object import db
from sqlalchemy import or_, and_
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
import re
import random
import codecs
from io import open
if PY2:
import cPickle as pickle
else:
import pickle
import datetime
import os
import yaml
import json
import sys
from pattern.vector import count, KNN, SVM, stem, PORTER, words, Document
from docassemble.base.logger import logmessage
from docassemble.webapp.backend import get_info_from_file_reference
from docassemble.webapp.fixpickle import fix_pickle_obj
import docassemble.base.functions
learners = dict()
svms = dict()
lastmodtime = dict()
reset_counter = dict()
class MachineLearningEntry(DAObject):
"""An entry in the machine learning system"""
def classify(self, dependent=None):
"""Sets the dependent variable of the machine learning entry"""
if dependent is not None:
self.dependent = dependent
self.ml.set_dependent_by_id(self.id, self.dependent)
return self
def save(self):
"""Saves the entry to the data set. The independent variable must be
defined in order to save."""
args = dict(independent=self.independent)
if hasattr(self, 'dependent'):
args['dependent'] = self.dependent
if hasattr(self, 'key'):
args['key'] = self.key
if hasattr(self, 'id'):
args['id'] = self.id
if hasattr(self, 'info') and self.info is not None:
args['info'] = self.info
self.ml._save_entry(**args)
return self
def predict(self, probabilities=False):
"""Returns predictions for this entry's independent variable."""
return self.ml.predict(self.independent, probabilities=probabilities)
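# Illustrative sketch of the entry workflow (the variable names and values are
# made up); entries are normally obtained from a machine learner defined below:
#
#     entry = ml.one_unclassified_entry(key='intake')
#     if entry is not None:
#         entry.classify('housing')          # sets the dependent variable
#     new = ml.new_entry(independent='some text', key='intake')
#     new.save()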
class MachineLearner(object):
"""Base class for machine learning objects"""
def __init__(self, *pargs, **kwargs):
if len(pargs) > 0:
if ':' in pargs[0]:
raise Exception("MachineLearner: you cannot use a colon in a machine learning name")
question = docassemble.base.functions.get_current_question()
if question is not None:
self.group_id = question.interview.get_ml_store() + ':' + pargs[0]
else:
self.group_id = pargs[0]
if len(pargs) > 1:
self.initial_file = pargs[1]
if 'group_id' in kwargs:
self.group_id = kwargs['group_id']
if 'initial_file' in kwargs:
self.initial_file = kwargs['initial_file']
if kwargs.get('use_initial_file', False):
question = docassemble.base.functions.get_current_question()
if question is not None:
self.initial_file = question.interview.get_ml_store()
self.reset_counter = 0
def reset(self):
self.reset_counter += 1
def _initialize(self, reset=False):
if hasattr(self, 'initial_file'):
self.start_from_file(self.initial_file)
if hasattr(self, 'group_id') and (self.group_id not in lastmodtime or reset):
lastmodtime[self.group_id] = datetime.datetime(year=1970, month=1, day=1)
        if hasattr(self, 'group_id'):
            # Record the counter for this group so that subclasses can detect
            # when the cached learner needs to be rebuilt.
            reset_counter[self.group_id] = self.reset_counter
def export_training_set(self, output_format='json', key=None):
self._initialize()
output = list()
for entry in self.classified_entries(key=key):
the_entry = dict(independent=entry.independent, dependent=entry.dependent)
if entry.info is not None:
the_entry['info'] = entry.info
output.append(the_entry)
if output_format == 'json':
return json.dumps(output, sort_keys=True, indent=4)
elif output_format == 'yaml':
return yaml.safe_dump(output, default_flow_style=False)
else:
raise Exception("Unknown output format " + str(output_format))
def dependent_in_use(self, key=None):
in_use = set()
if key is None:
query = db.session.query(MachineLearning.dependent).filter(MachineLearning.group_id == self.group_id).group_by(MachineLearning.dependent)
else:
query = db.session.query(MachineLearning.dependent).filter(and_(MachineLearning.group_id == self.group_id, MachineLearning.key == key)).group_by(MachineLearning.dependent)
for record in query:
if record.dependent is not None:
in_use.add(fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64')))
return sorted(in_use)
def is_empty(self):
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id).first()
if existing_entry is None:
return True
return False
def start_from_file(self, fileref):
#logmessage("Starting from file " + str(fileref))
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id).first()
if existing_entry is not None:
return
file_info = get_info_from_file_reference(fileref, folder='sources')
if 'fullpath' not in file_info or file_info['fullpath'] is None or not os.path.exists(file_info['fullpath']):
return
#raise Exception("File reference " + str(fileref) + " is invalid")
with open(file_info['fullpath'], 'rU', encoding='utf-8') as fp:
content = fp.read()
if 'mimetype' in file_info and file_info['mimetype'] == 'application/json':
aref = json.loads(content)
elif 'extension' in file_info and file_info['extension'].lower() in ['yaml', 'yml']:
aref = yaml.load(content, Loader=yaml.FullLoader)
if type(aref) is dict and hasattr(self, 'group_id'):
the_group_id = re.sub(r'.*:', '', self.group_id)
if the_group_id in aref:
aref = aref[the_group_id]
if type(aref) is list:
nowtime = datetime.datetime.utcnow()
for entry in aref:
if 'independent' in entry:
new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), dependent=codecs.encode(pickle.dumps(entry.get('dependent', None)), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=True, key=entry.get('key', None), info=codecs.encode(pickle.dumps(entry['info']), 'base64').decode() if entry.get('info', None) is not None else None)
db.session.add(new_entry)
db.session.commit()
def add_to_training_set(self, independent, dependent, key=None, info=None):
self._initialize()
nowtime = datetime.datetime.utcnow()
new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(independent), 'base64').decode(), dependent=codecs.encode(pickle.dumps(dependent), 'base64').decode(), info=codecs.encode(pickle.dumps(info), 'base64').decode() if info is not None else None, create_time=nowtime, modtime=nowtime, active=True, key=key)
db.session.add(new_entry)
db.session.commit()
return new_entry.id
def save_for_classification(self, indep, key=None, info=None):
self._initialize()
if key is None:
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, dependent=None, independent=codecs.encode(pickle.dumps(indep), 'base64').decode()).first()
else:
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, key=key, independent=codecs.encode(pickle.dumps(indep), 'base64').decode()).first()
if existing_entry is not None:
logmessage("entry is already there")
return existing_entry.id
new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(indep), 'base64').decode(), create_time=datetime.datetime.utcnow(), active=False, key=key, info=codecs.encode(pickle.dumps(info), 'base64').decode() if info is not None else None)
db.session.add(new_entry)
db.session.commit()
return new_entry.id
def retrieve_by_id(self, the_id):
self._initialize()
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).first()
if existing_entry is None:
raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
if existing_entry.dependent:
dependent = fix_pickle_obj(codecs.decode(bytearray(existing_entry.dependent, encoding='utf-8'), 'base64'))
return MachineLearningEntry(ml=self, id=existing_entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(existing_entry.independent, encoding='utf-8'), 'base64')), dependent=dependent, create_time=existing_entry.create_time, key=existing_entry.key, info=fix_pickle_obj(codecs.decode(bytearray(existing_entry.info, encoding='utf-8'), 'base64')) if existing_entry.info is not None else None)
else:
return MachineLearningEntry(ml=self, id=existing_entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(existing_entry.independent, encoding='utf-8'), 'base64')), create_time=existing_entry.create_time, key=existing_entry.key, info=fix_pickle_obj(codecs.decode(bytearray(existing_entry.info, encoding='utf-8'), 'base64')) if existing_entry.info is not None else None)
def one_unclassified_entry(self, key=None):
self._initialize()
if key is None:
entry = MachineLearning.query.filter_by(group_id=self.group_id, active=False).order_by(MachineLearning.id).first()
else:
entry = MachineLearning.query.filter_by(group_id=self.group_id, key=key, active=False).order_by(MachineLearning.id).first()
if entry is None:
return None
return MachineLearningEntry(ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), create_time=entry.create_time, key=entry.key, info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None)._set_instance_name_for_method()
def new_entry(self, **kwargs):
return MachineLearningEntry(ml=self, **kwargs)._set_instance_name_for_method()
def unclassified_entries(self, key=None):
self._initialize()
results = DAList()._set_instance_name_for_method()
results.gathered = True
if key is None:
query = MachineLearning.query.filter_by(group_id=self.group_id, active=False).order_by(MachineLearning.id).all()
else:
query = MachineLearning.query.filter_by(group_id=self.group_id, key=key, active=False).order_by(MachineLearning.id).all()
for entry in query:
results.appendObject(MachineLearningEntry, ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), create_time=entry.create_time, key=entry.key, info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None)
return results
def classified_entries(self, key=None):
self._initialize()
results = DAList()
results.gathered = True
results.set_random_instance_name()
if key is None:
query = MachineLearning.query.filter_by(group_id=self.group_id, active=True).order_by(MachineLearning.id).all()
else:
query = MachineLearning.query.filter_by(group_id=self.group_id, active=True, key=key).order_by(MachineLearning.id).all()
for entry in query:
results.appendObject(MachineLearningEntry, ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), dependent=fix_pickle_obj(codecs.decode(bytearray(entry.dependent, encoding='utf-8'), 'base64')), info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None, create_time=entry.create_time, key=entry.key)
return results
def _save_entry(self, **kwargs):
self._initialize()
the_id = kwargs.get('id', None)
need_to_reset = False
if the_id is None:
the_entry = MachineLearning(group_id=self.group_id)
existing = False
else:
the_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).first()
existing = True
if the_entry is None:
raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
if 'dependent' in kwargs:
if existing and the_entry.dependent is not None and the_entry.dependent != kwargs['dependent']:
need_to_reset = True
the_entry.dependent = codecs.encode(pickle.dumps(kwargs['dependent']), 'base64').decode()
the_entry.active = True
if 'independent' in kwargs:
if existing and the_entry.independent is not None and the_entry.independent != kwargs['independent']:
need_to_reset = True
the_entry.independent = codecs.encode(pickle.dumps(kwargs['independent']), 'base64').decode()
if 'key' in kwargs:
the_entry.key = kwargs['key']
if 'info' in kwargs:
the_entry.info = codecs.encode(pickle.dumps(kwargs['info']), 'base64').decode()
the_entry.modtime = datetime.datetime.utcnow()
if not existing:
db.session.add(the_entry)
db.session.commit()
if need_to_reset:
self.reset()
def set_dependent_by_id(self, the_id, the_dependent):
self._initialize()
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).with_for_update().first()
if existing_entry is None:
db.session.commit()
raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
existing_entry.dependent = codecs.encode(pickle.dumps(the_dependent), 'base64').decode()
existing_entry.modtime = datetime.datetime.utcnow()
existing_entry.active = True
db.session.commit()
def delete_by_id(self, the_id):
self._initialize()
MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).delete()
db.session.commit()
self.reset()
def delete_by_key(self, key):
self._initialize()
MachineLearning.query.filter_by(group_id=self.group_id, key=key).delete()
db.session.commit()
self.reset()
def save(self):
db.session.commit()
def _train_from_db(self):
#logmessage("Doing train_from_db")
self._initialize()
nowtime = datetime.datetime.utcnow()
success = False
for record in MachineLearning.query.filter(and_(MachineLearning.group_id == self.group_id, MachineLearning.active == True, MachineLearning.modtime > lastmodtime[self.group_id])).all():
#logmessage("Training...")
self._train(fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64')), fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64')))
success = True
lastmodtime[self.group_id] = nowtime
return success
def delete_training_set(self):
self._initialize()
        MachineLearning.query.filter_by(group_id=self.group_id).delete()
db.session.commit()
def _train(self, indep, depend):
pass
def _predict(self, indep):
pass
class SimpleTextMachineLearner(MachineLearner):
"""A class used to interact with the machine learning system, using the K Nearest Neighbors method"""
def _learner(self):
return KNN()
def _initialize(self):
"""Initializes a fresh machine learner."""
        need_to_reset = False
        if self.group_id not in reset_counter or self.reset_counter != reset_counter[self.group_id]:
            need_to_reset = True
if hasattr(self, 'group_id') and (self.group_id not in learners or need_to_reset):
learners[self.group_id] = self._learner()
return super(SimpleTextMachineLearner, self)._initialize(reset=need_to_reset)
def _train(self, indep, depend):
"""Trains the machine learner given an independent variable and a corresponding dependent variable."""
if indep is None:
return
the_text = re.sub(r'[\n\r]+', r' ', indep).lower()
learners[self.group_id].train(Document(the_text.lower(), stemmer=PORTER), depend)
def predict(self, indep, probabilities=False):
"""Returns a list of predicted dependent variables for a given independent variable."""
indep = re.sub(r'[\n\r]+', r' ', indep).lower()
if not self._train_from_db():
return list()
probs = dict()
for key, value in learners[self.group_id].classify(Document(indep.lower(), stemmer=PORTER), discrete=False).items():
probs[key] = value
if not len(probs):
single_result = learners[self.group_id].classify(Document(indep.lower(), stemmer=PORTER))
if single_result is not None:
probs[single_result] = 1.0
if probabilities:
return [(x, probs[x]) for x in sorted(probs.keys(), key=probs.get, reverse=True)]
else:
return sorted(probs.keys(), key=probs.get, reverse=True)
def confusion_matrix(self, key=None, output_format=None, split=False):
"""Returns a confusion matrix for the model based on splitting the data set randomly into two pieces, training on one and testing on the other"""
if split:
list_of_dependent = self.dependent_in_use(key=key)
else:
list_of_dependent = [None]
output = ''
matrices = dict()
for current_dep in list_of_dependent:
testing_set = list()
model = self._learner()
for record in self.classified_entries(key=key):
if split:
dep_result = str(record.dependent == current_dep)
else:
dep_result = record.dependent
if random.random() < 0.5:
model.train(Document(record.independent.lower(), stemmer=PORTER), dep_result)
else:
testing_set.append((Document(record.independent.lower(), stemmer=PORTER), dep_result))
matrix = model.confusion_matrix(documents=testing_set)
matrices[current_dep] = matrix
if output_format == 'html':
if split:
output += '<h4>' + current_dep + "</h4>"
vals = matrix.keys()
output += '<table class="table table-bordered"><thead><tr><td></td><td></td><td style="text-align: center" colspan="' + str(len(vals)) + '">Actual</td></tr><tr><th></th><th></th>'
first = True
for val in vals:
output += '<th>' + val + '</th>'
output += '</tr></thead><tbody>'
for val_a in vals:
output += '<tr>'
if first:
output += '<td style="text-align: right; vertical-align: middle;" rowspan="' + str(len(vals)) + '">Predicted</td>'
first = False
output += '<th>' + val_a + '</th>'
for val_b in vals:
output += '<td>' + str(matrix[val_b].get(val_a, 0)) + '</td>'
output += '</tr>'
output += '</tbody></table>'
#output += "\n\n`" + str(matrix) + "`"
# output += '<ul>'
# for document, actual in testing_set:
# predicted = model.classify(document)
# output += '<li>Predicted: ' + predicted + '; Actual: ' + actual + '</li>'
# output += '</ul>'
if output_format == 'html':
return output
if split:
ret_val = matrices
else:
ret_val = matrices[None]
if output_format == 'json':
return json.dumps(ret_val, sort_keys=True, indent=4)
if output_format == 'yaml':
return yaml.safe_dump(ret_val, default_flow_style=False)
if output_format is None:
return ret_val
return ret_val
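    # Illustrative call (sketch): render an HTML confusion matrix, computed
    # separately for each dependent value.
    #
    #     ml.confusion_matrix(output_format='html', split=True)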
def reset(self):
"""Clears the cache of the machine learner"""
return super(SimpleTextMachineLearner, self).reset()
def delete_training_set(self):
"""Deletes all of the training data in the database"""
return super(SimpleTextMachineLearner, self).delete_training_set()
def delete_by_key(self, key):
"""Deletes all of the training data in the database that was added with a given key"""
        return super(SimpleTextMachineLearner, self).delete_by_key(key)
def delete_by_id(self, the_id):
"""Deletes the entry in the training data with the given ID"""
return super(SimpleTextMachineLearner, self).delete_by_id(the_id)
def set_dependent_by_id(self, the_id, depend):
"""Sets the dependent variable for the entry in the training data with the given ID"""
return super(SimpleTextMachineLearner, self).set_dependent_by_id(the_id, depend)
def classified_entries(self, key=None):
"""Returns a list of entries in the data that have been classified."""
return super(SimpleTextMachineLearner, self).classified_entries(key=key)
def unclassified_entries(self, key=None):
"""Returns a list of entries in the data that have not yet been classified."""
return super(SimpleTextMachineLearner, self).unclassified_entries(key=key)
def one_unclassified_entry(self, key=None):
"""Returns the first entry in the data that has not yet been classified, or None if all entries have been classified."""
return super(SimpleTextMachineLearner, self).one_unclassified_entry(key=key)
def retrieve_by_id(self, the_id):
"""Returns the entry in the data that has the given ID."""
return super(SimpleTextMachineLearner, self).retrieve_by_id(the_id)
def save_for_classification(self, indep, key=None, info=None):
"""Creates a not-yet-classified entry in the data for the given independent variable and returns the ID of the entry."""
return super(SimpleTextMachineLearner, self).save_for_classification(indep, key=key, info=info)
def add_to_training_set(self, indep, depend, key=None, info=None):
"""Creates an entry in the data for the given independent and dependent variable and returns the ID of the entry."""
return super(SimpleTextMachineLearner, self).add_to_training_set(indep, depend, key=key, info=info)
def is_empty(self):
"""Returns True if no data have been defined, otherwise returns False."""
return super(SimpleTextMachineLearner, self).is_empty()
def dependent_in_use(self, key=None):
"""Returns a sorted list of unique dependent variables in the data."""
return super(SimpleTextMachineLearner, self).dependent_in_use(key=key)
def export_training_set(self, output_format='json'):
"""Returns the classified entries in the data as JSON or YAML."""
return super(SimpleTextMachineLearner, self).export_training_set(output_format=output_format)
def new_entry(self, **kwargs):
"""Creates a new entry in the data."""
return super(SimpleTextMachineLearner, self).new_entry(**kwargs)
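# Illustrative usage sketch (the group name, texts and predicted values below are
# made up):
#
#     ml = SimpleTextMachineLearner('fruit')
#     ml.add_to_training_set("it is yellow and curved", "banana")
#     ml.add_to_training_set("it is red and round", "apple")
#     ml.predict("small, round and red")                 # e.g. ['apple', 'banana']
#     ml.predict("long and yellow", probabilities=True)  # e.g. [('banana', 0.75), ('apple', 0.25)]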
class SVMMachineLearner(SimpleTextMachineLearner):
"""Machine Learning object using the Symmetric Vector Machine method"""
def _learner(self):
return SVM(extension='libsvm')
class RandomForestMachineLearner(MachineLearner):
def _learner(self):
return RandomForestClassifier(n_jobs=2)
def feature_importances(self):
"""Returns the importances of each of the features"""
if not self._train_from_db():
return list()
return learners[self.group_id]['learner'].feature_importances_
def _initialize(self):
"""Initializes a fresh machine learner."""
        need_to_reset = False
        if self.group_id not in reset_counter or self.reset_counter != reset_counter[self.group_id]:
            need_to_reset = True
if hasattr(self, 'group_id') and (self.group_id not in learners or need_to_reset):
learners[self.group_id] = dict(learner=self._learner(), dep_type=None, indep_type=dict(), indep_categories=dict(), dep_categories=None)
return super(RandomForestMachineLearner, self)._initialize(reset=need_to_reset)
def _train_from_db(self):
#logmessage("Doing train_from_db")
self._initialize()
nowtime = datetime.datetime.utcnow()
success = False
data = list()
depend_data = list()
for record in MachineLearning.query.filter(and_(MachineLearning.group_id == self.group_id, MachineLearning.active == True, MachineLearning.modtime > lastmodtime[self.group_id])).all():
indep_var = fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64'))
depend_var = fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64'))
if type(depend_var) is str:
depend_var = text_type(depend_var)
if learners[self.group_id]['dep_type'] is not None:
if type(depend_var) is not learners[self.group_id]['dep_type']:
if type(depend_var) is int and learners[self.group_id]['dep_type'] is float:
depend_var = float(depend_var)
elif type(depend_var) is float and learners[self.group_id]['dep_type'] is int:
learners[self.group_id]['dep_type'] = float
else:
raise Exception("RandomForestMachineLearner: dependent variable type was not consistent")
else:
if not isinstance(depend_var, (string_types, int, bool, float)):
raise Exception("RandomForestMachineLearner: dependent variable type for key " + repr(key) + " was not a standard variable type")
learners[self.group_id]['dep_type'] = type(depend_var)
depend_data.append(depend_var)
if isinstance(indep_var, DADict):
indep_var = indep_var.elements
if type(indep_var) is not dict:
raise Exception("RandomForestMachineLearner: independent variable was not a dictionary")
for key, val in indep_var.items():
if type(val) is str:
val = text_type(val)
if key in learners[self.group_id]['indep_type']:
if type(val) is not learners[self.group_id]['indep_type'][key]:
if type(val) is int and learners[self.group_id]['indep_type'][key] is float:
val = float(val)
elif type(val) is float and learners[self.group_id]['indep_type'][key] is int:
learners[self.group_id]['indep_type'][key] = float
else:
raise Exception("RandomForestMachineLearner: independent variable type for key " + repr(key) + " was not consistent")
else:
if not isinstance(val, (string_types, int, bool, float)):
raise Exception("RandomForestMachineLearner: independent variable type for key " + repr(key) + " was not a standard variable type")
learners[self.group_id]['indep_type'][key] = type(val)
data.append(indep_var)
success = True
if success:
df = | pd.DataFrame(data) | pandas.DataFrame |
import os
import warnings
import contextlib
import tempfile
import zipfile
from typing import Iterable
import logging
import pandas as pd
from .base import BaseEndpoint
from ..models.odata import Series, Task
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def _with_dir_path(dir_path=None):
# prepare dir_path
if dir_path is None:
temp_dir = tempfile.TemporaryDirectory()
dir_path = temp_dir.name
else:
temp_dir, dir_path = None, dir_path
# call function
try:
yield dir_path
finally:
# cleanup
if temp_dir is not None:
temp_dir.cleanup()
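# Illustrative usage of _with_dir_path (sketch): when no dir_path is given, a
# temporary directory is created and cleaned up automatically.
#
#     with _with_dir_path() as dir_path:
#         target = os.path.join(dir_path, "export.zip")   # hypothetical file name
#         ...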
class SeriesEndpoint(BaseEndpoint):
def select_data(
self,
series: Iterable[Series],
start=None,
end=None,
resample=None,
dropna=None,
closed=None,
resample_rule=None,
convention=None,
start_mode=None,
utc_now=None,
max_acceptable_delay=None,
max_rows_nb=None,
clock=None,
with_tags=None,
unfilter=None,
return_df=True
):
# prepare tsdb pks
otsdb_pks = [se.otsdb_pk for se in series]
# check not empty
if len(otsdb_pks) == 0:
raise ValueError("series must at least contain one series, none was found")
# prepare params
params = dict()
for k in (
"start",
"end",
"resample",
"dropna",
"closed",
"resample_rule",
"convention",
"start_mode",
"utc_now",
"max_acceptable_delay",
"max_rows_nb",
"clock",
"with_tags",
"unfilter"
):
v = locals()[k]
if v is not None:
params[k] = v
# perform request
series_data = self.client.rest_client.list_action(
"odata/series",
"post",
"multi_select",
params=params,
data={"otsdb_pk": otsdb_pks}
)
        # see if the response was cut (1e6 is the max number of points returned by the backend)
max_points_per_series = int(1e6) // len(otsdb_pks)
for se_dict in series_data.values():
if len(se_dict["index"]) == max_points_per_series:
warnings.warn(
"You requested more data than allowed on the platform (maximum number of points: 1.000.000).\n"
f"This caused the series returned here to be cut to a maximum of {max_points_per_series} points.\n"
"To get the full results, please launch an export (recommended) or split your current request into "
"several smaller requests (query a smaller number of series at a time, "
"or use the start and end arguments)",
stacklevel=2
)
break
if not return_df:
return series_data
# parse to data frame
df_data = {}
for pk, se_dict in series_data.items():
se = pd.Series(se_dict["data"], se_dict["index"])
df_data[se_dict["name"]] = se
df = | pd.DataFrame(df_data) | pandas.DataFrame |
"""
This package will create the simplified comix matrix needed by simple_network_sim basing itself in the following data:
- The CoMix matrix: https://cmmid.github.io/topics/covid19/reports/20200327_comix_social_contacts.xlsx
- The Scottish demographics (NRS): ftp://boydorr.gla.ac.uk/scrc/human/demographics/scotland/data/demographics.h5
- The England & Wales demographics (ONS): https://www.nomisweb.co.uk/api/v01/dataset/NM_2010_1.data.csv (the data was
aggregated into the wales_england_pop.csv file)
Python requirements:
- pandas
- xlrd
- h5py
How to run this module:
Assuming you have your environment setup with conda, this script can be run with
```
python comix_downsampler.py
```
That will generate a file called mixing-matrix.csv. That file can then be copied into
`sample_input_files/mixing-matrix/1/data.csv` inside the simple network sim model
(https://github.com/ScottishCovidResponse/simple_network_sim)
"""
import io
import urllib.request
from ftplib import FTP
from typing import NamedTuple, List
import h5py
import numpy as np
import pandas as pd
ContactsTable = pd.DataFrame
ComixTable = pd.DataFrame
class Data(NamedTuple):
"""
All data needed to do the downsampling
"""
comix: ComixTable
population: pd.Series
def main():
data = download()
contacts = comix_to_contacts(data.comix, _aggregate_pop_full_comix(data.population, data.comix))
contacts = split_17_years_old(contacts, data.population)
contacts = collapse_columns(contacts, ["[0,5)", "[5,17)"], "[0,17)")
contacts = collapse_columns(contacts, ["17", "[18,30)", "[30,40)", "[40,50)", "[50,60)", "[60,70)"], "[17,70)")
# The 70+ entry is already what we want
comix = contacts_to_comix(contacts, _aggregate_pop_simplified_comix(data.population, contacts))
_flatten(comix).to_csv("mixing-matrix.csv", index=False)
def collapse_columns(df: ContactsTable, names: List[str], new_name: str) -> ContactsTable:
"""
This function assumes that df has both columns and indexes identified by the same `names`. They will all be added
    together to create a new column and row named `new_name`. E.g.:
>>> df = ContactsTable(pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}, index=list("abc")))
>>> collapse_columns(df, ["a", "b"], "a'")
a' c
a' 12 15
c 9 9
    :param df: a ContactsTable-style table, i.e. an upper-triangular matrix
:param names: name of the columns and indexes to aggregate
:param new_name: name of the new column and row that will be created
:return: A dataframe with collapsed columns and indexes
"""
if not names:
raise ValueError("Names must be a non-empty list")
missing_columns = set(names) - set(df.columns)
if missing_columns:
raise ValueError(f"Names mismatch: {missing_columns}")
if not all(df.columns == df.index):
raise ValueError("Indexes and columns must match")
agg = df.copy()
agg[names[0]] = df[names].sum(axis=1)
agg = agg.rename({names[0]: new_name}, axis=1)
agg = agg.drop(columns=names[1:])
agg.loc[names[0]] = agg.loc[names].sum()
agg = agg.rename({names[0]: new_name}, axis=0)
agg = agg.drop(index=names[1:])
return ContactsTable(agg)
def comix_to_contacts(comix: ComixTable, agg: pd.DataFrame) -> ContactsTable:
"""
Converts the CoMix matrix to a matrix of contacts (total number of contacts rather than averages). Although the
CoMix matrix is not symmetric, the contacts matrix should be. That will not always happen given the population
    we have. Therefore, we average out both triangles and return an upper-triangular matrix
"""
contacts = comix * agg
# Now we are averaging out the triangles so the matrix becomes symmetric
averaged = (np.tril(contacts).T + np.triu(contacts)) / 2
return ContactsTable(pd.DataFrame(averaged, columns=comix.columns, index=comix.index))
def contacts_to_comix(contacts: ContactsTable, agg: pd.DataFrame) -> ComixTable:
"""
Converts a matrix of contacts to the CoMix matrix
"""
contacts = pd.DataFrame(contacts.T + np.triu(contacts, k=1), columns=contacts.columns, index=contacts.index)
return ComixTable(contacts / agg)
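# Sketch of the round trip used in main() (illustrative): converting the CoMix
# matrix to total contacts and back yields a symmetrised CoMix matrix.
#
#     agg = _aggregate_pop_full_comix(pop, comix)
#     contacts = comix_to_contacts(comix, agg)
#     comix_again = contacts_to_comix(contacts, agg)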
def split_17_years_old(contacts: ContactsTable, pop: pd.Series) -> ContactsTable:
"""
    The original CoMix matrix has the [0,5) and [5,18) ranges, whereas we need [0,17). Adding two
    columns together is a simple operation, but we first need to move the 17-year-olds out of [5,18) into the
    [18,30) range and rename the ranges.
    Based on the age-stratified population, this function will move a number of contacts proportional to the
    proportion of 17-year-olds in the [5,18) population.
:param contacts: The upper triangular contact matrix
:param pop: age stratified population series
:return:
"""
age_groups = list(contacts.columns)
proportion_17 = pop[17] / pop[5:17].sum()
contacts = contacts.copy()
contacts["17"] = contacts["[5,18)"] * proportion_17
contacts.loc["17"] = contacts.loc["[5,18)"] * proportion_17
# special case
contacts["17"]["17"] = contacts["[5,18)"]["[5,18)"] * proportion_17 ** 2
# The following two lines will calculate contacts["[5,17)"]["[5,17)"] twice
contacts["[5,17)"] = contacts["[5,18)"] * (1 - proportion_17)
contacts.loc["[5,17)"] = contacts.loc["[5,18)"] * (1 - proportion_17)
# this will fix that
contacts["[5,17)"]["[5,17)"] = contacts["[5,18)"]["[5,18)"] * (1 - proportion_17)
# special cases
contacts.loc["[5,17)", "17"] = contacts.loc["[5,18)", "[5,18)"] * (1 - proportion_17) * proportion_17
contacts.loc["17", "[5,17)"] = 0.0
# reorder the table columns and indexes
age_groups.insert(age_groups.index("[18,30)"), "17")
age_groups[age_groups.index("[5,18)")] = "[5,17)"
return ContactsTable(contacts.loc[age_groups, age_groups])
def download() -> Data:
"""
Downloads the CoMix matrix and the population data
:return: Data object with all the needed data for the downsampling
"""
return Data(comix=download_comix(), population=download_nrs() + download_ons())
def download_comix() -> ComixTable:
"""
Downloads the CoMix matrix from an external source
:return: the original comix table
"""
filename, _ = urllib.request.urlretrieve(
"https://cmmid.github.io/topics/covid19/reports/20200327_comix_social_contacts.xlsx"
)
df = pd.read_excel(filename, sheet_name="All_contacts_imputed")
return ComixTable(df.set_index("Unnamed: 0"))
def download_nrs() -> pd.Series:
"""
Downloads NRS data from the university's FTP
:return: Population series, indexed by age
"""
ftp = FTP("boydorr.gla.ac.uk")
ftp.set_pasv(False)
ftp.login()
buf = io.BytesIO()
ftp.retrbinary("RETR scrc/human/demographics/scotland/data/demographics.h5", buf.write)
h5 = h5py.File(buf, "r")
people = h5["hb"]["1year"]["persons"]
ages = [int(s.replace(b"AGE", b"").replace(b"+", b"")) for s in people["Dimension_2_names"]]
# df: columns are the locations (Dimension 1) and the rows are the ages
df = pd.DataFrame(people["array"], index=ages, columns=list(people["Dimension_1_names"])).T
# We don't care about the locations, so just add them up
return df.sum()
def download_ons() -> pd.Series:
"""
Downlads ONS data from university's FTP
:return: Population series, indexed by age
"""
# TODO: This data is not yet integrated into SCRCdata. Below are the steps to generate it:
#
# The bash script below was used to scrape all the data from upstream (takes ~5 hours to finish):
# set -ex
#
# offset=0
# step=24000
# while true; do
# curl -H 'Accept-Encoding: deflate, gzip;q=1.0, *;q=0.5' -s "https://www.nomisweb.co.uk/api/v01/dataset/NM_2010_1.data.csv?measures=20100&time=latest&gender=0&geography=TYPE299&RecordLimit=$step&RecordOffset=$offset" > NM_2010_1.$offset.csv.gz
# if [ $(zcat "NM_2010_1.$offset.csv.gz" | wc -l) -lt $step ]; then
# break
# fi
# offset=$(( offset + step ))
# done
#
# After running that, a bit of bash processing is still required. First, we need to decompress it
# $ for x in *gz; do gunzip $x; done
# Then we need to remove the header
# $ head -1 NM_2010_1.0.csv > header
# $ for x in *.csv; do sed -i 1d $x; done
# Aggregate them all into a single file
# cat header $(for x in $(ls -1 *.csv | sed 's/NM_2010_1.//' | sort -n); do echo NM_2010_1.$x; done) > NM_2010_1.csv
# Finally, this will need dask, if you don't have enough memory:
# >>> import dask.dataframe as dd
# >>> df = dd.read_csv("NM_2010_1.csv")
# >>> tot = df[df.C_AGE.isin(list(range(101,192)))].groupby("C_AGE").OBS_VALUE.sum().compute()
    # >>> tot.index = list(range(0, 91))
# >>> tot.to_frame("POPULATION").to_csv("wales_england_pop.csv", index_label="AGE")
# That's the csv file we are reading below
df = pd.read_csv("wales_england_pop.csv", index_col="AGE")
return df.POPULATION
def _aggregate_pop_full_comix(pop: pd.Series, target: pd.DataFrame) -> pd.DataFrame:
"""
Aggregates the population matrix based on the CoMix table.
:param pop: 1-year based population
:param target: target dataframe we will want to multiply or divide with
    :return: Returns a dataframe that can be multiplied with the comix matrix to get a table of contacts, or it can be
used to divide the contacts table to get the CoMix back
"""
agg = pd.DataFrame(
{
"[0,5)": [pop[:5].sum()],
"[5,18)": [pop[5:18].sum()],
"[18,30)": [pop[18:30].sum()],
"[30,40)": [pop[30:40].sum()],
"[40,50)": [pop[40:50].sum()],
"[50,60)": [pop[50:60].sum()],
"[60,70)": [pop[60:70].sum()],
"70+": [pop[70:].sum()],
}
)
return pd.concat([agg] * len(target.columns)).set_index(target.index).T
def _aggregate_pop_simplified_comix(pop: pd.Series, target: pd.DataFrame) -> pd.DataFrame:
"""
Aggregates the population matrix based on the CoMix table.
:param pop: 1-year based population
:param target: target dataframe we will want to multiply or divide with
:return: Retuns a dataframe that can be multiplied with the comix matrix to get a table of contacts or it can be
used to divide the contacts table to get the CoMix back
"""
agg = pd.DataFrame({"[0,17)": [pop[:17].sum()], "[17,70)": [pop[17:69].sum()], "70+": [pop[70:].sum()]})
return pd.concat([agg] * len(target.columns)).set_index(target.index).T
def _flatten(comix: ComixTable):
rows = []
for source, columns in comix.iterrows():
for target, mixing in columns.iteritems():
rows.append({"source": source, "target": target, "mixing": mixing})
return | pd.DataFrame(rows) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 08:47:38 2018
@author: cenv0574
"""
import os
import json
import pandas as pd
import geopandas as gpd
from itertools import product
def load_config():
# Define current directory and data directory
config_path = os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'config.json')
)
with open(config_path, 'r') as config_fh:
config = json.load(config_fh)
return config
def load_table(data_path):
vnm_IO_path = os.path.join(data_path,"INPUT-OUTPUT TABLE 2012","IO Table 2012 English.xlsx")
return pd.read_excel(vnm_IO_path,sheet_name='IO_clean',index_col=0)
def load_sectors(data_path):
vnm_IO_path = os.path.join(data_path,"INPUT-OUTPUT TABLE 2012","IO Table 2012 English.xlsx")
vnmIO_rowcol = pd.read_excel(vnm_IO_path,sheet_name='SectorName')
return vnmIO_rowcol
def get_final_sector_classification():
return ['secA','secB','secC','secD','secE','secF','secG','secH','secI']
def map_sectors(vnm_IO_rowcol):
row_only = vnm_IO_rowcol[vnm_IO_rowcol['mapped'].str.contains("row") | vnm_IO_rowcol['mapped'].str.contains("sec") ]
col_only = vnm_IO_rowcol[vnm_IO_rowcol['mapped'].str.contains("col") | vnm_IO_rowcol['mapped'].str.contains("sec") ]
return dict(zip(row_only.code,row_only.mapped)),dict(zip(col_only.code,col_only.mapped))
def aggregate_table(vnm_IO,vnm_IO_rowcol,in_million=True):
sectors = get_final_sector_classification()
#aggregate table
mapper_row,mapper_col = map_sectors(vnm_IO_rowcol)
vnm_IO.index = vnm_IO.index.map(mapper_row.get)
vnm_IO.columns = vnm_IO.columns.to_series().map(mapper_col)
aggregated = vnm_IO.groupby(vnm_IO.index,axis=0).sum().groupby(vnm_IO.columns, axis=1).sum()
aggregated = aggregated.reindex(sectors+['col1','col2','col3'],axis='columns')
aggregated = aggregated.reindex(sectors+['row1','row2','row3'],axis='index')
if in_million == True:
return aggregated/1000000
else:
return aggregated
def is_balanced(io_table):
row = io_table.sum(axis=0)
col = io_table.sum(axis=1)
if ((row-col).sum() < 1):
print('Table is balanced')
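# Illustrative flow for the helpers above (sketch; the config.json key used to
# obtain data_path is hypothetical):
#
#     config = load_config()
#     data_path = config['data_path']   # hypothetical key
#     vnm_IO = load_table(data_path)
#     vnm_IO_rowcol = load_sectors(data_path)
#     aggregated = aggregate_table(vnm_IO, vnm_IO_rowcol, in_million=True)
#     is_balanced(aggregated)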
def load_provincial_stats(data_path):
prov_path = os.path.join(data_path,'Vietnam_boundaries','boundaries_stats','province_level_stats.shp')
return gpd.read_file(prov_path)
def estimate_gva(regions,in_million=True):
if in_million == True:
return list(((regions.pro_nfirm*regions.laborcost)+(regions.pro_nfirm*regions.capital))/1000000)
else:
return list(((regions.pro_nfirm*regions.laborcost)+(regions.pro_nfirm*regions.capital)))
def create_proxies(data_path,notrade=False,own_production_ratio=0.9,min_rice=True):
provinces = load_provincial_stats(data_path)
provinces.name_eng = provinces.name_eng.apply(lambda x: x.replace(' ','_').replace('-','_'))
od_table = load_od(data_path,min_rice=min_rice)
create_indices(data_path,provinces,write_to_csv=True)
create_regional_proxy(data_path,provinces,write_to_csv=True)
create_sector_proxies(data_path,provinces,write_to_csv=True)
create_zero_proxies(data_path,od_table,notrade=notrade,write_to_csv=True)
if notrade == False:
create_level14_proxies(data_path,od_table,own_production_ratio,write_to_csv=True)
def create_regional_proxy(data_path,regions,write_to_csv=True):
regions['raw_gva'] = estimate_gva(regions) #regions['pro_nfirm']*regions['laborcost'] + regions['pro_nfirm']*regions['capital']
subset = regions.loc[:,['name_eng','raw_gva']]
subset['year'] = 2010
subset['raw_gva'] = subset.raw_gva.apply(int)/(subset['raw_gva'].sum(axis='index'))
subset = subset[['year','name_eng','raw_gva']]
subset.columns = ['year','id','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_reg_vnm.csv')
subset.to_csv(csv_path,index=False)
def create_indices(data_path,provinces,write_to_csv=True):
# prepare index and cols
region_names = list(provinces.name_eng)
rowcol_names = list(load_sectors(data_path)['mapped'].unique())
rows = [x for x in rowcol_names if (x.startswith('sec') | x.startswith('row'))]*len(region_names)
region_names_list = [item for sublist in [[x]*12 for x in region_names] for item in sublist]
indices = pd.DataFrame([region_names_list,rows]).T
indices.columns = ['region','sector']
indices['sector'] = indices['sector'].apply(lambda x: x.replace('row','other'))
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','indices_mrio.csv')
indices.to_csv(csv_path,index=False)
def create_sector_proxies(data_path,regions,write_to_csv=True):
#list of sectors
sector_list = get_final_sector_classification()
#get own sector classification for region file
map_dict = map_sect_vnm_to_eng()
regions=regions.rename(columns = map_dict)
# get sectoral gva based on proportion of firms in the region
sector_shares = regions[sector_list].multiply(regions['raw_gva'],axis='index')
sector_shares.index = regions.name_eng
for sector in sector_list+['other1','other2','other3']:
if sector in ['other1','other2','other3']:
subset = pd.DataFrame(sector_shares.sum(axis='columns')).divide(pd.DataFrame(sector_shares.sum(axis='columns')).sum(axis='index'))
subset.columns = [sector]
else:
subset = pd.DataFrame(sector_shares.loc[:,sector]).divide(pd.DataFrame(sector_shares.loc[:,sector]).sum(axis='index'))
subset.reset_index(inplace=True,drop=False)
subset['year'] = 2010
subset['sector'] = sector+str(1)
subset[sector] = subset[sector].apply(lambda x: round(x,7))
subset = subset[['year','sector','name_eng',sector]]
subset.columns = ['year','sector','region','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_{}.csv'.format(sector))
subset.to_csv(csv_path,index=False)
def get_trade_value(x,sum_use,sector,own_production_ratio=0.9):
if x.Destination == x.Origin:
try:
return list(sum_use.loc[(sum_use['region'] == x.Destination) & (sum_use['sector'] == sector)]['value'])[0]*own_production_ratio
except:
return 1
elif x.gdp == 0:
return 0
else:
try:
return list(sum_use.loc[(sum_use['region'] == x.Destination) & (sum_use['sector'] == sector)]['value'])[0]*(1-own_production_ratio)*x.ratio
except:
return 0
def create_level14_proxies(data_path,od_table,own_production_ratio=0.9,write_to_csv=True):
# get sector list
sector_list_ini = get_final_sector_classification()+['other1','other2','other3']
sector_list = [x+str(1) for x in sector_list_ini]
od_table.loc[od_table['Destination'] == od_table['Origin'],'gdp'] = 10
od_sum = pd.DataFrame(od_table.groupby(['Destination','Origin']).sum().sum(axis=1))
od_sum['ratio'] = od_sum.groupby(level=0).apply(lambda x:
x / float(x.sum()))
od_sum.reset_index(inplace=True)
od_sum.columns = ['Destination','Origin','gdp','ratio']
df_pretable = pd.read_csv(os.path.join(data_path,'IO_analysis','MRIO_TABLE','notrade_trade.csv'),index_col=[0,1],header=[0,1])
df_pretable = df_pretable.iloc[:,:567]
sum_use = df_pretable.sum(axis=1)
sum_use = pd.DataFrame(sum_use*0.1)
sum_use.reset_index(inplace=True)
sum_use.columns = ['region','sector','value']
combine = []
for sector in sector_list:
if sector[:-1] in ['other1','other2','other3']:
subset = od_sum.copy()
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
subset.drop('ratio',axis=1,inplace=True)
combine.append(subset)
else:
subset = od_sum.copy()
subset = subset.loc[od_sum.gdp != 0]
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = subset.apply(lambda x: get_trade_value(x,sum_use,sector[:-1],own_production_ratio),axis=1) #subset['gdp'].apply(lambda x: round(x,2))
subset.drop('ratio',axis=1,inplace=True)
combine.append(subset)
all_ = pd.concat(combine)
final_sub = all_[['year','sector','Origin','Destination','gdp']]
final_sub.columns = ['year','sector','region','region','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_trade14_{}.csv'.format(sector[:-1]))
final_sub.to_csv(csv_path,index=False)
def create_zero_proxies(data_path,od_table,notrade=False,write_to_csv=True):
# get sector list
sector_list = get_final_sector_classification()+['other1','other2','other3']
sector_list = [x+str(1) for x in sector_list]
#map sectors to be the same
mapper = map_regions()
od_table['Destination'] = od_table['Destination'].apply(lambda x: mapper[x])
od_table['Origin'] = od_table['Origin'].apply(lambda x: mapper[x])
od_table = od_table.loc[od_table['Destination'] != od_table['Origin']]
od_sum = pd.DataFrame(od_table.groupby(['Destination','Origin']).sum().sum(axis=1))
od_sum.reset_index(inplace=True)
od_sum.columns = ['Destination','Origin','gdp']
if notrade == True:
od_sum['gdp'] = 0
for sector in sector_list:
if sector[:-1] in ['other1','other2','other3']:
subset = od_sum.copy()
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
combine = []
for sector2 in sector_list:
sub_subset = subset.copy()
sub_subset['subsector'] = sector2
combine.append(sub_subset)
else:
subset = od_sum.copy()
if notrade == False:
subset = subset.loc[od_sum.gdp == 0]
subset['year'] = 2010
subset['sector'] = sector
subset['gdp'] = 0
combine = []
for sector2 in sector_list:
sub_subset = subset.copy()
sub_subset['subsector'] = sector2
combine.append(sub_subset)
all_ = pd.concat(combine)
final_sub = all_[['year','sector','Origin','subsector','Destination','gdp']]
final_sub.columns = ['year','sector','region','sector','region','gdp']
if write_to_csv == True:
csv_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','proxy_trade_{}.csv'.format(sector[:-1]))
final_sub.to_csv(csv_path,index=False)
def load_output(data_path,provinces,notrade=True):
# prepare index and cols
region_names = list(provinces.name_eng)
rowcol_names = list(load_sectors(data_path)['mapped'].unique())
rows = [x for x in rowcol_names if (x.startswith('sec') | x.startswith('row'))]*len(region_names)
cols = [x for x in rowcol_names if (x.startswith('sec') | x.startswith('col'))]*len(region_names)
region_names_list = [item for sublist in [[x]*12 for x in region_names] for item in sublist]
index_mi = pd.MultiIndex.from_arrays([region_names_list,rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list,cols], names=('region', 'col'))
# read output
if notrade == True:
output_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','output_notrade.csv')
else:
output_path = os.path.join(data_path,'IO_analysis','MRIO_TABLE','output.csv')
output_df = pd.read_csv(output_path,header=None)
output_df.index = index_mi
output_df.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in rowcol_names if x.startswith('sec')]*len(region_names)
col_only = [x for x in rowcol_names if x.startswith('col')]*len(region_names)
region_col = [item for sublist in [[x]*9 for x in region_names] for item in sublist] + [item for sublist in [[x]*3 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays([region_col,sector_only+col_only], names=('region', 'col'))
#sum va and imports
tax_sub = output_df.loc[output_df.index.get_level_values(1)=='row1'].sum(axis='index')
import_ = output_df.loc[output_df.index.get_level_values(1)=='row2'].sum(axis='index')
valueA = output_df.loc[output_df.index.get_level_values(1)=='row3'].sum(axis='index')
output_new = pd.concat([output_df.loc[~output_df.index.get_level_values(1).isin(['row1','row2','row3'])], | pd.DataFrame(tax_sub) | pandas.DataFrame |
#!python3
# -*- coding:utf-8 -*-
import os
import numpy as np
import pandas as pd
'''
This source code is a sample of using the pandas library:
Series and DataFrame.
'''
# Series object: a one-dimensional labeled array.
# It is not an ndarray, a list, or another sequence object.
print("make instance of Series")
ser = | pd.Series([10,20,30,40]) | pandas.Series |
"""
A class to carry localization data.
"""
import copy
import logging
import time
import warnings
from itertools import accumulate
import numpy as np
import pandas as pd
from google.protobuf import json_format, text_format
try:
from scipy.spatial import QhullError
except ImportError:
from scipy.spatial.qhull import QhullError # needed for Python 3.7
import locan.data.hulls
from locan import locdata_id # is required to use locdata_id as global variable
from locan.constants import PROPERTY_KEYS, PropertyKey
from locan.data import metadata_pb2
from locan.data.metadata_utils import _modify_meta, metadata_to_formatted_string
from locan.data.region import Region, RoiRegion
__all__ = ["LocData"]
logger = logging.getLogger(__name__)
class LocData:
"""
This class carries localization data, aggregated properties and meta data.
Data consist of individual elements being either localizations or other `LocData` objects. Both, localizations and
`Locdata` objects have properties. Properties come from the original data or are added by analysis procedures.
Parameters
----------
references : LocData, list(LocData), None
A `LocData` reference or an array with references to `LocData` objects referring to the selected localizations
in dataset.
dataframe : pandas.DataFrame, None
Dataframe with localization data.
indices : slice object, list(int), None
Indices for dataframe in references that makes up the data. `indices` refers to index label, not position.
meta : locan.data.metadata_pb2.Metadata, dictionary
Metadata about the current dataset and its history.
Attributes
----------
references : LocData, list(LocData), None
A LocData reference or an array with references to LocData objects referring to the selected localizations
in dataframe.
dataframe : pandas.DataFrame, None
Dataframe with localization data.
indices : slice object, list(int), None
Indices for dataframe in references that makes up the data.
meta : locan.data.metadata_pb2.Metadata
Metadata about the current dataset and its history.
properties : pandas.DataFrame
List of properties generated from data.
coordinate_labels : list of str
The available coordinate properties.
dimension : int
Number of coordinates available for each localization (i.e. size of `coordinate_labels`).
"""
count = 0
"""int: A counter for counting LocData instantiations (class attribute)."""
def __init__(
self, references=None, dataframe=pd.DataFrame(), indices=None, meta=None
):
self.__class__.count += 1
self.references = references
self.dataframe = dataframe
self.indices = indices
self.meta = metadata_pb2.Metadata()
self.properties = {}
# regions and hulls
self._region = None
self._bounding_box = None
self._oriented_bounding_box = None
self._convex_hull = None
self._alpha_shape = None
self._inertia_moments = None
self.coordinate_labels = sorted(
list(
set(self.data.columns).intersection(
{"position_x", "position_y", "position_z"}
)
)
)
self.dimension = len(self.coordinate_labels)
self._update_properties()
# meta
global locdata_id
locdata_id += 1
self.meta.identifier = str(locdata_id)
self.meta.creation_time.GetCurrentTime()
self.meta.source = metadata_pb2.DESIGN
self.meta.state = metadata_pb2.RAW
self.meta.element_count = len(self.data.index)
if "frame" in self.data.columns:
self.meta.frame_count = len(self.data["frame"].unique())
if meta is None:
pass
elif isinstance(meta, dict):
for key, value in meta.items():
setattr(self.meta, key, value)
else:
self.meta.MergeFrom(meta)
def _update_properties(self):
self.properties["localization_count"] = len(self.data.index)
# property for mean spatial coordinates (centroids)
self.properties.update(dict(self.data[self.coordinate_labels].mean()))
self.bounding_box # update self._bounding_box
def __del__(self):
"""Updating the counter upon deletion of class instance."""
self.__class__.count -= 1
def __len__(self):
"""Return the length of data, i.e. the number of elements (localizations or collection elements)."""
return len(self.data.index)
def __getstate__(self):
"""Modify pickling behavior."""
# Copy the object's state from self.__dict__ to avoid modifying the original state.
state = self.__dict__.copy()
# Serialize the unpicklable protobuf entries.
json_string = json_format.MessageToJson(
self.meta, including_default_value_fields=False
)
state["meta"] = json_string
return state
def __setstate__(self, state):
"""Modify pickling behavior."""
# Restore instance attributes.
self.__dict__.update(state)
# Restore protobuf class for meta attribute
self.meta = metadata_pb2.Metadata()
self.meta = json_format.Parse(state["meta"], self.meta)
def __copy__(self):
"""
Create a shallow copy of locdata (keeping all references) with the following exceptions:
(i) The class variable `count` is increased for the copied LocData object.
(ii) Metadata keeps the original meta.creation_time while meta.modification_time and meta.history is updated.
"""
new_locdata = LocData(self.references, self.dataframe, self.indices, meta=None)
new_locdata._region = self._region
# meta
meta_ = _modify_meta(
self, new_locdata, function_name="LocData.copy", parameter=None, meta=None
)
new_locdata.meta = meta_
return new_locdata
def __deepcopy__(self, memodict=None):
"""
Create a deep copy of locdata (including all references) with the following exceptions:
(i) The class variable `count` is increased for all deepcopied LocData objects.
(ii) Metadata keeps the original meta.creation_time while meta.modification_time and meta.history is updated.
"""
if memodict is None:
memodict = {}
new_locdata = LocData(
copy.deepcopy(self.references, memodict),
copy.deepcopy(self.dataframe, memodict),
copy.deepcopy(self.indices, memodict),
meta=None,
)
new_locdata._region = self._region
# meta
meta_ = _modify_meta(
self,
new_locdata,
function_name="LocData.deepcopy",
parameter=None,
meta=None,
)
new_locdata.meta = meta_
return new_locdata
@property
def bounding_box(self):
"""Hull object: Return an object representing the axis-aligned minimal bounding box."""
if self._bounding_box is None:
try:
self._bounding_box = locan.data.hulls.BoundingBox(self.coordinates)
self.properties["region_measure_bb"] = self._bounding_box.region_measure
if self._bounding_box.region_measure:
self.properties["localization_density_bb"] = (
self.properties["localization_count"]
/ self._bounding_box.region_measure
)
if self._bounding_box.subregion_measure:
self.properties[
"subregion_measure_bb"
] = self._bounding_box.subregion_measure
except ValueError:
warnings.warn(
"Properties related to bounding box could not be computed.",
UserWarning,
)
return self._bounding_box
@property
def convex_hull(self):
"""Hull object: Return an object representing the convex hull of all localizations."""
if self._convex_hull is None:
try:
self._convex_hull = locan.data.hulls.ConvexHull(self.coordinates)
self.properties["region_measure_ch"] = self._convex_hull.region_measure
if self._convex_hull.region_measure:
self.properties["localization_density_ch"] = (
self.properties["localization_count"]
/ self._convex_hull.region_measure
)
except (TypeError, QhullError):
warnings.warn(
"Properties related to convex hull could not be computed.",
UserWarning,
)
return self._convex_hull
@property
def oriented_bounding_box(self):
"""Hull object: Return an object representing the oriented minimal bounding box."""
if self._oriented_bounding_box is None:
try:
self._oriented_bounding_box = locan.data.hulls.OrientedBoundingBox(
self.coordinates
)
self.properties[
"region_measure_obb"
] = self._oriented_bounding_box.region_measure
if self._oriented_bounding_box.region_measure:
self.properties["localization_density_obb"] = (
self.properties["localization_count"]
/ self._oriented_bounding_box.region_measure
)
self.properties["orientation_obb"] = self._oriented_bounding_box.angle
self.properties[
"circularity_obb"
] = self._oriented_bounding_box.elongation
except TypeError:
warnings.warn(
"Properties related to oriented bounding box could not be computed.",
UserWarning,
)
return self._oriented_bounding_box
@property
def alpha_shape(self):
"""Hull object: Return an object representing the alpha-shape of all localizations."""
return self._alpha_shape
def update_alpha_shape(self, alpha):
"""Compute the alpha shape for specific `alpha` and update `self.alpha_shape`.
Parameters
----------
alpha : float
Alpha parameter specifying a unique alpha complex.
Returns
-------
LocData
The modified object
"""
try:
if self._alpha_shape is None:
self._alpha_shape = locan.data.hulls.AlphaShape(
points=self.coordinates, alpha=alpha
)
else:
self._alpha_shape.alpha = alpha
self.properties["region_measure_as"] = self._alpha_shape.region_measure
try:
self.properties["localization_density_as"] = (
self._alpha_shape.n_points_alpha_shape
/ self._alpha_shape.region_measure
)
except ZeroDivisionError:
self.properties["localization_density_as"] = float("nan")
except TypeError:
warnings.warn(
"Properties related to alpha shape could not be computed.", UserWarning
)
return self
def update_alpha_shape_in_references(self, alpha):
"""
Compute the alpha shape for each element in `locdata.references` and update `locdata.dataframe`.
Returns
-------
LocData
The modified object
"""
if isinstance(self.references, list):
for reference in self.references:
reference.update_alpha_shape(alpha=alpha)
new_df = pd.DataFrame(
[reference.properties for reference in self.references]
)
new_df.index = self.data.index
self.dataframe.update(new_df)
new_columns = [
column for column in new_df.columns if column in self.dataframe.columns
]
new_df.drop(columns=new_columns, inplace=True, errors="ignore")
self.dataframe = pd.concat([self.dataframe, new_df], axis=1)
return self
@property
def inertia_moments(self):
"""Inertia moments are returned as computed by :func:`locan.data.properties.inertia_moments`."""
if self._inertia_moments is None:
try:
self._inertia_moments = locan.data.properties.inertia_moments(
self.coordinates
)
self.properties["orientation_im"] = self._inertia_moments.orientation
self.properties["circularity_im"] = self._inertia_moments.eccentricity
except TypeError:
warnings.warn(
"Properties related to inertia_moments could not be computed.",
UserWarning,
)
return self._inertia_moments
def update_inertia_moments_in_references(self):
"""
Compute inertia_moments for each element in locdata.references and update locdata.dataframe.
Returns
-------
LocData
The modified object
"""
if isinstance(self.references, list):
for reference in self.references:
reference.inertia_moments # request property to update
new_df = pd.DataFrame(
[reference.properties for reference in self.references]
)
new_df.index = self.data.index
self.dataframe.update(new_df)
new_columns = [
column for column in new_df.columns if column in self.dataframe.columns
]
new_df.drop(columns=new_columns, inplace=True, errors="ignore")
self.dataframe = pd.concat([self.dataframe, new_df], axis=1)
return self
@property
def region(self):
"""RoiRegion object: Return the region that supports all localizations."""
return self._region
@region.setter
def region(self, region):
if region is not None:
if region.dimension != self.dimension:
raise TypeError(
"Region dimension and coordinates dimension must be identical."
)
elif len(self) != len(region.contains(self.coordinates)):
logger.warning("Not all coordinates are within region.")
if isinstance(region, (Region, RoiRegion)) or region is None:
self._region = region
elif isinstance(
region, dict
): # legacy code to deal with deprecated RoiLegacy_0
region_ = RoiRegion(**region)
if region_ is not None:
if region_.dimension != self.dimension:
raise TypeError(
"Region dimension and coordinates dimension must be identical."
)
elif len(self) != len(region_.contains(self.coordinates)):
logger.warning("Not all coordinates are within region.")
self._region = region_
else:
raise TypeError
# property for region measures
if self._region is not None:
if self._region.region_measure:
self.properties["region_measure"] = self._region.region_measure
self.properties["localization_density"] = (
self.meta.element_count / self._region.region_measure
)
if self._region.subregion_measure:
self.properties["subregion_measure"] = self._region.subregion_measure
@property
def data(self):
"""pandas.DataFrame: Return all elements either copied from the reference or referencing the current
dataframe. """
if isinstance(self.references, LocData):
# we refer to the localization data by its index label, not position
# in other words we decided not to use iloc but loc
# df = self.references.data.loc[self.indices] ... but this does not work in pandas.
# also see:
# https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike
try:
df = self.references.data.loc[self.indices]
except KeyError:
df = self.references.data.loc[
self.references.data.index.intersection(self.indices)
]
df = pd.merge(
df, self.dataframe, left_index=True, right_index=True, how="outer"
)
return df
else:
return self.dataframe
@property
def coordinates(self):
"""ndarray: Return all coordinate values. """
return self.data[self.coordinate_labels].values
@property
def centroid(self):
"""ndarray: Return coordinate values of the centroid
(being the property values for all coordinate labels)."""
return np.array(
[
self.properties[coordinate_label]
for coordinate_label in self.coordinate_labels
]
)
@classmethod
def from_dataframe(cls, dataframe=pd.DataFrame(), meta=None):
"""
Create new LocData object from pandas.DataFrame with localization data.
Parameters
----------
dataframe : pandas.DataFrame
Localization data.
meta : locan.data.metadata_pb2.Metadata
Metadata about the current dataset and its history.
Returns
-------
LocData
A new LocData instance with dataframe representing the localization data.
"""
dataframe = dataframe
meta_ = metadata_pb2.Metadata()
meta_.source = metadata_pb2.DESIGN
meta_.state = metadata_pb2.RAW
meta_.history.add(name="LocData.from_dataframe")
if meta is None:
pass
elif isinstance(meta, dict):
for key, value in meta.items():
setattr(meta_, key, value)
else:
meta_.MergeFrom(meta)
return cls(dataframe=dataframe, meta=meta_)
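# Usage sketch (illustrative only, not from the upstream source): how
# `from_dataframe` is typically called. Column names follow locan's standard
# property keys; the values are invented sample data.
#
#   import pandas as pd
#   df = pd.DataFrame({"position_x": [0.0, 1.0], "position_y": [2.0, 3.0]})
#   locdata = LocData.from_dataframe(dataframe=df)
#   locdata.properties["localization_count"]   # -> 2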
@classmethod
def from_coordinates(cls, coordinates=(), coordinate_labels=None, meta=None):
"""
Create new LocData object from a sequence of localization coordinates.
Parameters
----------
coordinates : sequence of tuples with shape (n_localizations, dimension)
Sequence of tuples with localization coordinates
coordinate_labels : sequence of str
The available coordinate properties.
meta : locan.data.metadata_pb2.Metadata
Metadata about the current dataset and its history.
Returns
-------
LocData
A new LocData instance with dataframe representing the coordinate data.
"""
if np.size(coordinates):
dimension = len(coordinates[0])
if coordinate_labels is None:
coordinate_labels = ["position_x", "position_y", "position_z"][
0:dimension
]
else:
if all(cl in PROPERTY_KEYS for cl in coordinate_labels):
coordinate_labels = coordinate_labels
else:
raise ValueError(
"The given coordinate_labels are not standard property keys."
)
dataframe = pd.DataFrame.from_records(
data=coordinates, columns=coordinate_labels
)
else:
dataframe = pd.DataFrame()
meta_ = metadata_pb2.Metadata()
meta_.source = metadata_pb2.DESIGN
meta_.state = metadata_pb2.RAW
meta_.history.add(name="LocData.from_coordinates")
if meta is None:
pass
elif isinstance(meta, dict):
for key, value in meta.items():
setattr(meta_, key, value)
else:
meta_.MergeFrom(meta)
return cls(dataframe=dataframe, meta=meta_)
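# Usage sketch (illustrative only): `from_coordinates` with inferred and with
# explicit coordinate_labels; the tuples are invented sample data.
#
#   coords = [(0.0, 1.0, 2.0), (3.0, 4.0, 5.0)]
#   locdata_3d = LocData.from_coordinates(coords)   # labels inferred as x, y, z
#   locdata_2d = LocData.from_coordinates(
#       [(0.0, 1.0)], coordinate_labels=["position_x", "position_y"])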
@classmethod
def from_selection(cls, locdata, indices=slice(0, None), meta=None):
"""
Create new LocData object from selected elements in another `LocData`.
Parameters
----------
locdata : LocData
Locdata object from which to select elements.
indices : slice object, list(int), None
Index labels for elements in locdata that make up the new data.
Note that contrary to usual python slices, both the start and the stop are included
(see pandas documentation). `Indices` refer to index value not position in list.
meta : locan.data.metadata_pb2.Metadata
Metadata about the current dataset and its history.
Returns
-------
LocData
A new LocData instance with dataframe representing the selected data.
Note
----
No error is raised if indices do not exist in locdata.
"""
references = locdata
indices = indices
meta_ = metadata_pb2.Metadata()
meta_.CopyFrom(locdata.meta)
try:
meta_.ClearField("identifier")
except ValueError:
pass
try:
meta_.ClearField("element_count")
except ValueError:
pass
try:
meta_.ClearField("frame_count")
except ValueError:
pass
meta_.modification_time.GetCurrentTime()
meta_.state = metadata_pb2.MODIFIED
meta_.ancestor_identifiers.append(locdata.meta.identifier)
meta_.history.add(name="LocData.from_selection")
if meta is None:
pass
elif isinstance(meta, dict):
for key, value in meta.items():
setattr(meta_, key, value)
else:
meta_.MergeFrom(meta)
new_locdata = cls(references=references, indices=indices, meta=meta_)
new_locdata.region = references.region
return new_locdata
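# Usage sketch (illustrative only): `from_selection` selects by index *label*,
# so a slice like slice(0, 10) includes the stop label, as the docstring notes.
# `locdata` is assumed to be an existing LocData instance.
#
#   sel = LocData.from_selection(locdata, indices=[0, 2, 5])
#   sel_range = LocData.from_selection(locdata, indices=slice(0, 10))
#   sel.reduce()   # optionally materialize the selected rows in sel.dataframe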
@classmethod
def from_collection(cls, locdatas, meta=None):
"""
Create new LocData object by collecting LocData objects.
Parameters
----------
locdatas : list of LocData
Locdata objects to collect.
meta : locan.data.metadata_pb2.Metadata
Metadata about the current dataset and its history.
Returns
-------
LocData
A new LocData instance with dataframe representing the concatenated data.
"""
references = locdatas
dataframe = pd.DataFrame([ref.properties for ref in references])
meta_ = metadata_pb2.Metadata()
meta_.source = metadata_pb2.DESIGN
meta_.state = metadata_pb2.RAW
meta_.ancestor_identifiers[:] = [ref.meta.identifier for ref in references]
meta_.history.add(name="LocData.from_collection")
if meta is None:
pass
elif isinstance(meta, dict):
for key, value in meta.items():
setattr(meta_, key, value)
else:
meta_.MergeFrom(meta)
return cls(references=references, dataframe=dataframe, meta=meta_)
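# Usage sketch (illustrative only): a collection keeps the input LocData objects
# as `references` and stores one row of aggregate properties per element; the
# update_*_in_references methods of this class can then enrich that dataframe.
# `locdata_1` and `locdata_2` are assumed to exist.
#
#   collection = LocData.from_collection([locdata_1, locdata_2])
#   collection.update_convex_hulls_in_references()
#   collection.data[["localization_count", "region_measure_ch"]]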
@classmethod
def concat(cls, locdatas, meta=None):
"""
Concatenate LocData objects.
Parameters
----------
locdatas : list of LocData
Locdata objects to concatenate.
meta : locan.data.metadata_pb2.Metadata
Metadata about the current dataset and its history.
Returns
-------
LocData
A new LocData instance with dataframe representing the concatenated data.
"""
dataframe = pd.concat([i.data for i in locdatas], ignore_index=True, sort=False)
# concatenate references also if None
references = []
for locdata in locdatas:
try:
references.extend(locdata.references)
except TypeError:
references.append(locdata.references)
# check if all elements are None
if not any(references):
references = None
meta_ = metadata_pb2.Metadata()
meta_.creation_time.GetCurrentTime()
meta_.source = metadata_pb2.DESIGN
meta_.state = metadata_pb2.MODIFIED
meta_.ancestor_identifiers[:] = [dat.meta.identifier for dat in locdatas]
meta_.history.add(name="concat")
if meta is None:
pass
elif isinstance(meta, dict):
for key, value in meta.items():
setattr(meta_, key, value)
else:
meta_.MergeFrom(meta)
return cls(references=references, dataframe=dataframe, meta=meta_)
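# Usage sketch (illustrative only): `concat` stacks localizations row-wise, in
# contrast to `from_collection`, which keeps one aggregate row per input object.
# `locdata_1` and `locdata_2` are assumed to exist.
#
#   combined = LocData.concat([locdata_1, locdata_2])
#   len(combined) == len(locdata_1) + len(locdata_2)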
@classmethod
def from_chunks(
cls,
locdata,
chunks=None,
chunk_size=None,
n_chunks=None,
order="successive",
drop=False,
meta=None,
):
"""
Divide locdata into chunks of localization elements.
Parameters
----------
locdata : LocData
Locdata object to be divided into chunks.
chunks : list[tuples]
Localization chunks as defined by a list of index-tuples
chunk_size : int, None
Number of localizations per chunk. One of `chunk_size` or `n_chunks` must be different from None.
n_chunks : int, None
Number of chunks. One of `chunk_size` or `n_chunks` must be different from None.
order : str
The order in which to select localizations. One of 'successive' or 'alternating'.
drop : bool
If True the last chunk will be eliminated if it has fewer localizations than the other chunks.
meta : locan.data.metadata_pb2.Metadata
Metadata about the current dataset and its history.
Returns
-------
LocData
A new LocData instance with references and dataframe elements representing the individual chunks.
"""
n_nones = sum(element is None for element in [chunks, chunk_size, n_chunks])
if n_nones != 2:
raise ValueError(
"One and only one of `chunks`, `chunk_size` or `n_chunks` must be different from None."
)
elif chunks is not None:
index_lists = list(chunks)
else:
if chunk_size is not None:
if (len(locdata) % chunk_size) == 0:
n_chunks = len(locdata) // chunk_size
else:
n_chunks = len(locdata) // chunk_size + 1
else: # if n_chunks is not None
if (len(locdata) % n_chunks) == 0:
chunk_size = len(locdata) // n_chunks
else:
chunk_size = len(locdata) // (n_chunks - 1)
if order == "successive":
if (len(locdata) % chunk_size) == 0:
chunk_sizes = [chunk_size] * n_chunks
else:
chunk_sizes = [chunk_size] * (n_chunks - 1) + [
(len(locdata) % chunk_size)
]
cum_chunk_sizes = list(accumulate(chunk_sizes))
cum_chunk_sizes.insert(0, 0)
index_lists = [
locdata.data.index[slice(lower, upper)]
for lower, upper in zip(cum_chunk_sizes[:-1], cum_chunk_sizes[1:])
]
elif order == "alternating":
index_lists = [
locdata.data.index[slice(i_chunk, None, n_chunks)]
for i_chunk in range(n_chunks)
]
else:
raise ValueError(f"The order {order} is not implemented.")
if drop and len(index_lists) > 1 and len(index_lists[-1]) < len(index_lists[0]):
index_lists = index_lists[:-1]
references = [
LocData.from_selection(locdata=locdata, indices=index_list)
for index_list in index_lists
]
dataframe = pd.DataFrame([ref.properties for ref in references])
meta_ = metadata_pb2.Metadata()
meta_.creation_time.GetCurrentTime()
meta_.source = metadata_pb2.DESIGN
meta_.state = metadata_pb2.RAW
meta_.ancestor_identifiers[:] = [ref.meta.identifier for ref in references]
meta_.history.add(name="LocData.chunks")
if meta is None:
pass
elif isinstance(meta, dict):
for key, value in meta.items():
setattr(meta_, key, value)
else:
meta_.MergeFrom(meta)
return cls(references=references, dataframe=dataframe, meta=meta_)
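# Usage sketch (illustrative only): chunking by size versus by count, following
# the docstring above; `locdata` is assumed to hold 10 localizations.
#
#   chunks = LocData.from_chunks(locdata, chunk_size=4, order="successive")
#   # -> 3 references holding 4, 4 and 2 localizations
#   chunks = LocData.from_chunks(locdata, n_chunks=3, order="alternating", drop=True)
#   # alternating distributes every n-th localization; drop discards a short last chunk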
def reset(self, reset_index=False):
"""
Reset hulls and properties. This is needed after the dataframe attribute has been modified in place.
Note
----
Should be used with care because metadata is not updated accordingly.
The region property is not changed.
Better to just re-instantiate with `LocData.from_dataframe()` or use `locdata.update()`.
Parameters
----------
reset_index : Bool
Flag indicating if the index is reset to integer values. If True the previous index values are discarded.
Returns
-------
LocData
The modified object
"""
if reset_index is True:
self.dataframe.reset_index(drop=True, inplace=True)
self.properties = {}
self._bounding_box = None
self._oriented_bounding_box = None
self._convex_hull = None
self._alpha_shape = None
self._update_properties()
return self
def update(self, dataframe, reset_index=False, meta=None):
"""
Update the dataframe attribute in place.
Use this function rather than setting locdata.dataframe directly in order to automatically update
the attributes for dimension, coordinate_labels, hulls, properties, and metadata.
Parameters
----------
dataframe : pandas.DataFrame, None
Dataframe with localization data.
reset_index : Bool
Flag indicating if the index is reset to integer values. If True the previous index values are discarded.
meta : locan.data.metadata_pb2.Metadata
Metadata about the current dataset and its history.
Returns
-------
LocData
The modified object
"""
local_parameter = locals()
del local_parameter[
"dataframe"
] # dataframe is obvious and possibly large and should not be repeated in meta.
if self.references is not None:
self.reduce(reset_index=reset_index)
logger.warning(
"LocData.reduce() was applied since self.references was not None."
)
self.dataframe = dataframe
self.coordinate_labels = sorted(
list(
set(self.data.columns).intersection(
{"position_x", "position_y", "position_z"}
)
)
)
self.dimension = len(self.coordinate_labels)
self.reset(reset_index=reset_index) # update hulls and properties
# update meta
self.meta.modification_time.GetCurrentTime()
self.meta.state = metadata_pb2.MODIFIED
self.meta.history.add(name="LocData.update", parameter=str(local_parameter))
self.meta.element_count = len(self.data.index)
if "frame" in self.data.columns:
self.meta.frame_count = len(self.data["frame"].unique())
if meta is None:
pass
elif isinstance(meta, dict):
for key, value in meta.items():
setattr(self.meta, key, value)
else:
self.meta.MergeFrom(meta)
return self
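# Usage sketch (illustrative only): `update` swaps in a new dataframe and
# refreshes hulls, properties and metadata in one call; `locdata` and `new_df`
# are assumed to exist and to use locan's standard column names.
#
#   locdata.update(new_df, reset_index=True)
#   locdata.properties["localization_count"] == len(new_df)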
def reduce(self, reset_index=False):
"""
Clean up references.
This includes updating `LocData.dataframe` and setting `LocData.references` and `LocData.indices` to None.
Parameters
----------
reset_index : Bool
Flag indicating if the index is reset to integer values. If True the previous index values are discarded.
Returns
-------
LocData
The modified object
"""
if self.references is None:
pass
elif isinstance(self.references, (LocData, list)):
self.dataframe = self.data
self.indices = None
self.references = None
else:
raise ValueError("references has undefined value.")
if reset_index is True:
self.dataframe.reset_index(drop=True, inplace=True)
return self
def update_convex_hulls_in_references(self):
"""
Compute the convex hull for each element in locdata.references and update locdata.dataframe.
Returns
-------
LocData
The modified object
"""
if isinstance(self.references, list):
for reference in self.references:
reference.convex_hull # request property to update reference._convex_hull
new_df = pd.DataFrame(
[reference.properties for reference in self.references]
)
new_df.index = self.data.index
self.dataframe.update(new_df)
new_columns = [
column for column in new_df.columns if column in self.dataframe.columns
]
new_df.drop(columns=new_columns, inplace=True, errors="ignore")
self.dataframe = pd.concat([self.dataframe, new_df], axis=1)
return self
def update_oriented_bounding_box_in_references(self):
"""
Compute the oriented bounding box for each element in locdata.references and update locdata.dataframe.
Returns
-------
LocData
The modified object
"""
if isinstance(self.references, list):
for reference in self.references:
reference.oriented_bounding_box # request property to update reference._oriented_bounding_box
new_df = pd.DataFrame(
[reference.properties for reference in self.references]
)
new_df.index = self.data.index
self.dataframe.update(new_df)
new_columns = [
column for column in new_df.columns if column in self.dataframe.columns
]
new_df.drop(columns=new_columns, inplace=True, errors="ignore")
self.dataframe = | pd.concat([self.dataframe, new_df], axis=1) | pandas.concat |
import baostock as bs
import pandas as pd
import numpy as np
from IPython import embed
class Data_Reader():
"""
Read stock data from the given file.
"""
def __init__(self, file="stock.csv"):
self.file = file
self.code_list = []
self.data = None
def read_data(self, file="stock.csv"):
data_list = np.array( | pd.read_csv(file, encoding="gbk") | pandas.read_csv |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def pyscript_diseases():
# measles
measlesdf = pd.read_csv('https://docs.google.com/spreadsheets/d/1ogMiFRnX-N4lp1cqI0N22F9K9fFVVFfCWxw4T6W2iVw/export?format=csv&id')
measlesdf['Total Measles Cases'] = measlesdf.sum(axis=1)
total_cases = measlesdf
total_measles_cases = total_cases.loc[:, 'Country':'2018']
final_total_measles = total_measles_cases.sort_values(by='2018', ascending=False)
final_total_measles = final_total_measles.rename(columns = {final_total_measles.columns[0]: "country",
final_total_measles.columns[1]: "confirmed_cases" })
final_total_measles = final_total_measles[final_total_measles.confirmed_cases != 0]
countriesdf = pd.read_csv("Data/countries.csv", encoding = "ISO-8859-1")
final_total_measles = pd.merge(final_total_measles, countriesdf, how = 'left', on = 'country')
measles_json = final_total_measles.to_json(orient='records')
# COVID-19
corona_data = | pd.read_csv("Data/COVID-19.csv") | pandas.read_csv |
import os
import unittest
from builtins import range
import matplotlib
import mock
import numpy as np
import pandas as pd
import root_numpy
from mock import MagicMock, patch, mock_open
import six
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import ROOT
from PyAnalysisTools.AnalysisTools import MLHelper as mh
from PyAnalysisTools.base import InvalidInputError
from PyAnalysisTools.base.FileHandle import FileHandle
if six.PY2:
builtin = '__builtin__'
else:
builtin = 'builtins'
cwd = os.path.dirname(__file__)
ROOT.gROOT.SetBatch(True)
class TestMLHelpers(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_print_classification(self):
model = MagicMock()
model.predict_classes = MagicMock(return_value=[1])
mh.print_classification(model, 1, [2], 3, [4])
@mock.patch.object(matplotlib.pyplot, 'savefig', lambda x: None)
def test_plot_scoring(self):
class Object(object):
pass
history = Object()
history.history = {'foo': [(100, 100), (200, 200)]}
history.history['val_foo'] = history.history['foo']
mh.plot_scoring(history, 'foo', ['foo'], 'foo')
class TestMLConfig(unittest.TestCase):
def test_ctor_default(self):
self.assertRaises(KeyError, mh.MLConfig)
def test_ctor(self):
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
self.assertEqual('foo', cfg.score_name)
self.assertEqual([], cfg.varset)
self.assertIsNone(cfg.scaler)
self.assertIsNone(cfg.scale_algo)
self.assertIsNone(cfg.selection)
def test_equality(self):
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
cfg2 = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
self.assertEqual(cfg, cfg2)
def test_inequality(self):
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
cfg2 = mh.MLConfig(branch_name='bar', variable_list=[], selection=None)
self.assertNotEqual(cfg, cfg2)
def test_inequality_scaler(self):
scaler = mh.DataScaler()
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None, scaler=scaler)
cfg2 = mh.MLConfig(branch_name='bar', variable_list=[], selection=None)
self.assertNotEqual(cfg, cfg2)
self.assertNotEqual(cfg2, cfg)
def test_inequality_scaler_algo(self):
scaler_def = mh.DataScaler()
scaler = mh.DataScaler('foo')
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None, scaler=scaler)
cfg2 = mh.MLConfig(branch_name='bar', variable_list=[], selection=None, scaler=scaler_def)
self.assertNotEqual(cfg, cfg2)
self.assertNotEqual(cfg2, cfg)
def test_inequality_type(self):
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
self.assertNotEqual(cfg, 5.)
def test_handle_ctor(self):
handle = mh.MLConfigHandle(branch_name='foo', variable_list=[], selection=None)
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
self.assertEqual(cfg, handle.config)
self.assertEqual('.', handle.output_path)
self.assertEqual('./ml_config_summary.pkl', handle.file_name)
def test_print(self):
handle = mh.MLConfig(branch_name='foo', variable_list=['foo'], selection=['bar'])
print_out = 'Attached ML branch foo was created with the following configuration \nvariables: \n\t foo\n' \
'selection: \n\t bar\nscaler: None\n'
self.assertEqual(print_out, handle.__str__())
class TestRootNumpyConverter(unittest.TestCase):
def test_ctor(self):
converter = mh.Root2NumpyConverter(['foo'])
self.assertEqual(['foo'], converter.branches)
def test_ctor_no_list(self):
converter = mh.Root2NumpyConverter('foo')
self.assertEqual(['foo'], converter.branches)
def test_merge(self):
arr1 = np.array([1, 2])
arr2 = np.array([3, 4])
arr3 = np.array([5, 6])
arr4 = np.array([7, 8])
converter = mh.Root2NumpyConverter('foo')
data, labels = converter.merge([arr1, arr2], [arr3, arr4])
np.testing.assert_array_equal(np.array([i+1 for i in range(8)]), data)
np.testing.assert_array_equal(np.array([1]*4+[0]*4), labels)
@patch.object(root_numpy, 'tree2array', lambda x, **kwargs: (x, kwargs))
def test_convert(self):
converter = mh.Root2NumpyConverter(['foo'])
data = converter.convert_to_array(None, 'sel', 1000)
self.assertIsNone(data[0])
self.assertEqual({'branches': ['foo'], 'selection': 'sel', 'start': 0, 'stop': 1000}, data[1])
class TestTrainingReader(unittest.TestCase):
def test_default_ctor(self):
reader = mh.TrainingReader()
self.assertEqual('', reader.mode)
self.assertFalse(reader.numpy_input)
@mock.patch.object(pd, 'read_json', lambda _: None)
@patch(builtin + ".open", new_callable=mock_open)
def test_ctor_json(self, _):
reader = mh.TrainingReader(input_file_list=['foo.json'])
self.assertEqual('pandas', reader.mode)
self.assertFalse(reader.numpy_input)
self.assertEqual({'foo.json': None}, reader.data)
def test_ctor_numpy_list(self):
reader = mh.TrainingReader(input_file=['foo.npy', 'bar.npy'])
self.assertEqual('', reader.mode)
self.assertTrue(reader.numpy_input)
def test_ctor_numpy(self):
reader = mh.TrainingReader(input_file='foo.npy', signal_tree_names=['sig'], bkg_tree_names=['bkg'])
self.assertEqual('', reader.mode)
self.assertFalse(reader.numpy_input)
self.assertEqual(['sig'], reader.signal_tree_names)
self.assertEqual(['bkg'], reader.bkg_tree_names)
def test_parse_tree_names(self):
reader = mh.TrainingReader(input_file='foo.npy', signal_tree_names=['sig'], bkg_tree_names=['bkg'])
sig_train, bkg_train, sig_eval, bkg_eval = reader.parse_tree_names()
self.assertEqual(['train_sig'], sig_train)
self.assertEqual(['eval_sig'], sig_eval)
self.assertEqual(['train_bkg'], bkg_train)
self.assertEqual(['eval_bkg'], bkg_eval)
@mock.patch.object(FileHandle, 'get_object_by_name', lambda _, x: x)
def test_get_trees(self):
reader = mh.TrainingReader(input_file='foo.npy', signal_tree_names=['sig'], bkg_tree_names=['bkg'])
sig_train, bkg_train, sig_eval, bkg_eval = reader.get_trees()
self.assertEqual(['train_sig'], sig_train)
self.assertEqual(['eval_sig'], sig_eval)
self.assertEqual(['train_bkg'], bkg_train)
self.assertEqual(['eval_bkg'], bkg_eval)
def test_prepare_data(self):
reader = mh.TrainingReader()
reader.mode = 'pandas'
reader.data = {'_foo': pd.DataFrame({'var1': [1., 2.]}), '_bar': pd.DataFrame({'var1': [3., 4.]})}
cfg = mh.MLTrainConfig(signals=['foo'], backgrounds=['bar'])
signal, bkg, labels = reader.prepare_data(cfg)
assert_frame_equal( | pd.DataFrame({'var1': [1., 2.]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
HornMT_Dataset_Preparation
Created on Mon Dec 12 01:25:16 2021
@author: <NAME>
"""
# Import libs
import pandas as pd
# Load HornMT dataset
file_path = '/data/HornMT.xlsx'
HornMT = pd.read_excel(file_path)
#HornMT.head(1)
# Preprocess the dataframe
eng = pd.DataFrame(HornMT['eng'])
aaf = pd.DataFrame(HornMT['aaf'])
amh = pd.DataFrame(HornMT['amh'])
orm = pd.DataFrame(HornMT['orm'])
som = pd.DataFrame(HornMT['som'])
tir = | pd.DataFrame(HornMT['tir']) | pandas.DataFrame |
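# Sketch of a plausible next step (assumption, not part of the original script):
# reassemble the per-language columns into one parallel corpus and save it; the
# output path is invented.
#
#   parallel = pd.concat([eng, aaf, amh, orm, som, tir], axis=1)
#   parallel.to_csv('/data/HornMT_parallel.csv', index=False)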
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# tz mismatch affecting to tz-aware raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
# passing tz should results in DatetimeIndex, then mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
date_range(start='1/1/2000', periods='foo', freq='D')
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
pytest.raises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')
pytest.raises(ValueError, date_range, start='2011-01-01',
freq='b')
pytest.raises(ValueError, date_range, end='2011-01-01',
freq='B')
pytest.raises(ValueError, date_range, periods=10, freq='D')
@pytest.mark.parametrize('freq', ['AS', 'W-SUN'])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
idx = date_range('2013-01-01T00:00:00-05:00',
'2016-01-01T23:59:59-05:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013-01-01T00:00:00+09:00',
'2016-01-01T23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',
freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013/1/1 0:00:00+9:00',
'2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
expected = DatetimeIndex(['2013-01-01', '2013-01-02']
).tz_localize('US/Eastern')
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
tz='US/Eastern')
tm.assert_index_equal(idx, expected)
# if we already have a tz and its not the same, then raise
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
pytest.raises(ValueError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns]'))
# this is effectively trying to convert tz's
pytest.raises(TypeError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns, CET]'))
pytest.raises(ValueError,
lambda: DatetimeIndex(
idx, tz='CET',
dtype='datetime64[ns, US/Eastern]'))
result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]')
tm.assert_index_equal(idx, result)
def test_constructor_name(self):
idx = date_range(start='2000-01-01', periods=1, freq='A',
name='TEST')
assert idx.name == 'TEST'
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
assert idx.nanosecond[0] == t1.nanosecond
def test_disallow_setting_tz(self):
# GH 3746
dti = DatetimeIndex(['2010'], tz='UTC')
with pytest.raises(AttributeError):
dti.tz = pytz.timezone('US/Pacific')
@pytest.mark.parametrize('tz', [
None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'),
Timestamp('2000', tz='America/Los_Angeles').tz])
def test_constructor_start_end_with_tz(self, tz):
# GH 18595
start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles')
end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles')
result = date_range(freq='D', start=start, end=end, tz=tz)
expected = DatetimeIndex(['2013-01-01 06:00:00',
'2013-01-02 06:00:00'],
tz='America/Los_Angeles')
tm.assert_index_equal(result, expected)
# Especially assert that the timezone is consistent for pytz
assert pytz.timezone('America/Los_Angeles') is result.tz
@pytest.mark.parametrize('tz', ['US/Pacific', 'US/Eastern', 'Asia/Tokyo'])
def test_constructor_with_non_normalized_pytz(self, tz):
# GH 18595
non_norm_tz = Timestamp('2010', tz=tz).tz
result = DatetimeIndex(['2010'], tz=non_norm_tz)
assert pytz.timezone(tz) is result.tz
def test_constructor_timestamp_near_dst(self):
# GH 20854
ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'),
Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')]
result = DatetimeIndex(ts)
expected = DatetimeIndex([ts[0].to_pydatetime(),
ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
# TODO(GH-24559): Remove the xfail for the tz-aware case.
@pytest.mark.parametrize('klass', [Index, DatetimeIndex])
@pytest.mark.parametrize('box', [
np.array, partial(np.array, dtype=object), list])
@pytest.mark.parametrize('tz, dtype', [
pytest.param('US/Pacific', 'datetime64[ns, US/Pacific]',
marks=[pytest.mark.xfail(),
pytest.mark.filterwarnings(
"ignore:\\n Passing:FutureWarning")]),
[None, 'datetime64[ns]'],
])
def test_constructor_with_int_tz(self, klass, box, tz, dtype):
# GH 20997, 20964
ts = Timestamp('2018-01-01', tz=tz)
result = klass(box([ts.value]), dtype=dtype)
expected = klass([ts])
assert result == expected
# This is the desired future behavior
@pytest.mark.xfail(reason="Future behavior", strict=False)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
def test_construction_int_rountrip(self, tz_naive_fixture):
# GH 12619
# TODO(GH-24559): Remove xfail
tz = tz_naive_fixture
result = 1293858000000000000
expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0]
assert result == expected
def test_construction_from_replaced_timestamps_with_dst(self):
# GH 18785
index = pd.date_range(pd.Timestamp(2000, 1, 1),
pd.Timestamp(2005, 1, 1),
freq='MS', tz='Australia/Melbourne')
test = pd.DataFrame({'data': range(len(index))}, index=index)
test = test.resample('Y').mean()
result = pd.DatetimeIndex([x.replace(month=6, day=1)
for x in test.index])
expected = pd.DatetimeIndex(['2000-06-01 00:00:00',
'2001-06-01 00:00:00',
'2002-06-01 00:00:00',
'2003-06-01 00:00:00',
'2004-06-01 00:00:00',
'2005-06-01 00:00:00'],
tz='Australia/Melbourne')
tm.assert_index_equal(result, expected)
def test_construction_with_tz_and_tz_aware_dti(self):
# GH 23579
dti = date_range('2016-01-01', periods=3, tz='US/Central')
with pytest.raises(TypeError):
DatetimeIndex(dti, tz='Asia/Tokyo')
def test_construction_with_nat_and_tzlocal(self):
tz = dateutil.tz.tzlocal()
result = DatetimeIndex(['2018', 'NaT'], tz=tz)
expected = DatetimeIndex([Timestamp('2018', tz=tz), pd.NaT])
tm.assert_index_equal(result, expected)
class TestTimeSeries(object):
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
assert rng.freq == rng2.freq
def test_dti_constructor_years_only(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 6961
rng1 = date_range('2014', '2015', freq='M', tz=tz)
expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz)
rng2 = date_range('2014', '2015', freq='MS', tz=tz)
expected2 = date_range('2014-01-01', '2015-01-01', freq='MS', tz=tz)
rng3 = date_range('2014', '2020', freq='A', tz=tz)
expected3 = date_range('2014-12-31', '2019-12-31', freq='A', tz=tz)
rng4 = date_range('2014', '2020', freq='AS', tz=tz)
expected4 = date_range('2014-01-01', '2020-01-01', freq='AS', tz=tz)
for rng, expected in [(rng1, expected1), (rng2, expected2),
(rng3, expected3), (rng4, expected4)]:
tm.assert_index_equal(rng, expected)
def test_dti_constructor_small_int(self, any_int_dtype):
# see gh-13721
exp = DatetimeIndex(['1970-01-01 00:00:00.00000000',
'1970-01-01 00:00:00.00000001',
'1970-01-01 00:00:00.00000002'])
arr = np.array([0, 10, 20], dtype=any_int_dtype)
tm.assert_index_equal(DatetimeIndex(arr), exp)
def test_ctor_str_intraday(self):
rng = | DatetimeIndex(['1-1-2000 00:00:01']) | pandas.DatetimeIndex |
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
"""
The products that come out of the daily report are:
3
4
5
7
8
9
10
11
12
13
14
17
20
23
24
26
27
30
36
"""
import pandas as pd
from utils import *
from shutil import copyfile
from os import listdir
from os.path import isfile, join
from datetime import datetime
import numpy as np
def prod4(fte, producto):
print('Generando producto 4')
now = datetime.now()
today = now.strftime("%Y-%m-%d")
output = producto + today + '-CasosConfirmados-totalRegional.csv'
df = pd.read_csv(fte, quotechar='"', sep=',', thousands=r'.', decimal=",")
df.rename(columns={'Unnamed: 0': 'Region'}, inplace=True)
if 'Unnamed: 7' in df.columns:
df.drop(columns=['Unnamed: 7'], inplace=True)
df_obj = df.select_dtypes(['object'])
df[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
regionName(df)
df.at[16, 'Region'] = 'Total'
# Textract recognizes 0 as the letter o
df.replace({'O': 0}, inplace=True)
numeric_columns = [x for x in df.columns if x != 'Region']
for i in numeric_columns:
df[i] = df[i].astype(str)
#df[i] = df[i].replace({r'\.': ''}, regex=True)
df[i] = df[i].replace({r'\,': '.'}, regex=True)
df.to_csv(output, index=False)
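# Illustration (hedged, invented sample data): prod4 relies on pandas parsing
# numbers written with '.' as thousands separator and ',' as decimal mark, then
# re-normalizes the decimals. A minimal, self-contained version of that parsing:
#
#   import io
#   sample = io.StringIO('Region,Casos totales\nTotal,"1.234,5"\n')
#   demo = pd.read_csv(sample, thousands=r'.', decimal=",")
#   demo['Casos totales'].iloc[0]   # -> 1234.5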
def prod5(fte, producto):
print('Generando producto 5')
# national-level series by date are needed:
# Casos nuevos con sintomas
# Casos totales
# Casos recuperados  # no longer reported
# Fallecidos
# Casos activos
# Casos nuevos sin sintomas
now = datetime.now()
timestamp = now.strftime("%Y-%m-%d")
timestamp_dia_primero = now.strftime("%d-%m-%Y")
a = pd.read_csv(fte + 'CasosConfirmados.csv')
a['Fecha'] = timestamp
a = a[a['Region'] == 'Total']
print(a.to_string())
# the columns are:
# Casos totales acumulados Casos nuevos totales Casos nuevos con sintomas Casos nuevos sin sintomas* Fallecidos totales % Total Fecha
a.rename(columns={'Casos totales acumulados': 'Casos totales',
'Casos nuevos totales': 'Casos nuevos totales',
'Casos nuevos con sintomas': 'Casos nuevos con sintomas',
'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas',
'Fallecidos totales': 'Fallecidos'}, inplace=True)
# Casos activos is missing here: prod 5 now lives in the daily report and should be migrated there
casos_confirmados_totales = pd.read_csv('../input/ReporteDiario/CasosConfirmadosTotales.csv')
today_row = (casos_confirmados_totales[casos_confirmados_totales['Fecha'] == timestamp_dia_primero])
a['Casos activos'] = today_row['Casos activos'].values
## this part is standard
totales = pd.read_csv(producto)
#print(totales.columns[1:])
# add Casos nuevos totales = Casos nuevos con sintomas + Casos nuevos sin sintomas
for eachColumn in totales.columns[1:]:
print('Checking if Casos nuevos totales is fine on ' + eachColumn)
#print(totales.index[totales['Fecha'] == 'Casos nuevos con sintomas'].values[0])
#print(totales.at[totales.index[totales['Fecha'] == 'Casos nuevos con sintomas'].values[0], eachColumn])
rowConSintomas = totales.index[totales['Fecha'] == 'Casos nuevos con sintomas'].values[0]
rowSinSintomas = totales.index[totales['Fecha'] == 'Casos nuevos sin sintomas'].values[0]
rowCasosNuevosTotales = totales.index[totales['Fecha'] == 'Casos nuevos totales'].values[0]
#print('row con ' + str(rowConSintomas))
#print('row sin ' + str(rowSinSintomas))
#print('expected is ' + str(totales.at[rowConSintomas, eachColumn]) + ' + ' + str(totales.at[rowSinSintomas, eachColumn]))
#check for NaN
if not np.isnan(totales.at[rowConSintomas, eachColumn]) and not np.isnan(totales.at[rowSinSintomas, eachColumn]):
expectedTotal = totales.at[rowConSintomas, eachColumn] + totales.at[rowSinSintomas, eachColumn]
elif not np.isnan(totales.at[rowConSintomas, eachColumn]) and np.isnan(totales.at[rowSinSintomas, eachColumn]):
expectedTotal = totales.at[rowConSintomas, eachColumn]
elif np.isnan(totales.at[rowConSintomas, eachColumn]) and not np.isnan(totales.at[rowSinSintomas, eachColumn]):
expectedTotal = totales.at[rowSinSintomas, eachColumn]
registeredTotal = totales.at[rowCasosNuevosTotales, eachColumn]
if registeredTotal != expectedTotal:
print('Casos nuevos totales debería ser ' + str(expectedTotal) + ' pero es ' + str(registeredTotal))
#print(totales.at[rowCasosNuevosTotales, eachColumn])
totales.at[rowCasosNuevosTotales, eachColumn] = expectedTotal
#print(totales.at[rowCasosNuevosTotales, eachColumn])
#print(totales.to_string())
# normalize headers
#expectedHeaders=['Casos nuevos con sintomas', 'Casos totales', 'Casos recuperados', 'Fallecidos',
# 'Casos activos', 'Casos nuevos sin sintomas', 'Casos totales acumulados', 'Casos nuevos totales']
emptyrow = [] * len(totales.columns)
if 'Casos nuevos con sintomas' not in totales['Fecha'].values:
totales['Fecha'][0] = 'Casos nuevos con sintomas'
if 'Casos nuevos sin sintomas' not in totales['Fecha'].values:
ax = ['Casos nuevos sin sintomas']
bx = [''] * (len(totales.columns) - 1)
ax.extend(bx)
row = pd.DataFrame([ax], columns=totales.columns)
aux = pd.concat([totales, row], ignore_index=True)
totales = aux
#totales['Fecha'][len(totales['Fecha']) + 1] = 'Casos nuevos sin sintomas'
if 'Casos totales' not in totales['Fecha'].values:
print('Casos totales not found')
ax = ['Casos totales']
bx = [''] * (len(totales.columns) - 1)
ax.extend(bx)
row = pd.DataFrame([ax], columns=totales.columns)
aux = pd.concat([totales, row], ignore_index=True)
totales = aux
if 'Casos nuevos totales' not in totales['Fecha'].values:
ax = ['Casos nuevos totales']
bx = [''] * (len(totales.columns) - 1)
ax.extend(bx)
row = pd.DataFrame([ax], columns=totales.columns)
aux = pd.concat([totales, row], ignore_index=True)
totales = aux
#print(totales)
#print(totales['Fecha'])
#print(str(a['Fecha'].values[0]) + ' is in ' + str(totales.columns))
if (a['Fecha'].values[0]) in totales.columns:
print(a['Fecha'] + ' ya esta en el dataframe. No actualizamos')
return
else:
#print(totales.iloc[:, 0])
newColumn=[]
#Need to add new rows to totales:
for eachValue in totales.iloc[:, 0]:
print('each values is ' + eachValue)
if eachValue in a.columns:
print((a[eachValue].values))
newColumn.append(str(a[eachValue].values[0]))
else:
#print('appending ""')
newColumn.append('')
print(newColumn)
totales[timestamp] = newColumn
totales.to_csv(producto, index=False)
totales_t = totales.transpose()
totales_t.to_csv(producto.replace('.csv', '_T.csv'), header=False)
#print(totales.to_string())
totales.rename(columns={'Fecha': 'Dato'}, inplace=True)
identifiers = ['Dato']
variables = [x for x in totales.columns if x not in identifiers]
df_std = pd.melt(totales, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv(producto.replace('.csv', '_std.csv'), index=False)
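# Illustration (hedged, invented sample data): the wide-to-long reshape used
# just above, on a tiny frame. 'Dato' stays as the identifier, each date column
# becomes a row.
#
#   wide = pd.DataFrame({'Dato': ['Casos totales'], '2020-05-01': [100], '2020-05-02': [120]})
#   long = pd.melt(wide, id_vars=['Dato'], var_name='Fecha', value_name='Total')
#   # -> rows: ('Casos totales', '2020-05-01', 100) and ('Casos totales', '2020-05-02', 120)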
def prod3_13_14_26_27(fte):
onlyfiles = [f for f in listdir(fte) if isfile(join(fte, f))]
cumulativoCasosNuevos = pd.DataFrame({'Region': [],
'Casos nuevos': []})
cumulativoCasosTotales = pd.DataFrame({'Region': [],
'Casos totales': []})
cumulativoFallecidos = pd.DataFrame({'Region': [],
'Fallecidos': []})
casosNuevosConSintomas = pd.DataFrame({'Region': [],
'Fecha': []})
casosNuevosSinSintomas = pd.DataFrame({'Region': [],
'Fecha': []})
onlyfiles.sort()
onlyfiles.remove('README.md')
for eachfile in onlyfiles:
print('processing ' + eachfile)
date = eachfile.replace("-CasosConfirmados-totalRegional", "").replace(".csv", "")
dataframe = pd.read_csv(fte + eachfile)
# sanitize headers
#print(eachfile)
dataframe.rename(columns={'Región': 'Region'}, inplace=True)
dataframe.rename(columns={'Casos nuevos': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={' Casos nuevos': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos totales': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos totales ': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos totales': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos totales': 'Casos totales'}, inplace=True)
dataframe.rename(columns={' Casos totales': 'Casos totales'}, inplace=True)
dataframe.rename(columns={'Casos totales acumulados': 'Casos totales'}, inplace=True)
dataframe.rename(columns={'Casos totales acumulados ': 'Casos totales'}, inplace=True)
dataframe.rename(columns={'Casos totales acumulados': 'Casos totales'}, inplace=True)
dataframe.rename(columns={' Casos fallecidos': 'Fallecidos'}, inplace=True)
dataframe.rename(columns={'Fallecidos totales ': 'Fallecidos'}, inplace=True)
dataframe.rename(columns={'Fallecidos totales': 'Fallecidos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con síntomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos con síntomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con síntomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con sintomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos con sintomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con sintomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con sintomas ': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin síntomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin síntomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin sintomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas* ': 'Casos nuevos sin sintomas'}, inplace=True)
if cumulativoCasosNuevos['Region'].empty:
cumulativoCasosNuevos[['Region', 'Casos nuevos']] = dataframe[['Region', 'Casos nuevos']]
cumulativoCasosNuevos.rename(columns={'Casos nuevos': date}, inplace=True)
cumulativoCasosTotales[['Region', 'Casos totales']] = dataframe[['Region', 'Casos totales']]
cumulativoCasosTotales.rename(columns={'Casos totales': date}, inplace=True)
else:
print(dataframe.columns)
cumulativoCasosNuevos[date] = dataframe['Casos nuevos']
cumulativoCasosTotales[date] = dataframe['Casos totales']
if 'Fallecidos' in dataframe.columns:
if cumulativoFallecidos['Region'].empty:
cumulativoFallecidos[['Region', 'Fallecidos']] = dataframe[['Region', 'Fallecidos']]
cumulativoFallecidos.rename(columns={'Fallecidos': date}, inplace=True)
else:
cumulativoFallecidos[date] = dataframe['Fallecidos']
if 'Casos nuevos con sintomas' in dataframe.columns:
if casosNuevosConSintomas['Region'].empty:
casosNuevosConSintomas[['Region', 'Fecha']] = dataframe[['Region', 'Casos nuevos con sintomas']]
casosNuevosConSintomas.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosConSintomas[date] = dataframe['Casos nuevos con sintomas']
else:
date2 = (pd.to_datetime(date)).strftime('%Y-%m-%d')
if date2 < '2020-04-29':
if casosNuevosConSintomas['Region'].empty:
casosNuevosConSintomas[['Region', 'Fecha']] = dataframe[['Region','Casos nuevos']]
casosNuevosConSintomas.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosConSintomas[date] = dataframe['Casos nuevos']
if 'Casos nuevos sin sintomas' in dataframe.columns:
if casosNuevosSinSintomas['Region'].empty:
casosNuevosSinSintomas[['Region', 'Fecha']] = dataframe[['Region', 'Casos nuevos sin sintomas']]
casosNuevosSinSintomas.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosSinSintomas[date] = dataframe['Casos nuevos sin sintomas']
    # standardize region names
regionName(cumulativoCasosNuevos)
regionName(cumulativoCasosTotales)
regionName(cumulativoFallecidos)
regionName(casosNuevosConSintomas)
regionName(casosNuevosSinSintomas)
cumulativoCasosNuevos_T = cumulativoCasosNuevos.transpose()
cumulativoCasosTotales_T = cumulativoCasosTotales.transpose()
cumulativoFallecidos_T = cumulativoFallecidos.transpose()
casosNuevosConSintomas_T = casosNuevosConSintomas.transpose()
casosNuevosSinSintomas_T = casosNuevosSinSintomas.transpose()
#### PRODUCTO 3
cumulativoCasosTotales.to_csv('../output/producto3/CasosTotalesCumulativo.csv', index=False)
cumulativoCasosTotales_T.to_csv('../output/producto3/CasosTotalesCumulativo_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in cumulativoCasosTotales.columns if x not in identifiers]
df_std = pd.melt(cumulativoCasosTotales, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto3/CasosTotalesCumulativo_std.csv', index=False)
#### PRODUCTO 13
cumulativoCasosNuevos.to_csv('../output/producto13/CasosNuevosCumulativo.csv', index=False)
cumulativoCasosNuevos_T.to_csv('../output/producto13/CasosNuevosCumulativo_T.csv', header=False)
identifiers = ['Region']
    variables = [x for x in cumulativoCasosNuevos.columns if x not in identifiers]
df_std = pd.melt(cumulativoCasosNuevos, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto13/CasosNuevosCumulativo_std.csv', index=False)
#### PRODUCTO 14
cumulativoFallecidos.to_csv('../output/producto14/FallecidosCumulativo.csv', index=False)
cumulativoFallecidos_T.to_csv('../output/producto14/FallecidosCumulativo_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in cumulativoFallecidos.columns if x not in identifiers]
df_std = pd.melt(cumulativoFallecidos, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto14/FallecidosCumulativo_std.csv', index=False)
#### PRODUCTO 26
casosNuevosConSintomas.to_csv('../output/producto26/CasosNuevosConSintomas.csv', index=False)
casosNuevosConSintomas_T.to_csv('../output/producto26/CasosNuevosConSintomas_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in casosNuevosConSintomas.columns if x not in identifiers]
df_std = pd.melt(casosNuevosConSintomas, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Casos confirmados')
df_std.to_csv('../output/producto26/CasosNuevosConSintomas_std.csv', index=False)
#### PRODUCTO 27
casosNuevosSinSintomas.to_csv('../output/producto27/CasosNuevosSinSintomas.csv', index=False)
casosNuevosSinSintomas_T.to_csv('../output/producto27/CasosNuevosSinSintomas_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in casosNuevosSinSintomas.columns if x not in identifiers]
df_std = pd.melt(casosNuevosSinSintomas, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Casos confirmados')
df_std.to_csv('../output/producto27/CasosNuevosSinSintomas_std.csv', index=False)
def prod7_8(fte, producto):
df = pd.read_csv(fte, dtype={'Codigo region': object})
regionName(df)
df = df.replace('-', '', regex=True)
df_t = df.T
df.to_csv(producto + '.csv', index=False)
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Region', 'Codigo region', 'Poblacion']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='numero')
df_std.to_csv(producto + '_std.csv', index=False)
def prod9_10(fte, producto):
copyfile(fte, producto + '.csv')
HospitalizadosUCIEtario_T = transpone_csv(producto + '.csv')
HospitalizadosUCIEtario_T.to_csv(producto + '_T.csv', header=False)
df = | pd.read_csv(fte) | pandas.read_csv |
# coding: utf-8
# In[6]:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# # Feature extraction from the rights data 5right.csv
# 1. Number of rights owned by the enterprise, RIGHT_CNT
# 2. Number of right types owned by the enterprise, RIGHT_TYPE_CNT
# 3. Proportion of right types owned by the enterprise, RIGHT_TYPE_RATE
# 4. Type of the first right obtained, RIGHT_FIRST_TYPECODE
# 5. Type of the last right obtained, RIGHT_END_TYPECODE
# 6. Right type obtained most often, RIGHT_TYPECODE_MUCHID
#
# 7. Application date of the first right, RIGHT_FIRST_ASK_TIME
# 8. Grant date of the first right, RIGHT_FIRST_FB_TIME
# 9. Application date of the last right, RIGHT_END_ASK_TIME
# 10. Grant date of the last right, RIGHT_END_FB_TIME
#
# 11. Difference between the first right's application date and its grant date, RIGHT_FIRST_ASK_FB_DIFF
# 12. Difference between the last right's application date and its grant date, RIGHT_END_ASK_FB_DIFF
# 13. Difference between the first and last application dates, RIGHT_FIRST_END_ASK_DIFF
# 14. Difference between the first and last grant dates, RIGHT_FIRST_END_FB_DIFF
# 15. Difference between the first application date and the last grant date, RIGHT_FIRST_ASK_END_FB_DIFF
#
# 16. Difference between the first right's application date and the company registration date, RIGHT_FIRST_ASK_RGYEAR_DIFF
# 17. Difference between the first right's grant date and the company registration date, RIGHT_FIRST_FB_RGYEAR_DIFF
# 18. Difference between the last right's application date and the company registration date, RIGHT_END_ASK_RGYEAR_DIFF
# 19. Difference between the last right's grant date and the company registration date, RIGHT_END_FB_RGYEAR_DIFF
#
# 20. Difference between the first right's application date and the company's first change date, RIGHT_FIRST_ASK_FIRST_CHANGE_DIFF
# 21. Difference between the first right's grant date and the company's first change date, RIGHT_FIRST_FB_FIRST_CHANGE_DIFF
# 22. Difference between the last right's application date and the company's first change date, RIGHT_END_ASK_FIRST_CHANGE_DIFF
# 23. Difference between the last right's grant date and the company's first change date, RIGHT_END_FB_FIRST_CHANGE_DIFF
#
# 24. Difference between the first right's application date and the company's last change date, RIGHT_FIRST_ASK_END_CHANGE_DIFF
# 25. Difference between the first right's grant date and the company's last change date, RIGHT_FIRST_FB_END_CHANGE_DIFF
# 26. Difference between the last right's application date and the company's last change date, RIGHT_END_ASK_END_CHANGE_DIFF
# 27. Difference between the last right's grant date and the company's last change date, RIGHT_END_FB_END_CHANGE_DIFF
#
# ---------------------------------------------------------------------------------------------------
# 28. Ratio of the enterprise's right count to the average right count over all enterprises, RIGHT_CNT_ALL_RATE
#
# 29. Average right count for the enterprise's industry category HY, RIGHT_HY_CNT_AVG
# 30. Ratio of the HY-category average right count to the overall average right count, RIGHT_HY_CNT_ALL_RATE
# 31. Ratio of the enterprise's right count to its HY-category average, RIGHT_CNT_HY_RATE
#
# -------------------------------------------------------------------------------------------------
#
# 29. Average right count for the enterprise's category ETYPE, RIGHT_ETYPE_CNT_AVG
# 30. Ratio of the ETYPE-category average right count to the overall average right count, RIGHT_ETYPE_CNT_ALL_RATE
# 31. Ratio of the enterprise's right count to its ETYPE-category average, RIGHT_CNT_ETYPE_RATE
#
# The corresponding maximum-value features are added as well.
#
# New features for the second round: sliding-window features with windows of 1, 2, 3 and 5 years,
# computed on the data from the most recent k (k in [1, 2, 3, 5]) years; mainly counts and frequencies.
# The windows end at 2017-08 and start k years before it.
# 1. Number of right open/close events within the previous k years:
# RIGHT_K_OPEN_CNT, RIGHT_K_CLOSE_CNT
#
# In[7]:
df_all = pd.read_csv("../data/alldata/df_data1234.csv")
df_right = pd.read_csv("../data/public/5right.csv")
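# Illustrative sketch: the simplest count features listed above (RIGHT_CNT,
# RIGHT_TYPE_CNT) can be computed directly with groupby; shown here only as an
# example, the full feature construction follows below.
right_cnt_example = df_right.groupby('EID')['TYPECODE'].size()
right_type_cnt_example = df_right.groupby('EID')['RIGHTTYPE'].nunique()
print(right_cnt_example.head())
print(right_type_cnt_example.head())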
# In[8]:
# df_all.info()
# df_all.head()
# In[9]:
# df_right.info()
# df_right.head()
df_right = df_right.sort_values(['ASKDATE','FBDATE'])
# In[10]:
# set(df_right['RIGHTTYPE'])
def settime(x):
    # Shift a 'YYYY-MM' month string forward by two months; used below to fill
    # missing FBDATE values for right types 40 and 50.
    y = int(x[:x.find('-')])
    m = int(x[x.find('-')+1:])
    m += 2
    # roll the year over only when the month exceeds 12
    y += (m - 1) // 12
    m = (m - 1) % 12 + 1
    if m < 10:
        return str(y) + "-0" + str(m)
    return str(y) + "-" + str(m)
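# Illustrative sanity checks for settime: adding two months should roll the
# year over only past December.
assert settime('2015-03') == '2015-05'
assert settime('2015-10') == '2015-12'
assert settime('2015-11') == '2016-01'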
df_right.loc[df_right[df_right['RIGHTTYPE']==11][df_right['FBDATE'].isnull()].index,'FBDATE'] = df_right[df_right['RIGHTTYPE']==11][df_right['FBDATE'].isnull()]['ASKDATE']
df_right.loc[df_right[df_right['RIGHTTYPE']==40][df_right['FBDATE'].isnull()].index,'FBDATE'] = df_right[df_right['RIGHTTYPE']==40][df_right['FBDATE'].isnull()]['ASKDATE'].apply(settime)
df_right.loc[df_right[df_right['RIGHTTYPE']==50][df_right['FBDATE'].isnull()].index,'FBDATE'] = df_right[df_right['RIGHTTYPE']==50][df_right['FBDATE'].isnull()]['ASKDATE'].apply(settime)
# In[11]:
EIDS = set(df_right['EID'])
len(EIDS)
# In[12]:
columns = df_right.columns
df_xright = pd.DataFrame(columns=columns)
# print(columns)
# In[13]:
k = 0
for EID in EIDS:
if k%3000 == 0:
        print('Processing iteration %d --------->' % k)
k+=1
tmp = df_right[df_right['EID'] == EID]
row = [EID,tmp['RIGHTTYPE'].values,tmp['TYPECODE'].values,tmp['ASKDATE'].values,tmp['FBDATE'].values]
df_xright = df_xright.append( | pd.Series(row,columns) | pandas.Series |
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import pandas
EXPECTED_COLUMNS = [
"source",
"contig",
"interbase_start",
"interbase_end",
"allele",
"count",
]
def allele_support_df(loci, sources):
"""
Returns a DataFrame of allele counts for all given loci in the read sources
"""
return pandas.DataFrame(
allele_support_rows(loci, sources),
columns=EXPECTED_COLUMNS)
def allele_support_rows(loci, sources):
for source in sources:
logging.info("Reading from: %s (%s)" % (source.name, source.filename))
for locus in loci:
grouped = dict(source.pileups([locus]).group_by_allele(locus))
if grouped:
items = grouped.items()
else:
items = [("N" * (locus.end - locus.start), None)]
for (allele, group) in items:
d = collections.OrderedDict([
("source", source.name),
("contig", locus.contig),
("interbase_start", str(locus.start)),
("interbase_end", str(locus.end)),
("allele", allele),
("count", group.num_reads() if group is not None else 0),
])
yield pandas.Series(d)
def variant_support(variants, allele_support_df, ignore_missing=False):
'''
Collect the read evidence support for the given variants.
Parameters
----------
variants : iterable of varcode.Variant
allele_support_df : dataframe
Allele support dataframe, as output by the varlens-allele-support tool.
It should have columns: source, contig, interbase_start, interbase_end,
allele. The remaining columns are interpreted as read counts of various
subsets of reads (e.g. all reads, non-duplicate reads, etc.)
ignore_missing : boolean
If True, then varaints with no allele counts will be interpreted as
having 0 depth. If False, then an exception will be raised if any
variants have no allele counts.
Returns
----------
A pandas.Panel4D frame with these axes:
labels (axis=0) : the type of read being counted, i.e. the read count
fields in allele_support_df.
items (axis=1) : the type of measurement (num_alt, num_ref, num_other,
total_depth, alt_fraction, any_alt_fraction)
major axis (axis=2) : the variants
minor axis (axis=3) : the sources
'''
missing = [
c for c in EXPECTED_COLUMNS if c not in allele_support_df.columns
]
if missing:
raise ValueError("Missing columns: %s" % " ".join(missing))
# Ensure our start and end fields are ints.
allele_support_df[["interbase_start", "interbase_end"]] = (
allele_support_df[["interbase_start", "interbase_end"]].astype(int))
sources = sorted(allele_support_df["source"].unique())
allele_support_dict = collections.defaultdict(dict)
for (i, row) in allele_support_df.iterrows():
key = (
row['source'],
row.contig,
row.interbase_start,
row.interbase_end)
allele_support_dict[key][row.allele] = row["count"]
# We want an exception on bad lookups, so convert to a regular dict.
allele_support_dict = dict(allele_support_dict)
dataframe_dicts = collections.defaultdict(
lambda: collections.defaultdict(list))
for variant in variants:
for source in sources:
key = (source, variant.contig, variant.start - 1, variant.end)
try:
alleles = allele_support_dict[key]
except KeyError:
message = (
"No allele counts in source %s for variant %s" % (
source, str(variant)))
if ignore_missing:
logging.warning(message)
alleles = {}
else:
raise ValueError(message)
alt = alleles.get(variant.alt, 0)
ref = alleles.get(variant.ref, 0)
total = sum(alleles.values())
other = total - alt - ref
dataframe_dicts["num_alt"][source].append(alt)
dataframe_dicts["num_ref"][source].append(ref)
dataframe_dicts["num_other"][source].append(other)
dataframe_dicts["total_depth"][source].append(total)
dataframe_dicts["alt_fraction"][source].append(
float(alt) / max(1, total))
dataframe_dicts["any_alt_fraction"][source].append(
float(alt + other) / max(1, total))
dataframes = dict(
(label, pandas.DataFrame(value, index=variants))
for (label, value) in dataframe_dicts.items())
return | pandas.Panel(dataframes) | pandas.Panel |
import datetime as dt
import os
import unittest
import numpy as np
import pandas as pd
import devicely
class SpacelabsTestCase(unittest.TestCase):
READ_PATH = "tests/SpaceLabs_test_data/spacelabs.abp"
WRITE_PATH = "tests/SpaceLabs_test_data/spacelabs_written.abp"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.expected_subject = "000002"
timestamps = pd.to_datetime([
"1.1.99 17:03",
"1.1.99 17:05",
"1.1.99 17:07",
"1.1.99 17:09",
"1.1.99 17:11",
"1.1.99 17:13",
"1.1.99 17:25",
"1.1.99 17:28",
"1.1.99 17:31",
"1.1.99 17:34",
"1.1.99 17:36",
"1.1.99 17:39",
"1.1.99 23:42",
"1.1.99 23:59",
"1.2.99 00:01",
])
self.expected_data = pd.DataFrame({
"timestamp": timestamps,
"date": timestamps.map(lambda timestamp: timestamp.date()),
"time": timestamps.map(lambda timestamp: timestamp.time()),
"SYS(mmHg)": [11, 142, 152, 151, 145, 3, 4, 164, 154, 149, 153, 148, 148, 148, 148],
"DIA(mmHg)": [0, 118, 112, 115, 110, 0, 0, 119, 116, 119, 118, 114, 114, 114, 114],
"ACC_x": [0, 99, 95, 96, 91, 0, 0, 95, 95, 98, 96, 93, 93, 93, 93],
"ACC_y": [0, 61, 61, 61, 59, 0, 0, 63, 63, 63, 60, 62, 62, 62, 62],
"ACC_z": 15 * [np.nan],
"error": ["EB", np.nan, np.nan, np.nan, np.nan, "EB", "EB", np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
})
self.expected_metadata = {
"PATIENTINFO": {
"DOB": "16.09.1966",
"RACE": "native american"
},
"REPORTINFO": {
"PHYSICIAN": "Dr. <NAME>",
"NURSETECH": "admin",
"STATUS": "NOTCONFIRMED",
"CALIPERSUMMARY": {
"COUNT": "0"
},
},
}
def setUp(self):
self.spacelabs_reader = devicely.SpacelabsReader(self.READ_PATH)
def test_read(self):
# Tests if a basic reading operation.
pd.testing.assert_frame_equal(self.spacelabs_reader.data,
self.expected_data)
self.assertEqual(self.spacelabs_reader.subject, self.expected_subject)
self.assertEqual(self.spacelabs_reader.metadata,
self.expected_metadata)
def test_deidentify(self):
# Tests if the SpacelabsReader.deidentify method removes all patient metadata.
self.spacelabs_reader.deidentify()
self.assertEqual(self.spacelabs_reader.subject, "")
self.assertEqual(
self.spacelabs_reader.metadata,
{
"PATIENTINFO": {
"DOB": "",
"RACE": ""
},
"REPORTINFO": {
"PHYSICIAN": "",
"NURSETECH": "",
"STATUS": "",
"CALIPERSUMMARY": {
"COUNT": ""
},
},
},
)
def test_write(self):
# Tests the SpacelabsReader.write operation by writing, reading again and comparing the old and new signals.
self.spacelabs_reader.write(self.WRITE_PATH)
new_reader = devicely.SpacelabsReader(self.WRITE_PATH)
pd.testing.assert_frame_equal(new_reader.data,
self.spacelabs_reader.data)
self.assertEqual(new_reader.metadata, self.spacelabs_reader.metadata)
self.assertEqual(new_reader.subject, self.spacelabs_reader.subject)
os.remove(self.WRITE_PATH)
def test_random_timeshift(self):
earliest_possible_shifted_time_col = pd.to_datetime([
'1997-01-01 17:03:00',
'1997-01-01 17:05:00',
'1997-01-01 17:07:00',
'1997-01-01 17:09:00',
'1997-01-01 17:11:00',
'1997-01-01 17:13:00',
'1997-01-01 17:25:00',
'1997-01-01 17:28:00',
'1997-01-01 17:31:00',
'1997-01-01 17:34:00',
'1997-01-01 17:36:00',
'1997-01-01 17:39:00',
'1997-01-01 23:42:00',
'1997-01-01 23:59:00',
'1997-01-02 00:01:00'
])
latest_possible_shifted_time_col = pd.to_datetime([
'1998-12-02 17:03:00',
'1998-12-02 17:05:00',
'1998-12-02 17:07:00',
'1998-12-02 17:09:00',
'1998-12-02 17:11:00',
'1998-12-02 17:13:00',
'1998-12-02 17:25:00',
'1998-12-02 17:28:00',
'1998-12-02 17:31:00',
'1998-12-02 17:34:00',
'1998-12-02 17:36:00',
'1998-12-02 17:39:00',
'1998-12-02 23:42:00',
'1998-12-02 23:59:00',
'1998-12-03 00:01:00'
])
old_timestamp_column = self.spacelabs_reader.data["timestamp"].copy()
self.spacelabs_reader.timeshift()
new_timestamp_column = self.spacelabs_reader.data["timestamp"]
self.assertTrue((earliest_possible_shifted_time_col <= new_timestamp_column).all())
self.assertTrue((new_timestamp_column <= latest_possible_shifted_time_col).all())
new_date_column = self.spacelabs_reader.data["date"]
new_time_column = self.spacelabs_reader.data["time"]
testing_timestamp_column = pd.Series([
dt.datetime.combine(new_date_column[i], new_time_column[i])
for i in range(len(self.spacelabs_reader.data))
])
pd.testing.assert_series_equal(new_timestamp_column, testing_timestamp_column, check_names=False)
def test_drop_EB(self):
# The drop_EB method should make timestamp the index column and remove all rows with 'EB' entries in the error column.
self.expected_data.drop(index=[0, 5, 6], inplace=True)
self.expected_data.set_index("timestamp", inplace=True)
self.spacelabs_reader.drop_EB()
pd.testing.assert_frame_equal(self.spacelabs_reader.data, self.expected_data)
        # When run again, drop_EB should not do anything.
self.spacelabs_reader.drop_EB()
pd.testing.assert_frame_equal(self.spacelabs_reader.data, self.expected_data)
def test_set_window_column(self):
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "bfill")
window_start = pd.to_datetime("1.1.99 17:02:30")
window_end = pd.to_datetime("1.1.99 17:03:00")
self.assertEqual(window_start, self.spacelabs_reader.data.loc[0, "window_start"])
self.assertEqual(window_end, self.spacelabs_reader.data.loc[0, "window_end"])
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "bffill")
window_start = pd.to_datetime("1.1.99 17:02:45")
window_end = pd.to_datetime("1.1.99 17:03:15")
self.assertEqual(window_start, self.spacelabs_reader.data.loc[0, "window_start"])
self.assertEqual(window_end, self.spacelabs_reader.data.loc[0, "window_end"])
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "ffill")
window_start = pd.to_datetime("1.1.99 17:03:00")
window_end = pd.to_datetime("1.1.99 17:03:30")
self.assertEqual(window_start, self.spacelabs_reader.data.loc[0, "window_start"])
self.assertEqual(window_end, self.spacelabs_reader.data.loc[0, "window_end"])
def test_set_window_index(self):
self.spacelabs_reader.drop_EB()
self.spacelabs_reader.set_window(dt.timedelta(seconds=30), "bfill")
window_start = pd.to_datetime("1.1.99 17:04:30")
window_end = | pd.to_datetime("1.1.99 17:05:00") | pandas.to_datetime |
import os
import types
import pandas as pd
import data_go_kr as dgk
def category_from_url(url:str) -> str:
return os.path.basename( os.path.dirname(url) )
for k,v in dgk.api.__dict__.items():
if isinstance(v, types.ModuleType):
print(k,v)
lst_name = []
lst_desc = []
lst_url = []
lst_cat = []
lst_flag = []
for k,v in dgk.api.__dict__.items():
if isinstance(v, types.ModuleType):
print(k)
print(' ', v.SVC_DESC)
print(' ', v.SVC_URL)
print(' ', category_from_url(v.SVC_URL) )
lst_name.append(k)
lst_desc.append(v.SVC_DESC)
lst_url.append(v.SVC_URL)
lst_cat.append(category_from_url(v.SVC_URL) )
lst_flag.append(v.SVC_FLAG)
# print(k)
# print(k)
df = pd.DataFrame( {
'cat': lst_cat,
'name' : lst_name,
'desc' : lst_desc,
'url' : lst_url,
'flag' : lst_flag
})
# df = df.sort_values(by=['cat', 'name'] )
# df = df.sort_values(by=['url', 'name'] )
# df.style.set_properties(**{'text-align': 'left'})
# df.style.set_properties(**{'text-align': 'left'}).set_table_styles([ dict(selector='th', props=[('text-align', 'left')] ) ])
| pd.set_option('display.colheader_justify', 'left') | pandas.set_option |
#!/usr/bin/env python3
"""tests.compare.compare.py: Auxiliary variables for tests.compare"""
import pandas as pd
from exfi.io.read_bed import read_bed3
from exfi.io.bed import BED3_COLS, BED3_DTYPES
from exfi.compare import \
TP_DF_COLS, TP_DF_DTYPES, \
STATS_COLS, STATS_DTYPES
BED3_EMPTY_FN = "tests/compare/empty.bed"
BED3_TRUE_FN = "tests/compare/true.bed"
BED3_PRED_FN = "tests/compare/pred.bed"
BED3_EMPTY = read_bed3(BED3_EMPTY_FN)
BED3_TRUE = read_bed3(BED3_TRUE_FN)
BED3_PRED = read_bed3(BED3_PRED_FN)
TP_DF = pd.DataFrame(
data=[
['ENSDART00000000004', '474', '542', 'ENSDART00000000004', '474', '542', '68'],
['ENSDART00000000004', '542', '1311', 'ENSDART00000000004', '542', '1311', '769'],
['ENSDART00000000004', '1413', '2475', 'ENSDART00000000004', '1413', '2476', '1062'],
['ENSDART00000000005', '0', '1655', 'ENSDART00000000005', '0', '1656', '1655'],
['ENSDART00000000005', '1656', '1812', 'ENSDART00000000005', '1656', '1813', '156'],
['ENSDART00000000005', '1812', '1949', 'ENSDART00000000005', '1813', '1950', '136'],
['ENSDART00000000005', '2289', '2603', 'ENSDART00000000005', '2290', '2604', '313']
]
)
FP_DF = pd.DataFrame(
data=[
['ENSDART00000000004', '0', '47'],
['ENSDART00000000004', '45', '240'],
['ENSDART00000000004', '239', '339'],
['ENSDART00000000004', '338', '474'],
['ENSDART00000000004', '1310', '1414'],
['ENSDART00000000005', '1948', '2101'],
['ENSDART00000000005', '2100', '2207'],
['ENSDART00000000005', '2205', '2292']
]
)
FN_DF = pd.DataFrame(
data=[
['ENSDART00000000004', '0', '48'],
['ENSDART00000000004', '48', '241'],
['ENSDART00000000004', '241', '340'],
['ENSDART00000000004', '340', '474'],
['ENSDART00000000004', '1311', '1413'],
['ENSDART00000000005', '1950', '2102'],
['ENSDART00000000005', '2102', '2206'],
['ENSDART00000000005', '2206', '2290']
]
)
TRUE_POSITIVES = TP_DF\
.rename(columns={i: j for i, j in enumerate(TP_DF_COLS)})\
.astype(dtype=TP_DF_DTYPES)\
.drop(columns=6)
FALSE_POSITIVES = FP_DF\
.rename(columns={i: j for i, j in enumerate(BED3_COLS)})\
.astype(BED3_DTYPES)
FALSE_NEGATIVES = FN_DF\
.rename(columns={i: j for i, j in enumerate(BED3_COLS)})\
.astype(BED3_DTYPES)
CLASSIFICATION_EMPTY = {
'true_positives': pd.DataFrame(columns=TP_DF_COLS)\
.astype(dtype=TP_DF_DTYPES),
'false_positives': pd.DataFrame(columns=BED3_COLS)\
.astype(dtype=BED3_DTYPES),
'false_negatives': | pd.DataFrame(columns=BED3_COLS) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 10 11:56:35 2017
@author: Madhav.L
"""
import pandas as pd
from sklearn import tree
from sklearn import model_selection
import io
import pydot
import os
os.environ["PATH"] += os.pathsep + 'D:/datascience/graphviz-2.38/release/bin/'
#returns current working directory
os.getcwd()
#changes working directory
os.chdir("D:/datascience/titanic/")
titanic_train = pd.read_csv("train.csv")
print(type(titanic_train))
#EDA
titanic_train.shape
titanic_train.info()
#Apply one hot encoding
titanic_train1 = pd.get_dummies(titanic_train, columns=['Pclass', 'Sex', 'Embarked'])
X_train = titanic_train1.drop(['PassengerId','Age','Cabin','Ticket', 'Name','Survived'], 1)
y_train = titanic_train['Survived']
dt1 = tree.DecisionTreeClassifier()
#Apply K-fold technique and find out the Cross Validation (CV) score.
cv_scores1 = model_selection.cross_val_score(dt1, X_train, y_train, cv=10)
print(cv_scores1) #Return type is a [List] of 10 scores.
print(cv_scores1.mean()) #Find out the mean of CV scores
dt1.fit(X_train,y_train)
print(dt1.score(X_train,y_train))
titanic_test = | pd.read_csv("test.csv") | pandas.read_csv |
import csv
import os
import re
import numpy as np
import pandas as pd
from functools import reduce
from modules.classes.item_id_parser import ItemIDParser
def map_dict(elem, dictionary):
if elem in dictionary:
return dictionary[elem]
else:
return np.nan
def create_time_feature(series, window_size):
"""
Transform time string of format "DD:MM:YY HH:MM:SS" into "DD:MM:YY {Number}"
Parameters
----------
series: object
The time strings as series
window_size: int
Number of hours for one time step
Returns
-------
Transformed time strings
"""
    # The time string is of format "DD:MM:YY HH:MM:SS"
time_strings = series.astype('str').str.split(' ')
# After splitting it is of format: "DD:MM:YY" and "HH:MM:SS"
# Extract the HH and divide them by window_size to get the portion of the day this hour lies in
# Then add it to the day part of the string
return time_strings.apply(lambda x: f'{x[0]}_' + str(int(int(x[1].split(':')[0]) / window_size)))
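# Illustrative usage (example values assumed): with a 12-hour window, 05:00
# falls into block 0 of its day and 19:00 into block 1.
if __name__ == '__main__':
    example_times = pd.Series(['2101-01-01 05:00:00', '2101-01-01 19:00:00'])
    print(create_time_feature(example_times, window_size=12).tolist())
    # expected: ['2101-01-01_0', '2101-01-01_1']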
class MimicParser(object):
"""
This class processes a MIMIC database into a single file
"""
def __init__(self, mimic_folder_path, folder, file_name, id_column, label_column, mimic_version=4):
if mimic_version == 4:
id_column = id_column.lower()
label_column = label_column.lower()
elif mimic_version == 3:
id_column = id_column.upper()
label_column = label_column.upper()
else:
raise Exception(f"Unsupported Mimic Version: {mimic_version}")
self.mimic_version = mimic_version
self.mimic_folder_path = mimic_folder_path
self.output_folder = f'{mimic_folder_path}/{folder}'
self.base_file_name = file_name
self.standard_path = f'{self.output_folder}/{self.base_file_name}'
self.pid = ItemIDParser(mimic_folder_path, id_column, label_column)
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
# Output paths for the files produced by methods below
self.rt_path = self.standard_path + '_0_reduced'
self.cdb_path = self.standard_path + '_1_24hrs_blocks'
self.aac_path = self.standard_path + '_2_p_admissions'
self.apc_path = self.standard_path + '_3_p_patients'
self.ap_path = self.standard_path + '_4_p_scripts'
self.aii_path = self.standard_path + '_5_p_icds'
self.an_path = self.standard_path + '_6_p_notes'
def reduce_total(self):
"""
Filters out rows from CHARTEVENTS.csv that are not feature relevant
"""
feature_relevant_columns = ['subject_id', 'hadm_id', 'itemid', 'charttime', 'value', 'valuenum']
if self.mimic_version == 3:
feature_relevant_columns += ['icustay_id']
elif self.mimic_version == 4:
feature_relevant_columns += ['stay_id']
feature_dict = self.pid.get_feature_dictionary()
chunksize = 10000000
mode = 'w'
header = True
print('Start processing df')
for i, df_chunk in enumerate(
pd.read_csv(self.mimic_folder_path + '/CHARTEVENTS.csv', iterator=True, chunksize=chunksize)):
print(f'\rChunk number: {i}', end='')
df_chunk.columns = df_chunk.columns.str.lower()
df = df_chunk[df_chunk['itemid'].isin(reduce(lambda x, y: x.union(y), feature_dict.values()))]
df = df.dropna(inplace=False, axis=0, subset=feature_relevant_columns)
if i == 1:
mode = 'a'
header = None
df.to_csv(self.rt_path + '.csv', index=False, columns=feature_relevant_columns,
header=header, mode=mode)
print(f"\r Finished reducing chart events")
def create_day_blocks(self, window_size, create_statistics=False):
"""
Create the time feature as well as optionally std, min and max
Parameters
----------
window_size: int
Number of hours for one time step
create_statistics: bool
Whether or not to create std, min and max columns
Returns
-------
Dict of subject ID and day
"""
reversed_feature_dict = self.pid.get_reversed_feature_dictionary()
df = pd.read_csv(self.rt_path + '.csv')
print("Loaded df")
# create time feature
df['chartday'] = create_time_feature(df['charttime'], window_size)
print("New feature chartday")
df['hadmid_day'] = df['hadm_id'].astype('str') + '_' + df['chartday']
print("New feature hadmid_day")
df['features'] = df['itemid'].apply(lambda x: reversed_feature_dict[x])
print("New feature features")
hadm_dict = dict(zip(df['hadmid_day'], df['subject_id']))
df2 = pd.pivot_table(df, index='hadmid_day', columns='features',
values='valuenum', fill_value=np.nan)
print("Start statistics")
if create_statistics:
df3 = pd.pivot_table(df, index='hadmid_day', columns='features',
values='valuenum', aggfunc=lambda x: np.std(x), fill_value=0)
df3.columns = ["{0}_std".format(i) for i in list(df2.columns)]
print("std finished")
df4 = pd.pivot_table(df, index='hadmid_day', columns='features',
values='valuenum', aggfunc=np.amin, fill_value=np.nan)
df4.columns = ["{0}_min".format(i) for i in list(df2.columns)]
print("min finished")
df5 = pd.pivot_table(df, index='hadmid_day', columns='features',
values='valuenum', aggfunc=np.amax, fill_value=np.nan)
df5.columns = ["{0}_max".format(i) for i in list(df2.columns)]
print("max finished")
df2 = pd.concat([df2, df3, df4, df5], axis=1)
df2.columns = df2.columns.str.lower()
if 'tobacco' in df2.columns:
            df2['tobacco'] = df2['tobacco'].apply(lambda x: np.around(x))
df2 = df2.drop(['tobacco_std', 'tobacco_min', 'tobacco_max'], axis=1, errors='ignore')
df2 = df2.drop(['daily weight_std', 'daily weight_min', 'daily weight_max'], axis=1, errors='ignore')
rel_columns = [i for i in list(df2.columns) if '_' not in i]
for col in rel_columns:
if len(np.unique(df2[col])[np.isfinite(np.unique(df2[col]))]) <= 2:
print(col)
df2 = df2.drop([col + '_std', col + '_min', col + '_max'], axis=1, errors='ignore')
for i in list(df2.columns):
df2[i][df2[i] > df2[i].quantile(.95)] = df2[i].median()
df2[i].fillna(df2[i].median(), inplace=True)
df2['hadmid_day'] = df2.index
if 'pt' in df2.columns:
df2['inr'] = df2['inr'] + df2['pt']
df2['inr_std'] = df2['inr_std'] + df2['pt_std']
df2['inr_min'] = df2['inr_min'] + df2['pt_min']
df2['inr_max'] = df2['inr_max'] + df2['pt_max']
df2 = df2.drop(['pt', 'pt_std', 'pt_min', 'pt_max'], axis=1, errors='ignore')
df2.dropna(thresh=int(0.75 * len(df2.columns)), axis=0, inplace=True)
df2.to_csv(self.cdb_path + '.csv', index=False)
print("Created Dayblocks")
return hadm_dict
def add_admissions_columns(self):
"""
Add admission time
"""
df = pd.read_csv(f'{self.mimic_folder_path}/ADMISSIONS.csv')
df.columns = df.columns.str.lower()
admittime_dict = dict(zip(df['hadm_id'], df['admittime']))
file_name = self.cdb_path + '.csv'
df_shard = pd.read_csv(file_name)
df_shard['hadm_id'] = df_shard['hadmid_day'].str.split('_').apply(lambda x: x[0])
df_shard['hadm_id'] = df_shard['hadm_id'].astype('int')
df_shard['admittime'] = df_shard['hadm_id'].apply(lambda x: map_dict(x, admittime_dict))
df_shard.to_csv(self.aac_path + '.csv', index=False)
print("Added Admittime")
def add_patient_columns(self, hadm_dict):
"""
Add gender columns
Parameters
----------
hadm_dict: dict
Dict of subject ID and day
"""
df = pd.read_csv(self.mimic_folder_path + '/PATIENTS.csv')
df.columns = df.columns.str.lower()
gender_dict = dict(zip(df['subject_id'], df['gender']))
df_shard = pd.read_csv(self.aac_path + '.csv')
df_shard['subject_id'] = df_shard['hadmid_day'].apply(lambda x: map_dict(x, hadm_dict))
df_shard['admityear'] = df_shard['admittime'].str.split('-').apply(lambda x: x[0]).astype('int')
df_shard['gender'] = df_shard['subject_id'].apply(lambda x: map_dict(x, gender_dict))
if 'dob' in df.columns:
dob_dict = dict(zip(df['subject_id'], df['dob']))
df_shard['dob'] = df_shard['subject_id'].apply(lambda x: map_dict(x, dob_dict))
df_shard['yob'] = df_shard['dob'].str.split('-').apply(lambda x: x[0]).astype('int')
# Date of birth replaced by anchor_age
df_shard['age'] = df_shard['admityear'].subtract(df_shard['yob'])
gender_dummied = pd.get_dummies(df_shard['gender'], drop_first=True)
        gender_dummied = gender_dummied.rename(columns={'M': 'Male', 'F': 'Female'})
columns = list(df_shard.columns)
columns.remove('gender')
df_shard = | pd.concat([df_shard[columns], gender_dummied], axis=1) | pandas.concat |
import json
from pathlib import Path
from itertools import repeat
from collections import OrderedDict
import pandas as pd
import os
import warnings
def check_input(text_list):
"""
    Check the input list of texts for prediction.
:param text_list:
:return:
"""
# 一个str的话,转list
if isinstance(text_list, str):
text_list = [text_list, ]
    # Before processing: number of texts and their maximum length
len_ = len(text_list)
max_len_ = max([len(i) for i in text_list])
    # Drop texts of length 0
text_list = [i for i in text_list if len(i) != 0]
    # Truncate texts to a maximum length of 256
for idx, text in enumerate(text_list):
if len(text) > 256:
text_list[idx] = text[:256]
    # Warnings
    if len(text_list) == 0:
        raise NotImplementedError("All input texts are empty (length 0)!")
    if len(text_list) < len_:
        warnings.warn("The input contains texts of length 0; they will be ignored!")
        # print("The input contains texts of length 0; they will be ignored!")
    if max_len_ > 256:
        warnings.warn("The input contains texts longer than 256 characters; they will be truncated!")
        # print("The input contains texts longer than 256 characters; they will be truncated!")
return text_list
def write_csv(text_list):
"""
    Write the input prediction list to a csv, overwriting any existing csv.
:param text_list:
:return:
"""
df = pd.DataFrame({'label': {}, 'review': {}})
for idx, val in enumerate(text_list):
        # 2 means "no category assigned yet"
new_line = [str(2), val]
df.loc[idx] = new_line
df.to_csv("data/prediction/text_list.csv", index=False)
def delete_temp_file(file_path):
"""
    Delete the temporary files produced for the prediction list: the text-embedding pkl and the word-vector pkl.
:return:
"""
if os.path.exists(file_path):
os.remove(file_path)
else:
print("The file \'" + file_path + "\' does not exist.")
def ensure_dir(dirname):
"""
    Make sure the directory exists; create it if it does not.
"""
dirname = Path(dirname)
if not dirname.is_dir():
dirname.mkdir(parents=True, exist_ok=False)
def read_json(filename):
"""
    Read a json file.
    :param filename: file path
:return: json.loads()
"""
# Path(),PurePath subclass that can make system calls.
filename = Path(filename)
"""
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
"""
    # In 'rt' mode, 'r' reads text and 't' automatically converts \r\n to \n (text mode).
    with filename.open('rt') as handle:
        # Return the parsed json as an OrderedDict (an ordered dictionary)
        # object_hook= makes json.load build that kind of object
        """
        json.load(): load a json-format file
        json.loads(): decode a json-format string
"""
return json.load(handle, object_hook=OrderedDict)
def write_json(content, filename):
"""
    Write a json file (not necessarily a new one).
    :param content: the content, as json
    :param filename: file path and name
:return: json.loads()
"""
filename = Path(filename)
    # 'wt' is "write + text" mode
    with filename.open('wt') as handle:
        # indent: pretty-print with an indentation of 4
json.dump(content, handle, indent=4, sort_keys=False)
def inf_loop(data_loader):
"""
    Make the data_loader passed in repeat its data endlessly.
    wrapper function for endless data loader.
    :param data_loader:
    :return: items from the loader wrapped inside data_loader
    """
    # repeat() returns the same iterable over and over
    for loader in repeat(data_loader):
        # iterate through and yield the data inside the loader
yield from loader
class MetricTracker:
"""
    A metric tracking helper (only used to record metric results).
"""
def __init__(self, *keys, writer=None):
        # writer object
self.writer = writer
        # data object (pd.DataFrame),
        # rows: keys, i.e. accuracy, f1, auc
        # columns: [total, counts, average]
self._data = | pd.DataFrame(index=keys, columns=['total', 'counts', 'average']) | pandas.DataFrame |
# %%
from datetime import datetime, timedelta
from pathlib import Path
import random
import pandas as pd
# %%
data = pd.read_csv("../data/base2020.csv", sep=";")
# %%
def report(state, date, last_date, last_state, age, sex):
if last_state is not None:
events.append(dict(
from_state=last_state,
to_state=state,
age=age,
sex=sex,
days=(date - last_date).days if not pd.isna(last_date) else 0,
))
return date, state
# %%
events = []
for i,r in data.iterrows():
# NOTA: NO HAY SEXO EN LOS DATOS!!!!
sex = random.choice(["M", "F"])
age = r["Edad2020"]
date = pd.NaT
state = None
symptoms_date = pd.to_datetime(r['FIS2020'], format="%m/%d/%Y", errors="coerce")
hospital_date = pd.to_datetime(r['Fingreso2020'], format="%m/%d/%Y", errors="coerce")
confirm_date = pd.to_datetime(r['F.Conf2020'], format="%m/%d/%Y", errors="coerce")
uci_enter_date = pd.to_datetime(r['FechaingresoUCI3112'], format="%m/%d/%Y", errors="coerce")
uci_exit_date = pd.to_datetime(r['FechaegresoUTI'], format="%m/%d/%Y", errors="coerce")
release_date = pd.to_datetime(r['FechaAltaN'], format="%m/%d/%Y", errors="coerce")
if | pd.isna(confirm_date) | pandas.isna |
# Author: <NAME>, PhD
#
# Email: <EMAIL>
#
#
# Ref: https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html
# Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.jaccard.html#scipy.spatial.distance.jaccard
# Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
# Ref: https://sourceforge.net/p/rdkit/mailman/message/24426410/
# Ref: https://python-graph-gallery.com/197-available-color-palettes-with-matplotlib/
# Ref: https://stackoverflow.com/questions/57568311/matplotlib-scatter-issue-with-python-3-x
# Ref: https://www.science-emergence.com/Articles/How-to-create-a-scatter-plot-with-several-colors-in-matplotlib-/
# Ref: https://www.pluralsight.com/guides/choosing-color-palettes
# Ref: https://www.nceas.ucsb.edu/~frazier/RSpatialGuides/colorPaletteCheatsheet.pdf
# Ref: https://htmlcolorcodes.com/color-picker/
# #1179B0
# #F58C30
# #74BB5A
# #BC412C
# #795B9A
# #764A0C
# #D37DB5
# #7A7A7A
# #B8C449
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import pandas as pd
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from scaffold_keys import smiles2bmscaffold, smiles2scaffoldkey, sk_distance
#import matplotlib
#matplotlib.use('agg')
#import matplotlib.pyplot as plt
#import seaborn as sns
import numpy as np
from sklearn.manifold import TSNE
import math
from knn import get_mol, get_fingerprint, is_valid
def read_data(fname):
df = pd.read_csv(fname, sep ='\t')
return (df)
def tr_expand_coords (df, source_col, id_col, delimiter):
df_orig = df
df = df[source_col].str.split(delimiter, expand = True)
nr_cols = len (df.columns)
columns = []
for i in range (nr_cols):
columns.append('Dim_' + str(i + 1))
df.columns = columns
df = df.astype('int32')
#df[id_col] = df_orig[id_col]
df = pd.concat([df_orig, df], axis = 1)
return (df)
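# Illustrative check (toy values assumed): a '3;7' coordinate string becomes
# integer columns Dim_1 and Dim_2 appended to the original frame.
if __name__ == '__main__':
    demo_df = pd.DataFrame({'ID': [1], 'Coord': ['3;7']})
    print(tr_expand_coords(demo_df, 'Coord', 'ID', ';'))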
def get_coordinates (hc, bucket_id):
# print (bucket_id)
coordinates = []
coordinates = hc.coordinates_from_distance(bucket_id - 1)
coordinate_str = ''
nr_dim = len (coordinates)
for i in range(nr_dim):
coordinate_str += str(coordinates[i]) + ';'
coordinate_str = coordinate_str[:-1]
return (coordinate_str)
def to_bitstring (fp):
fpstr = 'NA'
try:
fpstr = fp.ToBitString()
except:
fpstr = 'NA'
return (fpstr)
def fp_gen_with_errohandling (mol):
fp = None
try:
fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius = 3, nBits = 2048)
except:
fp = None
return (fp)
def generate_fp (df):
df['is_valid'] = df.apply (lambda x: is_valid(x['Structure']), axis = 1)
df = df[df['is_valid'] == True].copy()
df['mol'] = df.apply(lambda x: get_mol (x['Structure']), axis = 1)
print (df.dtypes)
print(df.columns)
print (df.head)
df['fp'] = df.apply (lambda x: fp_gen_with_errohandling(x['mol']), axis = 1)
print (df.columns)
df = df[df['fp'] != None].copy()
df['fp_str'] = df.apply (lambda x: to_bitstring(x['fp']), axis = 1)
df = df[df['fp_str'] != 'NA'].copy()
return (df)
def get_fp_np_array (fps):
first = True
fpl = 0
all_fp = []
fp_array = []
for i in range(len(fps)):
fp_array = []
fp = fps[i]
if first:
fpl = len(fp)
first = False
else:
if len(fp) != fpl:
print ('[ERROR] Fingerprint length mismatch. Terminating ...')
sys.exit (-1)
for j in range(len(fp)):
fp_array.append(int(fp[j]))
all_fp.append(fp_array)
all_fp = np.array(all_fp)
return (all_fp)
def embed_compounds (df, tsne_model):
df = generate_fp (df)
print (df.head)
X = list(df['fp_str'])
X = get_fp_np_array (X)
#print (X)
X_embedded = tsne_model.fit_transform(X)
print (X_embedded)
ids = list(df['ID'])
df_embedded = | pd.DataFrame ({'ID': ids, 'Dim_1': X_embedded[:,0], 'Dim_2': X_embedded[:,1]}) | pandas.DataFrame |
from matplotlib import pyplot as plt
import matplotlib.ticker as mtick
import pandas as pd
import json
from matplotlib import rcParams
# Plots the qualifier statistics for the LC-QuAD 2.0 dataset and Wikidata
line_width = 2
font_size = 15
rcParams.update({"figure.autolayout": True})
with open("./results/datasets_stats/results_lcquad2.0.json") as f:
data = json.load(f)
count_dict_lc_quad: dict = data["count_dict"]
sorted_lc_quad = sorted(count_dict_lc_quad.items(), key=lambda x: int(x[0]))
with open("./scripts/output.json") as f:
data = json.load(f)
count_dict_wikidata: dict = data["items"]["claims_counter"]
sorted_wikidata = sorted(count_dict_wikidata.items(), key=lambda x: int(x[0]))
x = [int(item[0]) for item in sorted_lc_quad]
y = [int(item[1]) for item in sorted_lc_quad]
y = [item / sum(y) * 100 for item in y]
panda_data = {x[idx]: [y[idx]] for idx in range(6)}
panda_data["> 5"] = sum(y[6:])
df_lc_quad = | pd.DataFrame(panda_data, index=["LC-QuAD 2.0"]) | pandas.DataFrame |
#!/usr/bin/env python
#ADAPTED FROM
#https://github.com/bio-ontology-research-group/deepgoplus/blob/master/evaluate_deepgoplus.py
import numpy as np
import pandas as pd
import click as ck
from sklearn.metrics import classification_report
from sklearn.metrics.pairwise import cosine_similarity
import sys
from collections import deque
import time
import logging
from sklearn.metrics import roc_curve, auc, matthews_corrcoef
from scipy.spatial import distance
from scipy import sparse
import math
#from utils2 import FUNC_DICT, Ontology, NAMESPACES
from matplotlib import pyplot as plt
#copied from utils.py
from collections import deque, Counter
import warnings
#import pandas as pd
#import numpy as np
#from xml.etree import ElementTree as ET
#import math
BIOLOGICAL_PROCESS = 'GO:0008150'
MOLECULAR_FUNCTION = 'GO:0003674'
CELLULAR_COMPONENT = 'GO:0005575'
FUNC_DICT = {
'cc': CELLULAR_COMPONENT,
'mf': MOLECULAR_FUNCTION,
'bp': BIOLOGICAL_PROCESS}
NAMESPACES = {
'cc': 'cellular_component',
'mf': 'molecular_function',
'bp': 'biological_process'
}
EXP_CODES = set([
'EXP', 'IDA', 'IPI', 'IMP', 'IGI', 'IEP', 'TAS', 'IC',])
# 'HTP', 'HDA', 'HMP', 'HGI', 'HEP'])
CAFA_TARGETS = set([
'10090', '223283', '273057', '559292', '85962',
'10116', '224308', '284812', '7227', '9606',
'160488', '237561', '321314', '7955', '99287',
'170187', '243232', '3702', '83333', '208963',
'243273', '44689', '8355'])
def is_cafa_target(org):
return org in CAFA_TARGETS
def is_exp_code(code):
return code in EXP_CODES
class Ontology(object):
def __init__(self, filename='data/go.obo', with_rels=False):
self.ont = self.load(filename, with_rels)
self.ic = None
def has_term(self, term_id):
return term_id in self.ont
def calculate_ic(self, annots):
cnt = Counter()
for x in annots:
cnt.update(x)
self.ic = {}
for go_id, n in cnt.items():
parents = self.get_parents(go_id)
if len(parents) == 0:
min_n = n
else:
min_n = min([cnt[x] for x in parents])
self.ic[go_id] = math.log(min_n / n, 2)
def get_ic(self, go_id):
if self.ic is None:
raise Exception('Not yet calculated')
if go_id not in self.ic:
return 0.0
return self.ic[go_id]
def load(self, filename, with_rels):
ont = dict()
obj = None
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
if line == '[Term]':
if obj is not None:
ont[obj['id']] = obj
obj = dict()
obj['is_a'] = list()
obj['part_of'] = list()
obj['regulates'] = list()
obj['alt_ids'] = list()
obj['is_obsolete'] = False
continue
elif line == '[Typedef]':
obj = None
else:
if obj is None:
continue
l = line.split(": ")
if l[0] == 'id':
obj['id'] = l[1]
elif l[0] == 'alt_id':
obj['alt_ids'].append(l[1])
elif l[0] == 'namespace':
obj['namespace'] = l[1]
elif l[0] == 'is_a':
obj['is_a'].append(l[1].split(' ! ')[0])
elif with_rels and l[0] == 'relationship':
it = l[1].split()
# add all types of relationships
obj['is_a'].append(it[1])
elif l[0] == 'name':
obj['name'] = l[1]
elif l[0] == 'is_obsolete' and l[1] == 'true':
obj['is_obsolete'] = True
if obj is not None:
ont[obj['id']] = obj
for term_id in list(ont.keys()):
for t_id in ont[term_id]['alt_ids']:
ont[t_id] = ont[term_id]
if ont[term_id]['is_obsolete']:
del ont[term_id]
for term_id, val in ont.items():
if 'children' not in val:
val['children'] = set()
for p_id in val['is_a']:
if p_id in ont:
if 'children' not in ont[p_id]:
ont[p_id]['children'] = set()
ont[p_id]['children'].add(term_id)
return ont
def get_anchestors(self, term_id):
if term_id not in self.ont:
return set()
term_set = set()
q = deque()
q.append(term_id)
while(len(q) > 0):
t_id = q.popleft()
if t_id not in term_set:
term_set.add(t_id)
for parent_id in self.ont[t_id]['is_a']:
if parent_id in self.ont:
q.append(parent_id)
return term_set
def get_parents(self, term_id):
if term_id not in self.ont:
return set()
term_set = set()
for parent_id in self.ont[term_id]['is_a']:
if parent_id in self.ont:
term_set.add(parent_id)
return term_set
def get_namespace_terms(self, namespace):
terms = set()
for go_id, obj in self.ont.items():
if obj['namespace'] == namespace:
terms.add(go_id)
return terms
def get_namespace(self, term_id):
return self.ont[term_id]['namespace']
def get_term_set(self, term_id):
if term_id not in self.ont:
return set()
term_set = set()
q = deque()
q.append(term_id)
while len(q) > 0:
t_id = q.popleft()
if t_id not in term_set:
term_set.add(t_id)
for ch_id in self.ont[t_id]['children']:
q.append(ch_id)
return term_set
def evaluate_deepgoplus(train_data_file, test_data_file, terms_file,
diamond_scores_file, gofile, ont, preds=None, export=False,evaluate=True,verbose=False):
go_rels = Ontology(gofile, with_rels=True)
if(isinstance(terms_file,list) or isinstance(terms_file,np.ndarray)):
terms = terms_file
else:
terms_df = pd.read_pickle(terms_file)
terms = terms_df['terms'].values.flatten()
terms_dict = {v: i for i, v in enumerate(terms)}
train_df = pd.read_pickle(train_data_file)
test_df = pd.read_pickle(test_data_file)
annotations = train_df['annotations'].values
annotations = list(map(lambda x: set(x), annotations))
test_annotations = test_df['annotations'].values
test_annotations = list(map(lambda x: set(x), test_annotations))
go_rels.calculate_ic(annotations + test_annotations)
# Print IC values of terms
ics = {}
for term in terms:
ics[term] = go_rels.get_ic(term)
prot_index = {}
for i, row in enumerate(train_df.itertuples()):
prot_index[row.proteins] = i
# BLAST Similarity (Diamond)
diamond_scores = {}
with open(diamond_scores_file) as f:
for line in f:
it = line.strip().split()
if it[0] not in diamond_scores:
diamond_scores[it[0]] = {}
diamond_scores[it[0]][it[1]] = float(it[2])
blast_preds = []
for i, row in enumerate(test_df.itertuples()):
annots = {}
prot_id = row.proteins
# BlastKNN
if prot_id in diamond_scores:
sim_prots = diamond_scores[prot_id]
allgos = set()
total_score = 0.0
for p_id, score in sim_prots.items():
allgos |= annotations[prot_index[p_id]]
total_score += score
allgos = list(sorted(allgos))
sim = np.zeros(len(allgos), dtype=np.float32)
for j, go_id in enumerate(allgos):
s = 0.0
for p_id, score in sim_prots.items():
if go_id in annotations[prot_index[p_id]]:
s += score
sim[j] = s / total_score
ind = np.argsort(-sim)
for go_id, score in zip(allgos, sim):
annots[go_id] = score
blast_preds.append(annots)
# DeepGOPlus
go_set = go_rels.get_namespace_terms(NAMESPACES[ont])
go_set.remove(FUNC_DICT[ont])
labels = test_df['annotations'].values
labels = list(map(lambda x: set(filter(lambda y: y in go_set, x)), labels))
# print(len(go_set))
deep_preds = []
alphas = {NAMESPACES['mf']: 0.55, NAMESPACES['bp']: 0.59, NAMESPACES['cc']: 0.46}
for i, row in enumerate(test_df.itertuples()):
annots_dict = blast_preds[i].copy()
for go_id in annots_dict:
annots_dict[go_id] *= alphas[go_rels.get_namespace(go_id)]
for j, score in enumerate(row.preds if preds is None else preds[i]):
go_id = terms[j]
score *= 1 - alphas[go_rels.get_namespace(go_id)]
if go_id in annots_dict:
annots_dict[go_id] += score
else:
annots_dict[go_id] = score
deep_preds.append(annots_dict)
if(export):
export_cafa(test_df,deep_preds,"DeepGOPlus_1_all.txt")
if(evaluate):
print("Evaluating scores")
compute_prmetrics(labels,deep_preds,go_rels,ont=ont,verbose=verbose)
#aucs = compute_roc(labels,deep_preds)
#print("aucs:",aucs)
#print("mean aucs(predicted):",np.mean(aucs))
#print("mean aucs(all):",(np.sum(aucs)+(len(test_annotations)-len(aucs))*0.5)/len(test_annotations))
def evaluate(train_data_file, test_data_file, terms_file,
gofile, ont, preds=None, propagate_scores=False,export=False,evaluate=True,verbose=False):
'''
train_data_file: path to train_data.pkl
test_data_file: path to test_data.pkl
terms_file: path to terms.pkl or just a list or nparray of labels
'''
go_rels = Ontology(gofile, with_rels=True)
if(isinstance(terms_file,list) or isinstance(terms_file,np.ndarray)):
terms = terms_file
else:
terms_df = | pd.read_pickle(terms_file) | pandas.read_pickle |
"""
This module contains transformers that apply string functions.
"""
import pandas as pd
from tubular.base import BaseTransformer
class SeriesStrMethodTransformer(BaseTransformer):
"""Tranformer that applies a pandas.Series.str method.
Transformer assigns the output of the method to a new column. It is possible to
supply other key word arguments to the transform method, which will be passed to the
pandas.Series.str method being called.
Be aware it is possible to supply incompatible arguments to init that will only be
identified when transform is run. This is because there are many combinations of method, input
and output sizes. Additionally some methods may only work as expected when called in
transform with specific key word arguments.
Parameters
----------
new_column_name : str
The name of the column to be assigned to the output of running the pd.Series.str in transform.
pd_method_name : str
The name of the pandas.Series.str method to call.
columns : str
Column to apply the transformer to. If a str is passed this is put into a list. Value passed
in columns is saved in the columns attribute on the object. Note this has no default value so
the user has to specify the columns when initialising the transformer. This is avoid likely
when the user forget to set columns, in this case all columns would be picked up when super
transform runs.
pd_method_kwargs : dict, default = {}
A dictionary of keyword arguments to be passed to the pd.Series.str method when it is called.
**kwargs
Arbitrary keyword arguments passed onto BaseTransformer.__init__().
Attributes
----------
new_column_name : str
The name of the column or columns to be assigned to the output of running the
pd.Series.str in transform.
pd_method_name : str
The name of the pd.Series.str method to call.
"""
def __init__(
self, new_column_name, pd_method_name, columns, pd_method_kwargs={}, **kwargs
):
if type(columns) is list:
if len(columns) > 1:
raise ValueError(
f"columns arg should contain only 1 column name but got {len(columns)}"
)
super().__init__(columns=columns, **kwargs)
if type(new_column_name) is not str:
raise TypeError(
f"unexpected type ({type(new_column_name)}) for new_column_name, must be str"
)
if type(pd_method_name) is not str:
raise TypeError(
f"unexpected type ({type(pd_method_name)}) for pd_method_name, expecting str"
)
if type(pd_method_kwargs) is not dict:
raise TypeError(
f"pd_method_kwargs should be a dict but got type {type(pd_method_kwargs)}"
)
else:
for i, k in enumerate(pd_method_kwargs.keys()):
if not type(k) is str:
raise TypeError(
f"unexpected type ({type(k)}) for pd_method_kwargs key in position {i}, must be str"
)
self.new_column_name = new_column_name
self.pd_method_name = pd_method_name
self.pd_method_kwargs = pd_method_kwargs
try:
ser = | pd.Series(["a"]) | pandas.Series |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
INPUT_DIR = "~/data/query-result/"
OUTPUT_DIR = "~/data/summary-stats/"
RES_LIST = ['cpu', 'mem', 'net_send', 'net_receive', 'disk_read', 'disk_write']
METRIC_LIST = ['_util_per_instance_95p', '_util_per_instance_max', '_util_per_pool', '_util_per_pod']
COST_MAP = {'action-classify': 0.248, 'action-gke': 1.22, 'db': 0.663, 'db-preempt': 0.663, 'druid-preempt': 0.663,
'druid-ssd-preempt': 0.704, 'mixed': 0.248, 'mixed-preempt': 0.248, 'nginx': 0.266, 'ping-gke': 0.69}
PERCENTILES = [.5, .95, .99]
END_TIME = 1514995200917
#END_TIME = 1515028900917
class StatsAggregator(object):
def __init__(self, metric_name):
self.metric_name = metric_name
def get_csv_list(self, res_list, data_dir):
csv_list = {}
for res in res_list:
csv_file = data_dir + res + self.metric_name + ".csv"
csv_list[res] = csv_file
print("Constructed list of csv filess:", csv_list)
return csv_list
def process_csv(self, res, csvfile, outfile):
df = pd.read_csv(csvfile, sep=',')
summary_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from geopy.distance import great_circle
#filter and drop duplcates rows
data = pd.read_csv("AllStudent.csv")
data = data.filter(['N°Ins','Adresse']).drop_duplicates()
data.fillna("other", inplace=True)
| pd.DataFrame(data) | pandas.DataFrame |
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestPinyi(unittest.TestCase):
def run_segment(self):
# -*- coding=UTF-8 -*-
data = np.array([
[0, u'二手旧书:医学电磁成像'],
[1, u'二手美国文学选读( 下册 )李宜燮南开大学出版社 9787310003969'],
[2, u'二手正版图解象棋入门/谢恩思主编/华龄出版社'],
[3, u'二手中国糖尿病文献索引'],
[4, u'二手郁达夫文集( 国内版 )全十二册馆藏书']
])
df = | pd.DataFrame({"id": data[:, 0], "text": data[:, 1]}) | pandas.DataFrame |