| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
#!/usr/bin/python3
import sys
import copy
from pathlib import Path
from datetime import datetime,timedelta
import re
import matplotlib.pyplot as plt
import math
import numpy as np
import random
import pandas as pd
import subprocess
from pickle import dump,load
from predictor.utility import msg2log
from clustgelDL.auxcfg import D_LOGS,log2All
from canbus.BF import BF
DB_NAME="canbas"
""" DB in repository.
DB fields are following:
"""
DT="Date Time"
DUMP="Dump"
MATCH_KEY="Match_key"
METHOD="Method"
PKL="PKL"
REPOSITORY="Repository"
MISC="Misc"
DB_COLS=[DT, DUMP, MATCH_KEY, METHOD, PKL, REPOSITORY, MISC]
# number of randomly generated 'no signal' bits in bit stream
INSERTED_NO_SIGNAL=5
# phy layer state
SIG_ = 0
SIG_0 = 1
SIG_1 = 2
# transitions
T__ = 0 # no signal to no signal SIG_ -> SIG_
T_0 = 1 # SIG_ -> SIG_0
T0_ = 2 # SIG_0 -> SIG_
T_1 = 3 # SIG_ -> SIG_1
T1_ = 4 # SIG_1 -> SIG_
T00 = 5 # SIG_0 -> SIG_0
T01 = 6 # SIG_0 -> SIG_1
T10 = 7 # SIG_1 -> SIG_0
T11 = 8 # SIG_1 -> SIG_1
TAN = 9
tr_names={T__:"no signal waveform",
T_0 :"transition to zero",
T0_ : "transition from zero",
T_1 : "transition to one",
T1_ : "transition from one",
T00 : "transition zero-zero",
T01 : "transition zero-one",
T10 : "transition one-zero",
T11 : "transition one-one",
TAN : "possible anomaly"
}
tr_labels={T__:"**", T_0:"*0",T0_:"0*",T_1:"*1",T1_:"1*",T00:"00",T01:"01",T10:"10",T11:"11",TAN:"XX"}
""" Linear interpolator for 'slope' part of waveform."""
def interpSlopeWF(fsample:float=16e+06,bitrate:float=125000.0,slope:float=0.1,left_y:float=0.0, right_y:float=1.0,
f:object=None)->np.array:
"""
:param fsample:
:param bitrate:
:param slope:
:param left_y:
:param right_y:
:param f:
:return:
"""
n0 = int(slope * (fsample / bitrate))
x = [0, n0]
y = [left_y, right_y]
xnew = np.arange(0, n0, 1)
yinterp = np.interp(xnew, x, y)
pure = np.array([yinterp[i] for i in range(n0)] + [right_y for i in range(n0, int(fsample / bitrate))])
return pure
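# Illustrative check (follows from the defaults above): with fsample=16e6 and bitrate=125000.0
# one bit period is 128 samples; with slope=0.1 the first int(0.1*128)=12 samples ramp linearly
# from left_y toward right_y and the remaining 116 samples stay at right_y, e.g.
#   wf = interpSlopeWF(16e6, 125000.0, 0.1, 0.0, 1.0)   # wf.shape == (128,)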
def transitionsPng(fsample:float=16e+06,bitrate:float=125000.0,snr:float=30.0,slope:float=0.2,f:object=None):
transition_list=[T__WF,T_0WF,T_1WF,T0_WF,T00WF,T01WF,T1_WF,T10WF,T11WF]
# fsample = 16e+06
# bitrate = 125000.0
# slope = 0.3
SNR = snr  # use the snr argument rather than a hard-coded value
x = np.arange(0,int(fsample / bitrate))
suffics = '.png'
name="simulated_waveforms"
waveform_png = Path(D_LOGS['plot'] / Path(name)).with_suffix(suffics)
title="Transition Waveform( SNR={} DB, slope ={}, Fsample={} MHz, Bitrate ={} K/sec)".format( SNR, slope,
fsample/10e+6, bitrate/1e+3)
fig,ax_array =plt.subplots(nrows=3,ncols=3,figsize = (18,5),sharex=True, sharey=True)
fig.subplots_adjust(wspace=0.5, hspace=0.5)
fig.suptitle(title,fontsize=16)
i=0
for ax in np.ravel(ax_array):
tobj=transition_list[i](fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
tobj.genWF()
auxTransitionsPng(ax, tobj, x)
i=i+1
plt.savefig(waveform_png)
plt.close("all")
return
def auxTransitionsPng(ax,tobj, x):
# ln,=ax.plot(x, tobj.pure, x, tobj.signal)
ln, = ax.plot(x, tobj.pure)
ln, = ax.plot(x, tobj.signal)
# ax[i, j].set_xlim(0, len(x) * 1 / fsample)
ax.set_xlabel('time')
ax.set_ylabel('Signal')
ax.set_title(tobj.title)
ax.grid(True)
return ln
class canbusWF():
"""
canbus
"""
def __init__(self,fsample:float=16e+06,bitrate:float=125000.0,slope:float=0.1,SNR:int=3, f:object=None):
self.fsample=fsample
self.bitrate=bitrate
self.slope=slope
self.vcan_lD=1.5
self.vcan_lR=2.5
self.vcan_hR=2.5
self.vcan_hD=3.5
self.SNR=SNR # SNR in dB: 10*log10(P_signal/P_noise)
self.signal=None
self.pure = None
self.title=""
self.f =f
#hist
self.h_min=self.vcan_lD-0.7
self.h_max=self.vcan_hD+0.7
self.h_step=0.5
self.bins=[float(w/10) for w in range( int(self.h_min*10), int((self.h_max+self.h_step)*10),
int(self.h_step *10))]
self.hist = None
self.density = None
pass
""" Additive white Gaussian noise (awgn)"""
def awgn(self,signal:np.array=None):
sigpower = sum([math.pow(abs(signal[i]),2) for i in range (len(signal))])
sigpower=sigpower/len(signal)
noisepower = sigpower/(math.pow(10,self.SNR/10))
noise=math.sqrt(noisepower)*(np.random.uniform(-1,1,size=len(signal)))
return noise
def histogram(self):
self.hist,_ = np.histogram(self.signal, self.bins, density=False)
self.density, _ = np.histogram(self.signal, self.bins, density=True)
return
""" Random signal waveform shift along t-axisto simulate the random latency in bit stream.
Max. shift is 10% from bit waveform period. shift_n -the number of signal samples by which the shift occurs is randomly
generated. shift_direction - the direction of the shift forward or back is randomized too.
"""
def rndshift(self):
if self.signal is None:
return
n,=self.signal.shape
n_dist=int(n*0.1)
shift_n=int(np.random.randint(n_dist,size=1)[0])
shift_direction = int(np.random.randint(3, size=1)[0])
signal_list=self.signal.tolist()
if shift_direction ==0: # left shift, append
for i in range(shift_n):
signal_list.pop(0)
signal_list.append(signal_list[-1])
elif shift_direction==1: #right shift, insert at 0
for i in range(shift_n):
signal_list.pop(-1)
signal_list.insert(0,signal_list[0])
elif shift_direction == 2:
for i in range(shift_n):
signal_list.pop(-1)
signal_list.insert(0, self.vcan_lR )
del self.signal
self.signal=np.array(signal_list)
return
class T__WF(canbusWF):
def __init__(self,fsample:float=16e+06,bitrate:float=125000.0,slope:float=0.1,SNR:int=3, f:object=None):
super().__init__(fsample=fsample,bitrate=bitrate,slope=0.0,SNR=SNR, f=f)
self.title="Transition _->_"
def genWF(self):
self.pure=np.array([self.vcan_hR for i in range(int(self.fsample/self.bitrate))])
self.signal=np.add(self.pure,self.awgn(self.pure))
class T_0WF(canbusWF):
def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
f: object = None):
super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
self.title = "Transition _->'0'"
def genWF(self):
self.pure = interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
left_y=self.vcan_lD, right_y=self.vcan_hD, f=self.f)
self.signal = np.add(self.pure, self.awgn(self.pure))
class T_1WF(canbusWF):
def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
f: object = None):
super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
self.title = "Transition _->'1'"
def genWF(self):
self.pure = interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
left_y=self.vcan_lR, right_y=self.vcan_lD, f=self.f)
self.signal = np.add(self.pure, self.awgn(self.pure))
class T0_WF(canbusWF):
def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
f: object = None):
super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
self.title = "Transition '0'->_"
def genWF(self):
self.pure = interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
left_y=self.vcan_hD, right_y=self.vcan_lD, f=self.f)
self.signal = np.add(self.pure, self.awgn(self.pure))
class T1_WF(canbusWF):
def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
f: object = None):
super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
self.title = "Transition '1'->_"
def genWF(self):
self.pure= interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
left_y=self.vcan_lD, right_y=self.vcan_lR, f=self.f)
self.signal=np.add(self.pure,self.awgn(self.pure))
class T00WF(canbusWF):
def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
f: object = None):
super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
self.title = "Transition '0'->'0'"
def genWF(self):
self.pure=np.array([self.vcan_hD for i in range(int(self.fsample/self.bitrate))])
self.signal=np.add(self.pure,self.awgn(self.pure))
class T11WF(canbusWF):
def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
f: object = None):
super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
self.title = "Transition '1'->'1'"
def genWF(self):
self.pure=np.array([self.vcan_lD for i in range(int(self.fsample/self.bitrate))])
self.signal=np.add(self.pure,self.awgn(self.pure))
class T10WF(canbusWF):
def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
f: object = None):
super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
self.title = "Transition '1'->'0'"
def genWF(self):
self.pure= interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
left_y=self.vcan_lD, right_y=self.vcan_hD, f=self.f)
self.signal=np.add(self.pure,self.awgn(self.pure))
class T01WF(canbusWF):
def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
f: object = None):
super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
self.title = "Transition '0'->'1'"
def genWF(self):
self.pure= interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
left_y=self.vcan_hD, right_y=self.vcan_lD, f=self.f)
self.signal=np.add(self.pure,self.awgn(self.pure))
""" Waveform per transition dictionary """
TR_DICT={T__: T__WF,
T_0: T_0WF,
T_1: T_1WF,
T0_: T0_WF,
T00: T00WF,
T01: T01WF,
T1_: T1_WF,
T10: T10WF,
T11: T11WF}
""" Return list of following dict
{'DateTime':<Date Time>,
'IF':<interface>>,
'ID':<canbus packet ID>,
'Data':<canbus packet data>,
'Packet':<ID | data> in hexa,
'bitstr_list':<list of bits>
'bit_str':<string of bits>
}
"""
def readChunkFromCanBusDump(offset_line:int =0, chunk_size:int=128,canbusdump:str=None, f:object=None)->list:
parsed_list=[]
if canbusdump is None or canbusdump=="" or not Path(canbusdump).exists():
return parsed_list
line_count=0
last_line=offset_line + chunk_size
with open(canbusdump,'r') as fcanbus:
while line_count<offset_line:
line = fcanbus.readline()
if not line:
return parsed_list
line_count+=1
while line_count<last_line:
line = fcanbus.readline()
if not line:
return parsed_list
line_count+=1
parsed_list.append(parseCanBusLine(line))
return parsed_list
"""This function parses string to 'DateTime', 'interface', 'packet ID' and 'packet Data'.
The concatenation of two elements 'ID' and 'Data' forms an additional return element 'packet'.
The packet string converts to list bit strings. Every two symbols are converted to the bit string.
All return items are merged into a dictionary.
"""
def parseCanBusLine(line:str=None, f:object=None)->dict:
if line is None:
return {}
aitems=line.split(' ')
itemDateTime=re.search(r'\((.*?)\)',line).group(1)
itemData=re.search(r'(?<=#)\w+',line).group(0)
aitemID=aitems[2].split('#')
itemID=aitemID[0]
itemIF=aitems[1]
if len(itemID)%2 != 0:
itemID='0'+itemID
if len(itemData)%2 !=0:
itemData='0'+itemData
itemPacket=itemID +itemData
bitstr_list =packet2bits(packet=itemPacket,f=f)
bit_str=''.join(bitstr_list)
""" random generation 0-INSERTED_NO_SIGNAL 'no signal' bits marked as *"""
nrnd=random.randrange(0,INSERTED_NO_SIGNAL+1)
insnosigb=''.join(["*" for i in range(nrnd+1)])
if len(insnosigb)>0:
bit_str=bit_str+insnosigb
return {'DateTime':itemDateTime,'IF':itemIF, 'ID':itemID,'Data':itemData,'Packet':itemPacket,
'bitstr_list':bitstr_list,'bit_str':bit_str}
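# Illustrative usage (the log line below is hypothetical; its "(<timestamp>) <interface> <ID>#<data>"
# layout is what the parser above expects):
#   d = parseCanBusLine("(1621848289.123456) can0 12A#6B6B00FF")
#   # d['IF'] == 'can0', d['ID'] == '012A' (zero-padded to an even length),
#   # d['Packet'] == '012A6B6B00FF', d['bit_str'] == the packet bits plus a random number of
#   # trailing '*' (no-signal) bits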
""" This function forms a list of bits string from a packet data
Every two symbols (two nibbles or byte) is hex number which is converted to bit array.
The function returns the list of bit strings.
For example, packet is '6B6B00FF'
'6B'=107 =>'1101011'
'6B'=107 =>'1101011'
'00'=0 =>'00000000'
'FF'=255 => '11111111'
The result list contains ['1101011','1101011','00000000' ,'11111111']
"""
def packet2bits(packet:str=None,f:object=None)->list:
start=0
step=2
bits_list=[]
for i in range(start,len(packet),step):
bss="{0:b}".format(int( packet[start:start+step], 16)).zfill(8)
bits_list.append(bss)
start=start +step
return bits_list
""" Transform bit to the state, the type of waveform being be generated, according by current bit and previous state
st=R(bit, prev_st).
The set of states is {T__,T_0,T_1,T0_.T1_,T00,T01,T10,T11}, the current bit belongs to { '0' , '1', '*'-no signal}.
"""
def transitionRules(prev_state:int, current_bit:str)->(int, int):
"""
:param prev_state:
:param current_bit:
:return:
"""
if prev_state==SIG_:
if current_bit=='0':
transition=T_0
elif current_bit=='1':
transition=T_1
elif current_bit=='*':
transition=T__
else:
transition=T__
elif prev_state==SIG_0:
if current_bit == '0':
transition = T00
elif current_bit == '1':
transition = T01
elif current_bit == '*':
transition = T0_
else:
transition = T0_
elif prev_state==SIG_1:
if current_bit == '0':
transition = T10
elif current_bit == '1':
transition = T11
elif current_bit == '*':
transition = T1_
else:
transition = T1_
if current_bit=='0':
new_state=SIG_0
elif current_bit=='1':
new_state=SIG_1
elif current_bit == '*':
new_state=SIG_
else:
new_state=SIG_
return transition, new_state
""" Transform bit to transition according by rules
transition=R(bit,prev_state),
where states are { SIG_-no signal, SIG_0- zero signal, SIG_1- one signal} and
transition belongs to {T__, T_0, T_1, T0_ , T00, T01, T1_, T10, T11 }.
Return list of transition and new prev_state for next packet."""
def genTransition(prev_st:int=SIG_, bit_str:str=None, f:object=None)->(list,int):
""" transition array generation"""
transition=[]
st=prev_st
for i in range(len(bit_str)):
tr,st=transitionRules(st, bit_str[i])
transition.append(tr)
prev_st=SIG_
return transition,prev_st
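# Worked example (follows directly from transitionRules above): starting from prev_st=SIG_,
# the bit string "10*" yields
#   '1' -> T_1 (no signal -> one), '0' -> T10 (one -> zero), '*' -> T0_ (zero -> no signal),
# i.e. genTransition(SIG_, "10*") returns ([T_1, T10, T0_], SIG_).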
def logPackets(ld:list,offset_line:int=0,chunk_size:int=16):
msg = "\nChunk start: {} Chunk size: {}\n".format(offset_line,chunk_size)
msg2log(None,msg,D_LOGS['block'])
msg = "{:<30s} {:<9s} {:^8s} {:^8s} {:<16s} ".format('Date Time','Interface','ID', 'Data','Packet')
for dct in ld:
msg="{:<30s} {:<9s} {:<8s} {:<8s} {:<16s} ".format(dct['DateTime'], dct['IF'], dct['ID'], dct['Data'],
dct['Packet'])
msg2log(None,msg,D_LOGS['block'])
return
""" For chunk generate list of trasitions."""
def trstreamGen(canbusdump:str="", offset_line:int=0, chunk_size:int=16, prev_state:int=SIG_, f:object=None)->list:
# offset_line = offset_line
# chunk_size = chunk_size
# canbusdump = canbusdump
transition_stream = []
ld = readChunkFromCanBusDump(offset_line=offset_line, chunk_size=chunk_size, canbusdump=canbusdump, f=f)
if not ld:
return transition_stream
logPackets(ld=ld,offset_line=offset_line,chunk_size=chunk_size)
for dct in ld:
transition, prev_state = genTransition(prev_st=prev_state, bit_str=dct['bit_str'], f=f)
transition_stream.append(transition)
return transition_stream
""" Generation of the waveforma according to the bit stream.
A statistical estimate of the histogram is calculated for each waveform.
At the training stage within one packet (frame), histograms are averaged over the type of bit transitions.
The resulting histogram concatenated with type of the bit is added to Blooom Filter. (T.B.D. - add to DB too).
At the test stage no averaging. The histogram concatenated with the type of the bit is checked with BF. If no matc there
is an anomaly symptom.
"""
def wfstreamGen(mode:str='train',transition_stream:list=[],fsample:float=16e+6,bitrate:float=125000.0, slope:float=0.1,
snr:float=20, trwf_d:dict=TR_DICT,bf:BF=None, title:str="", repository:str="", f:object=None)->dict:
"""
:param mode:
:param transition_stream:
:param fsample:
:param bitrate:
:param slope:
:param snr:
:param trwf_d:
:param bf:
:param title:
:param f:
:return:
"""
packet_in_stream = -1
anomaly_d={}
loggedSignal = np.array([])
loggedHist = []
numberLoggedBit = 16
subtitle="Fsample={} MHz Bitrate={} Kbit/sec SNR={} Db".format(round(fsample/10e+6,3), round(bitrate/10e3,2),
round(snr,0))
""" random number for logged packet in the stream """
loggedPacket=random.randrange(0,len(transition_stream))
sum_match_train = 0
sum_no_match_train = 0
sum_match_test = 0
sum_no_match_test = 0
for packet in transition_stream:
packet_in_stream+=1
# histograms accumulated per transition type, in the structure {transition: [hist, ...]}
tr_hist ={T__: [], T_0: [],T0_: [],T_1: [], T1_: [],T00: [],T01: [],T10: [], T11: []}
n_match_train=0
no_match_train=0
n_match_test = 0
no_match_test = 0
startLoggedBit=-1
endLoggedBit = -1
""" logged bits in the packet """
if packet_in_stream == loggedPacket:
startLoggedBit=random.randrange(0,len(packet))
endLoggedBit =startLoggedBit + numberLoggedBit
bit_in_packet=-1
for transit in packet:
bit_in_packet+=1
cb=trwf_d[transit](fsample=fsample,bitrate=bitrate, slope=slope, SNR=snr, f=f)
cb.genWF()
cb.histogram()
""" select signals for charting """
if bit_in_packet>=startLoggedBit and bit_in_packet<endLoggedBit:
loggedSignal=np.concatenate((loggedSignal,cb.signal))
loggedHist.append(cb.hist)
if mode=='train':
tr_hist[transit].append(cb.hist)
continue
""" hist to word """
if bf is None:
continue
word=hex(transit).lstrip("0x")+"_"
word = word + ''.join([hex(vv).lstrip("0x").rstrip("L") for vv in cb.hist.tolist()])
if not bf.check_item(word):
msg="no match in DB for {} transition in {} packet".format(transit, packet_in_stream)
msg2log("Warning!",msg,D_LOGS['predict'])
msg2log("Warning!", msg, f)
anomaly_d[packet_in_stream]={transit:tr_names[transit]}
no_match_test += 1
else:
msg2log(None, "Match for {} transition in {} packet".format(transit, packet_in_stream), D_LOGS['predict'])
n_match_test += 1
if mode=='test':
msg2log(None, "\nTest\nmatch: {} no match: {}".format(n_match_test,no_match_test), D_LOGS['predict'])
if mode=='train':
""" histogram averaging """
for key,val in tr_hist.items():
if not val:
continue
allhists=np.array(val)
avehist=np.average(allhists,axis=0)
if bf is None:
continue
word = hex(key).lstrip("0x") + "_"
word = word + ''.join([hex(int(vv)).lstrip("0x").rstrip("L") for vv in avehist.tolist()])
if bf.check_item(word):
msg2log(None,"Match for {} transition in {} packet".format(key,packet_in_stream),D_LOGS['train'])
n_match_train+=1
else:
bf.add_item(word)
no_match_train+=1
msg2log(None,"\nTrain\nmatch: {} no match:{}".format(n_match_train,no_match_train),D_LOGS['train'])
sum_match_train +=n_match_train
sum_no_match_train +=no_match_train
sum_match_test +=n_match_test
sum_no_match_test +=no_match_test
if mode=="train":
msg2log(None, "\nTrain summary for SNR={} DB\nmatch: {} no match:{}".format(snr, sum_match_train, sum_no_match_train),
D_LOGS['train'])
bf.save(repository)
if mode=="test":
msg2log(None, "\nTest summary for SNR = {} DB\nmatch: {} no match:{}".format(snr,sum_match_test, sum_no_match_test),
D_LOGS['predict'])
log2All()
if len(loggedSignal)>0:
plotSignal(mode=mode, signal=loggedSignal, packetNumber=0, fsample=fsample, startBit=startLoggedBit,
title=title, subtitle=subtitle)
return anomaly_d
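# Sketch of how the two stages above are chained (assumed usage; the dump path, the bloom_filter
# object and repo_dir are hypothetical and depend on canbus.BF and the caller's configuration):
#   stream = trstreamGen(canbusdump="candump.log", offset_line=0, chunk_size=16, prev_state=SIG_)
#   anomalies = wfstreamGen(mode='train', transition_stream=stream, fsample=16e+6,
#                           bitrate=125000.0, snr=20, bf=bloom_filter, repository=repo_dir)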
def plotSignal(mode:str="train", signal:np.array=None, fsample:float=1.0, packetNumber:int=0, startBit:int=0,
title:str="",subtitle:str=""):
pass
suffics = '.png'
signal_png = Path(D_LOGS['plot'] / Path(title)).with_suffix(suffics)
delta=1.0/fsample
t=np.arange(0.0, (len(signal)-1)*delta, delta)
n=min(len(t),len(signal))
fig, ax = plt.subplots(figsize=(18, 5))
ax.plot(t[:n],signal[:n], color='r')
ax.set_xlabel('time')
ax.set_ylabel('Signal waveforms')
ax.set_title(title)
ax.grid(True)
plt.savefig(signal_png)
plt.close("all")
return
""" Get number of lines in dump file.
This function is executed ib the subprocess"""
def file_len(fname)->int:
n=-1
if Path(fname).exists():
if sys.platform.startswith('linux'):
try:
p = subprocess.Popen(['wc', '-l', fname], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result, err = p.communicate()
if p.returncode != 0:
# we do not raise any exception here (e.g. raise IOError(err))
n = -2
else:
n = int(result.strip().split()[0])
except:
pass
finally:
pass
elif sys.platform.startswith('win'):
fr=open(fname,'r')
n=0
while True:
line=fr.readline()
if not line:  # readline() returns '' at EOF, not None
break
n+=1
fr.close()
else:
n=-3
return n
def dict2csv(d:dict=None, folder:str="", title:str="", dset_name:str=None, match_key:str='ID', f:object=None):
if d is None:
msg2log(None,"No dictionary {} {} for saving".format(title,match_key),f)
return
if dset_name is None or len(dset_name)<1 or ".csv" not in dset_name:
msg2log(None,"Dtaset name is not set correctly {}".format(dset_name),f)
return
df=pd.DataFrame(d)
df.to_csv(dset_name)
msg2log(None,"{} dictionary for {} saved in {}".format(title,match_key,dset_name),f)
return
def dict2pkl(d:dict=None, folder:str="", title:str="", match_key:str='ID', f:object=None)->(str,str):
if d is None:
msg2log(None,"No dictionary {} {} for saving".format(title,match_key),f)
return
file_pkl=Path(Path(folder)/Path("{}_{}".format(title,match_key))).with_suffix(".pkl")
f_pkl=open(str(file_pkl),"wb")
dump(d,f_pkl)
msg2log(None,"{} dictionary for {} saved in {}".format(title,match_key,str(file_pkl)),f)
pkl_stem=file_pkl.stem
return pkl_stem, str(file_pkl)
def pkl2dict( folder: str = "", title: str = "", match_key: str = 'ID', pkl_stem:str="", f: object = None):
file_pkl = Path(Path(folder) / Path("{}".format(pkl_stem))).with_suffix(".pkl")
if not file_pkl.exists():
msg="Serialized dictionary {} for {} -match key was not found in {} repository".format(pkl_stem,
match_key,folder)
msg2log(None,msg,f)
return None
f_pkl = open(str(file_pkl), "rb")
d=load(f_pkl)
msg2log(None, "{} dictionary for {} loaded from {}".format(title, match_key, str(file_pkl)), f)
return d
"""" statistical estimation for observed data"""
def mleexp(target_dict:dict=None, mleexp_dict:dict=None, n_min:int=5, title:str="Train path", f:object=None):
msg="{}\n,Rare packets, no maximum likelihood estimation for exponential distribution of time gaps between " +\
" packets appearing.".format(title)
msg2log(None, msg, D_LOGS['cluster'])
for key,vlist in target_dict.items():
if len(vlist)<n_min:
msg=f"""Packet with matched mey: {key} is rare event: {len(vlist)} appearings"""
msg2log(None,msg,D_LOGS['cluster'])
continue
l_duration=[vlist[i]-vlist[i-1] for i in range(1,len(vlist))]
n=len(l_duration)
sum_items=float(sum(l_duration))/1e06 # in seconds
mle_lambda=float(n)/sum_items
mle_var_lambda=(mle_lambda*mle_lambda)/float(n)
mleexp_dict[key]={"n":n,"mle":mle_lambda,"var":mle_var_lambda,"sample":l_duration}
return
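# Note on the estimates above: for inter-arrival gaps x_1..x_n assumed exponential, the maximum
# likelihood estimate of the rate is lambda_hat = n / sum(x_i) and its variance is approximated
# by lambda_hat^2 / n, which is exactly what mleexp() stores per matched key.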
def KL_decision(train_mleexp:dict=None, test_mleexp:dict=None, title:str="Anomaly packet",f:object=None)->list:
trainSet=set(train_mleexp)
testSet=set(test_mleexp)
anomaly_list=[]
chi2_1_05=3.84
for key in trainSet.intersection(testSet):
anomaly_counter=0
train_val=train_mleexp[key]
test_val=test_mleexp[key]
lst_val=train_val['sample']+test_val['sample']
xmean=np.array(lst_val).mean()
xtrain=np.array(train_val['sample']).mean()
xtest = np.array(test_val['sample']).mean()
ntrain=train_val['n']
ntest = test_val['n']
KL2I12=ntrain*(xtrain-xmean)*(xtrain-xmean)/xmean + ntest*(xtest-xmean)*(xtest-xmean)/xmean
KLJ12 =0.5*KL2I12 + 0.5 *( ntrain * (xtrain - xmean) * (xtrain - xmean) / xtrain + ntest * (xtest - xmean) * (
xtest - xmean) / xtest)
if KL2I12>chi2_1_05 or KLJ12 > chi2_1_05:
anomaly_counter+=1
anomaly_list.append({'matched_key':key,"2I(1:2)":KL2I12,"J(1,2)":KLJ12, "chi2(1,0.05)":chi2_1_05,
"train":train_val,"test":test_val,})
return anomaly_list
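# Note on the statistics above: 2I(1:2) and J(1,2) are the directed and symmetric divergence
# statistics between the train and test gap samples (computed here from their means); both are
# compared against chi2(1, 0.05) = 3.84, and a matched key is reported as anomalous when either
# statistic exceeds that threshold.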
def manageDB(repository:str=None, db:str=None,op:str='select',d_query:dict={}, f:object=None)->dict:
file_db=Path(Path(repository)/Path(db)).with_suffix(".csv")
d_res={}
if not file_db.exists():
createDB(file_db=file_db, f=f)
if op=='select':
d_res = selectDB(file_db=file_db, d_query=d_query,f=f)
elif op=='insert':
d_res = insertDB(file_db= file_db, d_query = d_query, f = f)
pass
elif op=='update':
pass
elif op=='log':
pass
else:
pass
return d_res
def createDB(file_db:str=None, f:object=None):
df=pd.DataFrame(columns=DB_COLS)
df.to_csv(file_db,index=False)
msg2log(None,"DB created {}".format(file_db),)
return
def selectDB(file_db:str=None, d_query:dict=None,f:object=None)->dict:
if file_db is None or not Path(file_db).exists() or d_query is None or len(d_query)==0:
return None
d_res={}
l_res=[] #list of dict
df=pd.read_csv(file_db)
for index,row in df.iterrows():
keys=list(row.keys())
if dictIndict(row,d_query,f=f):
l_res.append(row)
if len(l_res)>0:
msg=f"""
Query: {d_query}
Selected: {l_res}
"""
msg2log(None,msg,f)
d_res=dict(l_res[-1]) # select the last item in the list; the item has Series type, so it is cast to dict.
return d_res
def insertDB(file_db:str=None, d_query:dict=None,f:object=None)->dict:
if file_db is None or not Path(file_db).exists() or d_query is None or len(d_query)==0:
return None
keys=list(d_query.keys())
d_insert={item:"" for item in DB_COLS}
for item in keys:
d_insert[item]=d_query[item]
d_insert[DT] = pd.Timestamp.now()
#!/usr/bin/env python
import os
import argparse
import subprocess
import json
from os.path import isfile, join, basename
import time
import pandas as pd
from datetime import datetime
import tempfile
import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'instance_generator')))
import route_gen
def main():
'''
The algorithm for the benchmark works as follows:
For a certain number of iteration:
generate instance with default generator value
for each encoding inside subfolders of encoding (one folder for each encoding):
start timer
solve with clyngo
stop timer
test solution:
if legal
add time in a csv (S)
else:
add int max as time
print an error message
'''
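# A brief note on the collected outputs (mirrors the loop below): "results" accumulates one dict
# per iteration mapping each encoding name to its solving time in seconds (sys.maxsize on
# failure), and "costs_run" the corresponding best solution cost plus the generator's
# "Benchmark_Cost"; "results" is turned into a pandas DataFrame at the end.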
parser = argparse.ArgumentParser(description='Benchmark ! :D')
parser.add_argument('--runs', type=int, help="the number of runs of the benchmark")
parser.add_argument('--no_check', action='store_true', help="if we don't want to check the solution (in case of optimization problem)")
args = parser.parse_args()
number_of_run = args.runs
print("Start of the benchmarks")
encodings = [x for x in os.listdir("../encoding/")]
print("Encodings to test:")
for encoding in encodings:
print("\t-{}".format(encoding))
results = []
costs_run = []
for i in range(number_of_run):
print("Iteration {}".format(i + 1))
result_iteration = dict()
cost_iteration = dict()
instance, minimal_cost = route_gen.instance_generator()
# we get the upper bound of the solution generated by the generator
cost_iteration["Benchmark_Cost"] = minimal_cost
correct_solution = True
instance_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
instance_temp.write(repr(instance))
instance_temp.flush()
for encoding in encodings:
print("Encoding {}:".format(encoding))
files_encoding = ["../encoding/" + encoding + "/" + f for f in os.listdir("../encoding/" + encoding) if isfile(join("../encoding/" + encoding, f))]
start = time.time()
try:
if 'parallel' == encoding:
clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"] + ['-t 8,compete'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = clingo.communicate(timeout=3600)
clingo.wait()
end = time.time()
duration = end - start
json_answers = json.loads(stdoutdata)
cost = float('inf')
answer = []
# we need to check all solution and get the best one
for call_current in json_answers["Call"]:
if "Witnesses" in call_current:
answer_current = call_current["Witnesses"][-1]
if "Costs" in answer_current:
current_cost = sum(answer_current["Costs"])
if current_cost < cost:
answer = answer_current["Value"]
cost = current_cost
else:
cost = 0
answer = answer_current["Value"]
# we append "" just to get the last . when we join latter
answer = answer + [""]
answer_str = ".".join(answer)
answer_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
answer_temp.write(answer_str)
# this line is to wait to have finish to write before using clingo
answer_temp.flush()
clingo_check = subprocess.Popen(
["clingo"] + ["../test_solution/test_solution.lp"] + [basename(answer_temp.name)] + [
basename(instance_temp.name)] + ["--outf=2"] + ["-q"], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutdata_check, stderrdata_check) = clingo_check.communicate()
clingo_check.wait()
json_check = json.loads(stdoutdata_check)
answer_temp.close()
os.remove(answer_temp.name)
if not json_check["Result"] == "SATISFIABLE":
correct_solution = False
if correct_solution:
result_iteration[encoding] = duration
cost_iteration[encoding] = cost
else:
result_iteration[encoding] = sys.maxsize
cost_iteration[encoding] = float("inf")
print("\tSatisfiable {}".format(correct_solution))
print("\tDuration {} seconds".format(result_iteration[encoding]))
print("\tBest solution {}".format(cost))
print("\tBenchmark cost {}".format(minimal_cost))
except Exception as excep:
result_iteration = str(excep)
cost_iteration = float('inf')
results.append(result_iteration)
costs_run.append(cost_iteration)
instance_temp.close()
os.remove(basename(instance_temp.name))
df = pd.DataFrame(results)
import os
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
DATASET_DIR: str = "data/"
# https://www.kaggle.com/rakannimer/air-passengers
def read_air_passengers() -> Tuple[pd.DataFrame, np.ndarray]:
indexes = [6, 33, 36, 51, 60, 100, 135]
values = [205, 600, 150, 315, 150, 190, 620]
return _add_outliers_set_datetime(
pd.read_csv(f"{DATASET_DIR}air_passengers.csv")
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : ioutil.py
@Desc : Input and output data function.
'''
# here put the import lib
import os
import sys
import pandas as pd
import numpy as np
from . import TensorData
import csv
from .basicutil import set_trace
class File():
def __init__(self, filename, mode, idxtypes):
self.filename = filename
self.mode = mode
self.idxtypes = idxtypes
self.dtypes = None
self.sep = None
def get_sep_of_file(self):
'''
Determine and store the separator used in self.filename.
'''
sep = None
fp = open(self.filename, self.mode)
for line in fp:
line = line.decode(
'utf-8') if isinstance(line, bytes) else line
if (line.startswith("%") or line.startswith("#")):
continue
line = line.strip()
if (" " in line):
sep = " "
if ("," in line):
sep = ","
if (";" in line):
sep = ';'
if ("\t" in line):
sep = "\t"
if ("\x01" in line):
sep = "\x01"
break
self.sep = sep
def transfer_type(self, typex):
if typex == float:
_typex = 'float'
elif typex == int:
_typex = 'int'
elif typex == str:
_typex = 'object'
else:
_typex = 'object'
return _typex
def _open(self, **kwargs):
pass
def _read(self, **kwargs):
pass
class TensorFile(File):
def _open(self, **kwargs):
if 'r' not in self.mode:
self.mode += 'r'
f = open(self.filename, self.mode)
pos = 0
cur_line = f.readline()
while cur_line.startswith("#"):
pos = f.tell()
cur_line = f.readline()
f.seek(pos)
_f = open(self.filename, self.mode)
_f.seek(pos)
fin = pd.read_csv(f, sep=self.sep, **kwargs)
column_names = fin.columns
self.dtypes = {}
if not self.idxtypes is None:
for idx, typex in self.idxtypes:
self.dtypes[column_names[idx]] = self.transfer_type(typex)
fin = pd.read_csv(_f, dtype=self.dtypes, sep=self.sep, **kwargs)
else:
fin = pd.read_csv(_f, sep=self.sep, **kwargs)
return fin
def _read(self, **kwargs):
tensorlist = []
self.get_sep_of_file()
_file = self._open(**kwargs)
if not self.idxtypes is None:
idx = [i[0] for i in self.idxtypes]
tensorlist = _file[idx]
else:
tensorlist = _file
return tensorlist
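# Illustrative usage (hypothetical file name, mode and keyword arguments; idxtypes is a list of
# (column_index, type) pairs, as consumed by _open()/transfer_type() above):
#   tf = TensorFile("user_item_value.tensor", "rb", idxtypes=[(0, int), (1, int), (2, float)])
#   data = tf._read(header=None)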
class CSVFile(File):
def _open(self, **kwargs):
f = pd.read_csv(self.filename, **kwargs)
column_names = list(f.columns)
self.dtypes = {}
if not self.idxtypes is None:
for idx, typex in self.idxtypes:
self.dtypes[column_names[idx]] = self.transfer_type(typex)
f = pd.read_csv(self.filename, dtype=self.dtypes, **kwargs)
else:
f = pd.read_csv(self.filename, **kwargs)
return f
def _read(self, **kwargs):
tensorlist = pd.DataFrame()
import logging
import os
import pickle
import tarfile
from typing import Tuple
import numpy as np
import pandas as pd
import scipy.io as sp_io
import shutil
from scipy.sparse import csr_matrix, issparse
from scMVP.dataset.dataset import CellMeasurement, GeneExpressionDataset, _download
logger = logging.getLogger(__name__)
class ATACDataset(GeneExpressionDataset):
"""Loads a file from `10x`_ website.
:param dataset_name: Name of the dataset file. Has to be one of:
"CellLineMixture", "AdBrainCortex", "P0_BrainCortex".
:param save_path: Location to use when saving/loading the data.
:param type: Either `filtered` data or `raw` data.
:param dense: Whether to load as dense or sparse.
If False, data is cast to sparse using ``scipy.sparse.csr_matrix``.
:param measurement_names_column: column in which to find measurement names in the corresponding `.tsv` file.
:param remove_extracted_data: Whether to remove extracted archives after populating the dataset.
:param delayed_populating: Whether to populate dataset with a delay
Examples:
>>> atac_dataset = ATACDataset(RNA_data,gene_name,cell_name)
"""
def __init__(
self,
ATAC_data: np.matrix = None,
ATAC_name: pd.DataFrame = None,
cell_name: pd.DataFrame = None,
delayed_populating: bool = False,
is_filter = True,
datatype="atac_seq",
):
if ATAC_data is None:
raise Exception("Invalid Input, the gene expression matrix is empty!")
self.ATAC_data = ATAC_data
self.ATAC_name = ATAC_name
self.cell_name = cell_name
self.is_filter = is_filter
self.datatype = datatype
self.cell_name_formulation = None
self.atac_name_formulation = None
if not isinstance(self.ATAC_name, pd.DataFrame):
self.ATAC_name = pd.DataFrame(self.ATAC_name)
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import copy
import warnings
import re
import pandas as pd
pd.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Parallel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global StanceTest
StanceTest = False
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global yData
yData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global fileName
fileName = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
global DataResultsRawExternal
global DataRawLengthExternal
global fileName
fileName = []
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global previousState
previousState = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global HistoryPreservation
HistoryPreservation = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
target_names.append('Healthy')
target_names.append('Diseased')
elif data['fileName'] == 'biodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
CollectionDBExternal = mongo.db.biodegCExt.find()
target_names.append('Non-biodegr.')
target_names.append('Biodegr.')
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
target_names.append('Negative')
target_names.append('Positive')
elif data['fileName'] == 'MaterialC':
CollectionDB = mongo.db.MaterialC.find()
target_names.append('Cylinder')
target_names.append('Disk')
target_names.append('Flatellipsold')
target_names.append('Longellipsold')
target_names.append('Sphere')
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
target_names.append('No-use')
target_names.append('Long-term')
target_names.append('Short-term')
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
target_names.append('Van')
target_names.append('Car')
target_names.append('Bus')
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
target_names.append('Fine')
target_names.append('Superior')
target_names.append('Inferior')
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
DataResultsRawExternal = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
for index, item in enumerate(CollectionDBExternal):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawExternal.append(item)
DataRawLengthExternal = len(DataResultsRawExternal)
dataSetSelection()
return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
return 'Processed uploaded data set'
def dataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global XDataExternal, yDataExternal
XDataExternal = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResultsExternal = copy.deepcopy(DataResultsRawExternal)
for dictionary in DataResultsRawExternal:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawExternal.sort(key=lambda x: x[target], reverse=True)
DataResultsExternal.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsExternal:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsExternal = [o[target] for o in DataResultsRawExternal]
AllTargetsFloatValuesExternal = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsExternal):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesExternal.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesExternal.append(Class)
previous = value
ArrayDataResultsExternal = pd.DataFrame.from_dict(DataResultsExternal)
XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargetsFloatValuesExternal
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
dfRaw = pd.DataFrame.from_dict(DataResultsRaw)
# OneTimeTemp = copy.deepcopy(dfRaw)
# OneTimeTemp.drop(columns=['_id', 'InstanceID'])
# column_names = ['volAc', 'chlorides', 'density', 'fixAc' , 'totalSuDi' , 'citAc', 'resSu' , 'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
# OneTimeTemp = OneTimeTemp.reindex(columns=column_names)
# OneTimeTemp.to_csv('dataExport.csv', index=False)
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global keepOriginalFeatures
global OrignList
if (data['fileName'] == 'biodegC'):
keepOriginalFeatures = XData.copy()
storeNewColumns = []
for col in keepOriginalFeatures.columns:
newCol = col.replace("-", "_")
storeNewColumns.append(newCol.replace("_",""))
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
else:
keepOriginalFeatures = XData.copy()
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
warnings.simplefilter('ignore')
executeModel([], 0, '')
return 'Everything is okay'
def create_global_function():
global estimator
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for all algorithms and models the performance and other results
@memory.cache
def estimator(n_estimators, eta, max_depth, subsample, colsample_bytree):
# initialize model
print('loopModels')
n_estimators = int(n_estimators)
max_depth = int(max_depth)
model = XGBClassifier(n_estimators=n_estimators, eta=eta, max_depth=max_depth, subsample=subsample, colsample_bytree=colsample_bytree, n_jobs=-1, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
# set in cross-validation
result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
# result is mean of test_score
return np.mean(result['test_score'])
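# estimator() above is the (disk-cached) objective that the Bayesian optimizer in executeModel()
# maximizes: for each sampled XGBoost hyper-parameter combination it returns the mean
# cross-validated accuracy on the current XData/yData.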
# check this issue later because we are not getting the same results
def executeModel(exeCall, flagEx, nodeTransfName):
global XDataTest, yDataTest
global XDataExternal, yDataExternal
global keyFirstTime
global estimator
global yPredictProb
global scores
global featureImportanceData
global XData
global XDataStored
global previousState
global columnsNewGen
global columnsNames
global listofTransformations
global XDataStoredOriginal
global finalResultsData
global OrignList
global tracker
global XDataNoRemoval
global XDataNoRemovalOrig
columnsNames = []
scores = []
if (len(exeCall) == 0):
if (flagEx == 3):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
OrignList = columnsNewGen
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
else:
if (flagEx == 4):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
#XDataStoredOriginal = XDataStored.copy()
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
#XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
# Bayesian Optimization CHANGE INIT_POINTS!
if (keyFirstTime):
create_global_function()
params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "max_depth": (6,12), "subsample": (0.8,1), "colsample_bytree": (0.8,1)}
bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
bayesopt.maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
bestParams = bayesopt.max['params']
estimator = XGBClassifier(n_estimators=int(bestParams.get('n_estimators')), eta=bestParams.get('eta'), max_depth=int(bestParams.get('max_depth')), subsample=bestParams.get('subsample'), colsample_bytree=bestParams.get('colsample_bytree'), probability=True, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
columnsNewGen = OrignList
if (len(exeCall) != 0):
if (flagEx == 1):
currentColumnsDeleted = []
for uniqueValue in exeCall:
currentColumnsDeleted.append(tracker[uniqueValue])
for column in XData.columns:
if (column in currentColumnsDeleted):
XData = XData.drop(column, axis=1)
XDataStoredOriginal = XDataStoredOriginal.drop(column, axis=1)
elif (flagEx == 2):
columnsKeepNew = []
columns = XDataGen.columns.values.tolist()
for indx, col in enumerate(columns):
if indx in exeCall:
columnsKeepNew.append(col)
columnsNewGen.append(col)
XDataTemp = XDataGen[columnsKeepNew]
XData[columnsKeepNew] = XDataTemp.values
XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
XDataNoRemoval[columnsKeepNew] = XDataTemp.values
elif (flagEx == 4):
splittedCol = nodeTransfName.split('_')
for col in XDataNoRemoval.columns:
splitCol = col.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
storeRenamedColumn = col
XData.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
XDataNoRemoval.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
currentColumn = columnsNewGen[exeCall[0]]
subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
replacement = currentColumn.replace(subString, nodeTransfName)
for ind, column in enumerate(columnsNewGen):
splitCol = column.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
columnsNewGen[ind] = columnsNewGen[ind].replace(storeRenamedColumn, nodeTransfName)
if (len(splittedCol) == 1):
XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
else:
if (splittedCol[1] == 'r'):
XData[nodeTransfName] = XData[nodeTransfName].round()
elif (splittedCol[1] == 'b'):
number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XData[nodeTransfName] = pd.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XData[nodeTransfName] = pd.to_numeric(XData[nodeTransfName], downcast='signed')
elif (splittedCol[1] == 'zs'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].mean())/XData[nodeTransfName].std()
elif (splittedCol[1] == 'mms'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].min())/(XData[nodeTransfName].max()-XData[nodeTransfName].min())
elif (splittedCol[1] == 'l2'):
dfTemp = []
dfTemp = np.log2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l1p'):
dfTemp = []
dfTemp = np.log1p(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l10'):
dfTemp = []
dfTemp = np.log10(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'e2'):
dfTemp = []
dfTemp = np.exp2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'em1'):
dfTemp = []
dfTemp = np.expm1(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'p2'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
elif (splittedCol[1] == 'p3'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
else:
XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
columnsNamesLoc = XData.columns.values.tolist()
for col in columnsNamesLoc:
splittedCol = col.split('_')
if (len(splittedCol) == 1):
for tran in listofTransformations:
columnsNames.append(splittedCol[0]+'_'+tran)
else:
for tran in listofTransformations:
if (splittedCol[1] == tran):
columnsNames.append(splittedCol[0])
else:
columnsNames.append(splittedCol[0]+'_'+tran)
featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
tracker = []
for value in columnsNewGen:
value = value.split(' ')
if (len(value) > 1):
tracker.append(value[1])
else:
tracker.append(value[0])
estimator.fit(XData, yData)
yPredict = estimator.predict(XData)
yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
num_cores = multiprocessing.cpu_count()
inputsSc = ['accuracy','precision_weighted','recall_weighted']
flat_results = Parallel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
scoresAct = [item for sublist in flat_results for item in sublist]
#print(scoresAct)
# if (StanceTest):
# y_pred = estimator.predict(XDataTest)
# print('Test data set')
# print(classification_report(yDataTest, y_pred))
# y_pred = estimator.predict(XDataExternal)
# print('External data set')
# print(classification_report(yDataExternal, y_pred))
howMany = 0
if (keyFirstTime):
previousState = scoresAct
keyFirstTime = False
howMany = 3
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
finalResultsData = XData.copy()
if (keyFirstTime == False):
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
previousState[0] = scoresAct[0]
previousState[1] = scoresAct[1]
howMany = 3
#elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
previousState[2] = scoresAct[2]
previousState[3] = scoresAct[3]
#howMany = howMany + 1
#elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
previousState[4] = scoresAct[4]
previousState[5] = scoresAct[5]
#howMany = howMany + 1
#else:
#pass
scores = scoresAct + previousState
if (howMany == 3):
scores.append(1)
else:
scores.append(0)
return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
global finalResultsData
finalResultsDataJSON = finalResultsData.to_json()
response = {
'finalResultsData': finalResultsDataJSON
}
return jsonify(response)
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
PerFeatureAccuracyLocalPar = []
scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
PerFeatureAccuracyLocalPar.append(scores.mean())
return PerFeatureAccuracyLocalPar
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for all algorithms and models the performance and other results
@memory.cache
def estimatorFeatureSelection(Data, clf):
resultsFS = []
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
ImpurityFS = []
RankingFS = []
estim = clf.fit(Data, yData)
importances = clf.feature_importances_
# std = np.std([tree.feature_importances_ for tree in estim.feature_importances_],
# axis=0)
maxList = max(importances)
minList = min(importances)
for f in range(Data.shape[1]):
ImpurityFS.append((importances[f] - minList) / (maxList - minList))
estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
selector = selector.fit(Data, yData)
RFEImp = selector.ranking_
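    # Map RFECV ranks (1 = selected/best, larger = worse) onto a fixed 0.95 ... 0.05 scale below,
    # so the RFE ranking is directly comparable with the other normalized importance measures.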
for f in range(Data.shape[1]):
if (RFEImp[f] == 1):
RankingFS.append(0.95)
elif (RFEImp[f] == 2):
RankingFS.append(0.85)
elif (RFEImp[f] == 3):
RankingFS.append(0.75)
elif (RFEImp[f] == 4):
RankingFS.append(0.65)
elif (RFEImp[f] == 5):
RankingFS.append(0.55)
elif (RFEImp[f] == 6):
RankingFS.append(0.45)
elif (RFEImp[f] == 7):
RankingFS.append(0.35)
elif (RFEImp[f] == 8):
RankingFS.append(0.25)
elif (RFEImp[f] == 9):
RankingFS.append(0.15)
else:
RankingFS.append(0.05)
perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
permList.append(perm.feature_importances_)
n_feats = Data.shape[1]
num_cores = multiprocessing.cpu_count()
print("Parallelization Initilization")
flat_results = Parallel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
# for i in range(n_feats):
# scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
# PerFeatureAccuracy.append(scoresHere.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
clf.fit(Data, yData)
yPredict = clf.predict(Data)
yPredict = np.nan_to_num(yPredict)
RankingFSDF = pd.DataFrame(RankingFS)
RankingFSDF = RankingFSDF.to_json()
ImpurityFSDF = pd.DataFrame(ImpurityFS)
ImpurityFSDF = ImpurityFSDF.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
if (perm_imp_eli5PD.empty):
for col in Data.columns:
            perm_imp_eli5PD = perm_imp_eli5PD.append({0: 0}, ignore_index=True)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=f_classif, k='all')
fit = bestfeatures.fit(Data,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(Data.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
resultsFS.append(featureScores)
resultsFS.append(ImpurityFSDF)
resultsFS.append(perm_imp_eli5PD)
resultsFS.append(PerFeatureAccuracyPandas)
resultsFS.append(RankingFSDF)
return resultsFS
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
global featureImportanceData
response = {
'Importance': featureImportanceData
}
return jsonify(response)
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
global featureCompareData
global columnsKeep
response = {
'ImportanceCompare': featureCompareData,
'FeatureNames': columnsKeep
}
return jsonify(response)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
scoresLoc = []
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
scoresLoc.append(temp.mean())
scoresLoc.append(temp.std())
return scoresLoc
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
global scores
response = {
'ValidResults': scores
}
return jsonify(response)
def Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
# XDataNumericColumn = XData.select_dtypes(include='number')
XDataNumeric = XDataStoredOriginal.select_dtypes(include='number')
columns = list(XDataNumeric)
global packCorrTransformed
packCorrTransformed = []
for count, i in enumerate(columns):
dicTransf = {}
splittedCol = columnsNames[(count)*len(listofTransformations)+0].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = XDataNumericCopy[i].round()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+1].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.append(index)
XDataNumericCopy[i] = pd.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XDataNumericCopy[i] = pd.to_numeric(XDataNumericCopy[i], downcast='signed')
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+2].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].mean())/XDataNumericCopy[i].std()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+3].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].min())/(XDataNumericCopy[i].max()-XDataNumericCopy[i].min())
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+4].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+5].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log1p(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+6].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log10(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+7].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.exp2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+8].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.expm1(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+9].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+10].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+11].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
packCorrTransformed.append(dicTransf)
return 'Everything Okay'
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
corrMatrix1 = corrMatrix1.loc[[feature]]
corrMatrix2 = corrMatrix2.loc[[feature]]
corrMatrix3 = corrMatrix3.loc[[feature]]
corrMatrix4 = corrMatrix4.loc[[feature]]
corrMatrix5 = corrMatrix5.loc[[feature]]
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
if (flagInf == False):
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
VIF1 = VIF1.loc[[feature]]
else:
VIF1 = pd.Series()
if ((len(targetRows1Arr) > 2) and (flagInf == False)):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
MI1List = MI1List[count]
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
corrMatrixComb2 = concatDF2.corr()
corrMatrixComb2 = corrMatrixComb2.abs()
corrMatrixComb2 = corrMatrixComb2.iloc[:,-len(uniqueTarget2):]
DataRows2 = DataRows2.replace([np.inf, -np.inf], np.nan)
DataRows2 = DataRows2.fillna(0)
X2 = add_constant(DataRows2)
X2 = X2.replace([np.inf, -np.inf], np.nan)
X2 = X2.fillna(0)
VIF2 = pd.Series([variance_inflation_factor(X2.values, i)
for i in range(X2.shape[1])],
index=X2.columns)
if (flagInf == False):
VIF2 = VIF2.replace([np.inf, -np.inf], np.nan)
VIF2 = VIF2.fillna(0)
VIF2 = VIF2.loc[[feature]]
else:
VIF2 = pd.Series()
if ((len(targetRows2Arr) > 2) and (flagInf == False)):
MI2 = mutual_info_classif(DataRows2, targetRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI2List = MI2.tolist()
MI2List = MI2List[count]
else:
MI2List = []
else:
corrMatrixComb2 = pd.DataFrame()
VIF2 = pd.Series()
MI2List = []
if (len(targetRows3Arr) > 0):
onehotEncoder3 = OneHotEncoder(sparse=False)
targetRows3Arr = targetRows3Arr.reshape(len(targetRows3Arr), 1)
onehotEncoder3 = onehotEncoder3.fit_transform(targetRows3Arr)
hotEncoderDF3 = pd.DataFrame(onehotEncoder3)
concatDF3 = pd.concat([DataRows3, hotEncoderDF3], axis=1)
corrMatrixComb3 = concatDF3.corr()
corrMatrixComb3 = corrMatrixComb3.abs()
corrMatrixComb3 = corrMatrixComb3.iloc[:,-len(uniqueTarget3):]
DataRows3 = DataRows3.replace([np.inf, -np.inf], np.nan)
DataRows3 = DataRows3.fillna(0)
X3 = add_constant(DataRows3)
X3 = X3.replace([np.inf, -np.inf], np.nan)
X3 = X3.fillna(0)
if (flagInf == False):
VIF3 = pd.Series([variance_inflation_factor(X3.values, i)
for i in range(X3.shape[1])],
index=X3.columns)
VIF3 = VIF3.replace([np.inf, -np.inf], np.nan)
VIF3 = VIF3.fillna(0)
VIF3 = VIF3.loc[[feature]]
else:
VIF3 = pd.Series()
if ((len(targetRows3Arr) > 2) and (flagInf == False)):
MI3 = mutual_info_classif(DataRows3, targetRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI3List = MI3.tolist()
MI3List = MI3List[count]
else:
MI3List = []
else:
corrMatrixComb3 = pd.DataFrame()
VIF3 = pd.Series()
MI3List = []
if (len(targetRows4Arr) > 0):
onehotEncoder4 = OneHotEncoder(sparse=False)
targetRows4Arr = targetRows4Arr.reshape(len(targetRows4Arr), 1)
onehotEncoder4 = onehotEncoder4.fit_transform(targetRows4Arr)
hotEncoderDF4 = pd.DataFrame(onehotEncoder4)
concatDF4 = pd.concat([DataRows4, hotEncoderDF4], axis=1)
corrMatrixComb4 = concatDF4.corr()
corrMatrixComb4 = corrMatrixComb4.abs()
corrMatrixComb4 = corrMatrixComb4.iloc[:,-len(uniqueTarget4):]
DataRows4 = DataRows4.replace([np.inf, -np.inf], np.nan)
DataRows4 = DataRows4.fillna(0)
X4 = add_constant(DataRows4)
X4 = X4.replace([np.inf, -np.inf], np.nan)
X4 = X4.fillna(0)
if (flagInf == False):
VIF4 = pd.Series([variance_inflation_factor(X4.values, i)
for i in range(X4.shape[1])],
index=X4.columns)
VIF4 = VIF4.replace([np.inf, -np.inf], np.nan)
VIF4 = VIF4.fillna(0)
VIF4 = VIF4.loc[[feature]]
else:
VIF4 = pd.Series()
if ((len(targetRows4Arr) > 2) and (flagInf == False)):
MI4 = mutual_info_classif(DataRows4, targetRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI4List = MI4.tolist()
MI4List = MI4List[count]
else:
MI4List = []
else:
corrMatrixComb4 = pd.DataFrame()
VIF4 = pd.Series()
MI4List = []
if (len(targetRows5Arr) > 0):
onehotEncoder5 = OneHotEncoder(sparse=False)
targetRows5Arr = targetRows5Arr.reshape(len(targetRows5Arr), 1)
onehotEncoder5 = onehotEncoder5.fit_transform(targetRows5Arr)
hotEncoderDF5 = pd.DataFrame(onehotEncoder5)
concatDF5 = pd.concat([DataRows5, hotEncoderDF5], axis=1)
corrMatrixComb5 = concatDF5.corr()
corrMatrixComb5 = corrMatrixComb5.abs()
corrMatrixComb5 = corrMatrixComb5.iloc[:,-len(uniqueTarget5):]
DataRows5 = DataRows5.replace([np.inf, -np.inf], np.nan)
DataRows5 = DataRows5.fillna(0)
X5 = add_constant(DataRows5)
X5 = X5.replace([np.inf, -np.inf], np.nan)
X5 = X5.fillna(0)
if (flagInf == False):
VIF5 = pd.Series([variance_inflation_factor(X5.values, i)
for i in range(X5.shape[1])],
index=X5.columns)
VIF5 = VIF5.replace([np.inf, -np.inf], np.nan)
VIF5 = VIF5.fillna(0)
VIF5 = VIF5.loc[[feature]]
else:
VIF5 = pd.Series()
if ((len(targetRows5Arr) > 2) and (flagInf == False)):
MI5 = mutual_info_classif(DataRows5, targetRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI5List = MI5.tolist()
MI5List = MI5List[count]
else:
MI5List = []
else:
corrMatrixComb5 = pd.DataFrame()
VIF5 = pd.Series()
MI5List = []
if(corrMatrixComb1.empty):
corrMatrixComb1 = pd.DataFrame()
else:
corrMatrixComb1 = corrMatrixComb1.loc[[feature]]
if(corrMatrixComb2.empty):
corrMatrixComb2 = pd.DataFrame()
else:
corrMatrixComb2 = corrMatrixComb2.loc[[feature]]
if(corrMatrixComb3.empty):
corrMatrixComb3 = pd.DataFrame()
else:
corrMatrixComb3 = corrMatrixComb3.loc[[feature]]
if(corrMatrixComb4.empty):
corrMatrixComb4 = pd.DataFrame()
else:
corrMatrixComb4 = corrMatrixComb4.loc[[feature]]
if(corrMatrixComb5.empty):
corrMatrixComb5 = pd.DataFrame()
else:
corrMatrixComb5 = corrMatrixComb5.loc[[feature]]
targetRows1ArrDF = pd.DataFrame(targetRows1Arr)
targetRows2ArrDF = pd.DataFrame(targetRows2Arr)
targetRows3ArrDF = pd.DataFrame(targetRows3Arr)
targetRows4ArrDF = pd.DataFrame(targetRows4Arr)
targetRows5ArrDF = pd.DataFrame(targetRows5Arr)
concatAllDF1 = pd.concat([DataRows1, targetRows1ArrDF], axis=1)
concatAllDF2 = pd.concat([DataRows2, targetRows2ArrDF], axis=1)
concatAllDF3 = pd.concat([DataRows3, targetRows3ArrDF], axis=1)
concatAllDF4 = pd.concat([DataRows4, targetRows4ArrDF], axis=1)
concatAllDF5 = pd.concat([DataRows5, targetRows5ArrDF], axis=1)
corrMatrixCombTotal1 = concatAllDF1.corr()
corrMatrixCombTotal1 = corrMatrixCombTotal1.abs()
corrMatrixCombTotal2 = concatAllDF2.corr()
corrMatrixCombTotal2 = corrMatrixCombTotal2.abs()
corrMatrixCombTotal3 = concatAllDF3.corr()
corrMatrixCombTotal3 = corrMatrixCombTotal3.abs()
corrMatrixCombTotal4 = concatAllDF4.corr()
corrMatrixCombTotal4 = corrMatrixCombTotal4.abs()
corrMatrixCombTotal5 = concatAllDF5.corr()
corrMatrixCombTotal5 = corrMatrixCombTotal5.abs()
corrMatrixCombTotal1 = corrMatrixCombTotal1.loc[[feature]]
corrMatrixCombTotal1 = corrMatrixCombTotal1.iloc[:,-1]
corrMatrixCombTotal2 = corrMatrixCombTotal2.loc[[feature]]
corrMatrixCombTotal2 = corrMatrixCombTotal2.iloc[:,-1]
corrMatrixCombTotal3 = corrMatrixCombTotal3.loc[[feature]]
corrMatrixCombTotal3 = corrMatrixCombTotal3.iloc[:,-1]
corrMatrixCombTotal4 = corrMatrixCombTotal4.loc[[feature]]
corrMatrixCombTotal4 = corrMatrixCombTotal4.iloc[:,-1]
corrMatrixCombTotal5 = corrMatrixCombTotal5.loc[[feature]]
corrMatrixCombTotal5 = corrMatrixCombTotal5.iloc[:,-1]
corrMatrixCombTotal1 = pd.concat([corrMatrixCombTotal1.tail(1)])
corrMatrixCombTotal2 = pd.concat([corrMatrixCombTotal2.tail(1)])
corrMatrixCombTotal3 = pd.concat([corrMatrixCombTotal3.tail(1)])
corrMatrixCombTotal4 = pd.concat([corrMatrixCombTotal4.tail(1)])
corrMatrixCombTotal5 = pd.concat([corrMatrixCombTotal5.tail(1)])
packCorrLoc = []
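    # packCorrLoc layout (25 JSON strings, 5 per quadrant): [0-4] |corr| row of the feature vs all
    # features, [5-9] |corr| of the feature vs the one-hot-encoded target classes, [10-14] |corr|
    # of the feature vs the raw target column, [15-19] VIF of the feature, [20-24] mutual information.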
packCorrLoc.append(corrMatrix1.to_json())
packCorrLoc.append(corrMatrix2.to_json())
packCorrLoc.append(corrMatrix3.to_json())
packCorrLoc.append(corrMatrix4.to_json())
packCorrLoc.append(corrMatrix5.to_json())
packCorrLoc.append(corrMatrixComb1.to_json())
packCorrLoc.append(corrMatrixComb2.to_json())
packCorrLoc.append(corrMatrixComb3.to_json())
packCorrLoc.append(corrMatrixComb4.to_json())
packCorrLoc.append(corrMatrixComb5.to_json())
packCorrLoc.append(corrMatrixCombTotal1.to_json())
packCorrLoc.append(corrMatrixCombTotal2.to_json())
packCorrLoc.append(corrMatrixCombTotal3.to_json())
packCorrLoc.append(corrMatrixCombTotal4.to_json())
packCorrLoc.append(corrMatrixCombTotal5.to_json())
packCorrLoc.append(VIF1.to_json())
packCorrLoc.append(VIF2.to_json())
packCorrLoc.append(VIF3.to_json())
packCorrLoc.append(VIF4.to_json())
packCorrLoc.append(VIF5.to_json())
packCorrLoc.append(json.dumps(MI1List))
packCorrLoc.append(json.dumps(MI2List))
packCorrLoc.append(json.dumps(MI3List))
packCorrLoc.append(json.dumps(MI4List))
packCorrLoc.append(json.dumps(MI5List))
return packCorrLoc
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/thresholdDataSpace', methods=["GET", "POST"])
def Seperation():
thresholds = request.get_data().decode('utf8').replace("'", '"')
thresholds = json.loads(thresholds)
thresholdsPos = thresholds['PositiveValue']
thresholdsNeg = thresholds['NegativeValue']
getCorrectPrediction = []
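    # probability (in %) that the classifier assigns to the true class of each sample,
    # used below to split the samples into the threshold-based quadrants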
for index, value in enumerate(yPredictProb):
getCorrectPrediction.append(value[yData[index]]*100)
quadrant1 = []
quadrant2 = []
quadrant3 = []
quadrant4 = []
quadrant5 = []
probabilityPredictions = []
for index, value in enumerate(getCorrectPrediction):
if (value > 50 and value > thresholdsPos):
quadrant1.append(index)
elif (value > 50 and value <= thresholdsPos):
quadrant2.append(index)
elif (value <= 50 and value > thresholdsNeg):
quadrant3.append(index)
else:
quadrant4.append(index)
quadrant5.append(index)
probabilityPredictions.append(value)
# Main Features
DataRows1 = XData.iloc[quadrant1, :]
DataRows2 = XData.iloc[quadrant2, :]
DataRows3 = XData.iloc[quadrant3, :]
DataRows4 = XData.iloc[quadrant4, :]
DataRows5 = XData.iloc[quadrant5, :]
Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5)
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
if (len(targetRows1Arr) > 2):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
        concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
import pandas as pd
import numpy as np
import json
PROCESS_FILE_NAME_LIST = ["taxi_sort_01", "taxi_sort_001", "taxi_sort_002", "taxi_sort_003", "taxi_sort_004", "taxi_sort_005", "taxi_sort_006", "taxi_sort_007", "taxi_sort_008", "taxi_sort_009", "taxi_sort_0006", "taxi_sort_0007", "taxi_sort_0008", "taxi_sort_0009"]
PROCESS_FILE_SUFFIX_LIST = [".csv" for _ in range(len(PROCESS_FILE_NAME_LIST))]
for process_file_name, process_file_suffix in zip(PROCESS_FILE_NAME_LIST, PROCESS_FILE_SUFFIX_LIST):
df = pd.read_csv(process_file_name + process_file_suffix, index_col=False)
    df_precinct_center = pd.read_csv("precinct_center.csv", index_col=False)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
target = 'scale'
# IP
plot_mode = 'all_in_one'
obj = 'occ'
# Port
flow_dir = 'all'
port_dir = 'sys'
user_plot_pr = ['TCP']
user_plot_pr = ['UDP']
port_hist = pd.DataFrame({'A' : []})
user_port_hist = pd.DataFrame({'A' : []})
def acf(x, length=10):
return np.array([1]+[np.corrcoef(x[:-i], x[i:])[0,1] \
for i in range(1, length)])
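# Illustrative usage of acf (a sketch, not called elsewhere in this script): for a 1-D numpy array x,
# acf(x, length=5) returns [1.0, r_1, r_2, r_3, r_4], where r_k is the Pearson correlation between
# the series and its k-step lagged copy, e.g.
# acf(df['byt'].values, length=5)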
def scale_check(data_idx, plot=False):
files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
names = ['stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
scale_list = []
for col in ['byt', 'pkt']:
scale_list.append(col)
scale_list.append(str(np.min(df[col])))
scale_list.append(str(np.log(np.max(df[col]))))
scale_list.append(';')
print(files[data_idx], ':', (' '.join(scale_list)))
def pr_distribution(data_idx, plot=False):
files = ['stan','stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
names = ['stan_fwd','stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
            df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
# %% [markdown]
# This python script takes audio files from "filedata" from sonicboom, runs each audio file through
# Fast Fourier Transform, plots the FFT image, splits the FFT'd images into train, test & validation
# and pastes them into their respective folders
# Import Dependencies
import numpy as np
import pandas as pd
import scipy
from scipy import io
from scipy.io.wavfile import read as wavread
from scipy.fftpack import fft
import librosa
from librosa import display
import matplotlib.pyplot as plt
from glob import glob
import sklearn
from sklearn.model_selection import train_test_split
import os
from PIL import Image
import pathlib
import sonicboom
from joblib import Parallel, delayed
# %% [markdown]
# ## Read and add filepaths to original UrbanSound metadata
filedata = sonicboom.init_data('./data/UrbanSound8K/') #Read filedata as written in sonicboom
#Initialize empty dataframes to later enable saving the images into their respective folders
train = pd.DataFrame()
'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, util, viz
import numpy as np
import os
import pandas as pd
import pydash as ps
import shutil
DATA_AGG_FNS = {
't': 'sum',
'reward': 'sum',
'loss': 'mean',
'explore_var': 'mean',
}
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
# TODO improve to make it work with any reward mean
FITNESS_STD = util.read('slm_lab/spec/_fitness_std.json')
NOISE_WINDOW = 0.05
MA_WINDOW = 100
logger = logger.get_logger(__name__)
'''
Fitness analysis
'''
def calc_strength(aeb_df, rand_epi_reward, std_epi_reward):
'''
For each episode, use the total rewards to calculate the strength as
strength_epi = (reward_epi - reward_rand) / (reward_std - reward_rand)
**Properties:**
- random agent has strength 0, standard agent has strength 1.
- if an agent achieve x2 rewards, the strength is ~x2, and so on.
- strength of learning agent always tends toward positive regardless of the sign of rewards (some environments use negative rewards)
- scale of strength is always standard at 1 and its multiplies, regardless of the scale of actual rewards. Strength stays invariant even as reward gets rescaled.
This allows for standard comparison between agents on the same problem using an intuitive measurement of strength. With proper scaling by a difficulty factor, we can compare across problems of different difficulties.
'''
    # use lower clip 0 for noise in reward to dip slightly below rand
return (aeb_df['reward'] - rand_epi_reward).clip(0.) / (std_epi_reward - rand_epi_reward)
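# Worked example for calc_strength (illustrative only, values assumed): with rand_epi_reward = -200
# and std_epi_reward = 200, episode rewards of [100, 0, -300] give strengths
# [(100+200)/400, (0+200)/400, 0] = [0.75, 0.5, 0.0]; the last value is clipped at 0 because the
# agent scored below the random baseline.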
def calc_stable_idx(aeb_df, min_strength_ma):
'''Calculate the index (epi) when strength first becomes stable (using moving mean and working backward)'''
above_std_strength_sr = (aeb_df['strength_ma'] >= min_strength_ma)
if above_std_strength_sr.any():
# if it achieved stable (ma) min_strength_ma at some point, the index when
std_strength_ra_idx = above_std_strength_sr.idxmax()
stable_idx = std_strength_ra_idx - (MA_WINDOW - 1)
else:
stable_idx = np.nan
return stable_idx
def calc_std_strength_timestep(aeb_df):
'''
Calculate the timestep needed to achieve stable (within NOISE_WINDOW) std_strength.
    For an agent failing to achieve std_strength 1, it is meaningless to measure speed or to give a false interpolation, so the timestep is set to inf (never).
'''
std_strength = 1.
stable_idx = calc_stable_idx(aeb_df, min_strength_ma=std_strength - NOISE_WINDOW)
if np.isnan(stable_idx):
std_strength_timestep = np.inf
else:
std_strength_timestep = aeb_df.loc[stable_idx, 'total_t'] / std_strength
return std_strength_timestep
def calc_speed(aeb_df, std_timestep):
'''
For each session, measure the moving average for strength with interval = 100 episodes.
Next, measure the total timesteps up to the first episode that first surpasses standard strength, allowing for noise of 0.05.
Finally, calculate speed as
speed = timestep_std / timestep_solved
**Properties:**
- random agent has speed 0, standard agent has speed 1.
- if an agent takes x2 timesteps to exceed standard strength, we can say it is 2x slower.
- the speed of learning agent always tends toward positive regardless of the shape of the rewards curve
    - the scale of speed is always standard at 1 and its multiples, regardless of the absolute timesteps.
    For an agent failing to achieve standard strength 1, it is meaningless to measure speed or to give a false interpolation, so the speed is 0.
This allows an intuitive measurement of learning speed and the standard comparison between agents on the same problem.
'''
agent_timestep = calc_std_strength_timestep(aeb_df)
speed = std_timestep / agent_timestep
return speed
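# Worked example for calc_speed (illustrative, assumed numbers): if the standard needs
# std_timestep = 100000 timesteps and the agent's strength moving average first holds at
# 1 - NOISE_WINDOW after total_t = 50000 timesteps, then speed = 100000 / 50000 = 2.0;
# an agent that never reaches standard strength gets an infinite timestep and hence speed 0.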
def is_noisy_mono_inc(sr):
    '''Check if sr is monotonically increasing within noise (NOISE_WINDOW = 5%), i.e. noise = 5% * std_strength = 0.05 * 1'''
zero_noise = -NOISE_WINDOW
mono_inc_sr = np.diff(sr) >= zero_noise
# restore sr to same length
mono_inc_sr = np.insert(mono_inc_sr, 0, np.nan)
return mono_inc_sr
def calc_stability(aeb_df):
'''
Find a baseline =
- 0. + noise for very weak solution
- max(strength_ma_epi) - noise for partial solution weak solution
- 1. - noise for solution achieving standard strength and beyond
So we get:
- weak_baseline = 0. + noise
- strong_baseline = min(max(strength_ma_epi), 1.) - noise
- baseline = max(weak_baseline, strong_baseline)
Let epi_baseline be the episode where baseline is first attained. Consider the episodes starting from epi_baseline, let #epi_+ be the number of episodes, and #epi_>= the number of episodes where strength_ma_epi is monotonically increasing.
Calculate stability as
stability = #epi_>= / #epi_+
**Properties:**
- stable agent has value 1, unstable agent < 1, and non-solution = 0.
- allows for drops strength MA of 5% to account for noise, which is invariant to the scale of rewards
- if strength is monotonically increasing (with 5% noise), then it is stable
- sharp gain in strength is considered stable
- monotonically increasing implies strength can keep growing and as long as it does not fall much, it is considered stable
'''
weak_baseline = 0. + NOISE_WINDOW
strong_baseline = min(aeb_df['strength_ma'].max(), 1.) - NOISE_WINDOW
baseline = max(weak_baseline, strong_baseline)
stable_idx = calc_stable_idx(aeb_df, min_strength_ma=baseline)
if np.isnan(stable_idx):
stability = 0.
else:
stable_df = aeb_df.loc[stable_idx:, 'strength_mono_inc']
stability = stable_df.sum() / len(stable_df)
return stability
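# Worked example for calc_stability (illustrative, assumed numbers): with a best strength_ma of 0.9
# the baseline is 0.9 - NOISE_WINDOW = 0.85; if 180 of the 200 episodes from the first stable
# episode onward have strength_ma non-decreasing within the 5% noise window, stability = 180/200 = 0.9.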
def calc_consistency(aeb_fitness_df):
'''
Calculate the consistency of trial by the fitness_vectors of its sessions:
consistency = ratio of non-outlier vectors
**Properties:**
- outliers are calculated using MAD modified z-score
- if all the fitness vectors are zero or all strength are zero, consistency = 0
- works for all sorts of session fitness vectors, with the standard scale
When an agent fails to achieve standard strength, it is meaningless to measure consistency or give false interpolation, so consistency is 0.
'''
fitness_vecs = aeb_fitness_df.values
if ~np.any(fitness_vecs) or ~np.any(aeb_fitness_df['strength']):
# no consistency if vectors all 0
consistency = 0.
elif len(fitness_vecs) == 2:
# if only has 2 vectors, check norm_diff
diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0)) / np.linalg.norm(np.ones(len(fitness_vecs[0])))
consistency = diff_norm <= NOISE_WINDOW
else:
is_outlier_arr = util.is_outlier(fitness_vecs)
consistency = (~is_outlier_arr).sum() / len(is_outlier_arr)
return consistency
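# Worked example for calc_consistency (illustrative): with 4 session fitness vectors of which 1 is
# flagged as a MAD-modified-z-score outlier, consistency = 3/4 = 0.75; with exactly 2 vectors the
# check degenerates to whether their normalized difference stays within NOISE_WINDOW.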
def calc_epi_reward_ma(aeb_df):
'''Calculates the episode reward moving average with the MA_WINDOW'''
rewards = aeb_df['reward']
aeb_df['reward_ma'] = rewards.rolling(window=MA_WINDOW, min_periods=0, center=False).mean()
return aeb_df
def calc_fitness(fitness_vec):
'''
Takes a vector of qualifying standardized dimensions of fitness and compute the normalized length as fitness
L2 norm because it diminishes lower values but amplifies higher values for comparison.
'''
if isinstance(fitness_vec, pd.Series):
fitness_vec = fitness_vec.values
elif isinstance(fitness_vec, pd.DataFrame):
fitness_vec = fitness_vec.iloc[0].values
std_fitness_vector = np.ones(len(fitness_vec))
fitness = np.linalg.norm(fitness_vec) / np.linalg.norm(std_fitness_vector)
return fitness
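# Worked example for calc_fitness (illustrative): a fitness vector [1, 0.5, 1, 0.5] yields
# ||[1, 0.5, 1, 0.5]|| / ||[1, 1, 1, 1]|| = sqrt(2.5) / 2 ~= 0.79, while the standard vector of
# ones maps to exactly 1.0; the L2 norm amplifies strong dimensions relative to weak ones.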
def calc_aeb_fitness_sr(aeb_df, env_name):
'''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)'''
no_fitness_sr = pd.Series({
'strength': 0., 'speed': 0., 'stability': 0.})
if len(aeb_df) < MA_WINDOW:
logger.warn(f'Run more than {MA_WINDOW} episodes to compute proper fitness')
return no_fitness_sr
std = FITNESS_STD.get(env_name)
if std is None:
std = FITNESS_STD.get('template')
logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.')
aeb_df['total_t'] = aeb_df['t'].cumsum()
aeb_df['strength'] = calc_strength(aeb_df, std['rand_epi_reward'], std['std_epi_reward'])
aeb_df['strength_ma'] = aeb_df['strength'].rolling(MA_WINDOW).mean()
aeb_df['strength_mono_inc'] = is_noisy_mono_inc(aeb_df['strength']).astype(int)
strength = aeb_df['strength_ma'].max()
speed = calc_speed(aeb_df, std['std_timestep'])
stability = calc_stability(aeb_df)
aeb_fitness_sr = pd.Series({
'strength': strength, 'speed': speed, 'stability': stability})
return aeb_fitness_sr
'''
Analysis interface methods
'''
def save_spec(spec, info_space, unit='experiment'):
'''Save spec to proper path. Called at Experiment or Trial init.'''
prepath = util.get_prepath(spec, info_space, unit)
util.write(spec, f'{prepath}_spec.json')
def calc_mean_fitness(fitness_df):
'''Method to calculated mean over all bodies for a fitness_df'''
return fitness_df.mean(axis=1, level=3)
def get_session_data(session):
'''
Gather data from session: MDP, Agent, Env data, hashed by aeb; then aggregate.
    @returns {dict} session_data
'''
session_data = {}
for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data):
session_data[aeb] = body.df.copy()
return session_data
def calc_session_fitness_df(session, session_data):
'''Calculate the session fitness df'''
session_fitness_data = {}
for aeb in session_data:
aeb_df = session_data[aeb]
aeb_df = calc_epi_reward_ma(aeb_df)
util.downcast_float32(aeb_df)
body = session.aeb_space.body_space.data[aeb]
aeb_fitness_sr = calc_aeb_fitness_sr(aeb_df, body.env.name)
aeb_fitness_df = pd.DataFrame([aeb_fitness_sr], index=[session.index])
aeb_fitness_df = aeb_fitness_df.reindex(FITNESS_COLS[:3], axis=1)
session_fitness_data[aeb] = aeb_fitness_df
# form multi_index df, then take mean across all bodies
    session_fitness_df = pd.concat(session_fitness_data, axis=1)
#!/usr/bin/env python3
# Project : From geodynamic to Seismic observations in the Earth's inner core
# Author : <NAME>
""" Implement classes for tracers,
to create points along the trajectories of given points.
"""
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from . import data
from . import geodyn_analytical_flows
from . import positions
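# Sketch of intended usage (not executed here; assumes a geodynamic model instance, e.g. from
# geodyn_analytical_flows, that provides trajectory_single_point and crystallisation_time):
# tracer = Tracer(positions.CartesianPoint(0., 0., 1.e6), model, tau_ic=1.e9, dt=1.e7)
# tracer.spherical()  # or tracer.cartesian()
# data = tracer.output_spher(i=0)  # one pandas DataFrame per tracer, tagged by index i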
class Tracer():
""" Data for 1 tracer (including trajectory) """
def __init__(self, initial_position, model, tau_ic, dt):
""" initialisation
initial_position: Point instance
model: geodynamic model, function model.trajectory_single_point is required
"""
self.initial_position = initial_position
self.model = model # geodynamic model
try:
self.model.trajectory_single_point
except NameError:
print(
"model.trajectory_single_point is required, please check the input model: {}".format(model))
point = [initial_position.x, initial_position.y, initial_position.z]
self.crystallization_time = self.model.crystallisation_time(point, tau_ic)
num_t = max(2, math.floor((tau_ic - self.crystallization_time) / dt))
# print(tau_ic, self.crystallization_time, num_t)
self.num_t = num_t
        if num_t == 0:
            print("Warning: num_t == 0, no trajectory will be computed for this tracer.")
        # need to find crystallisation time of the particle
# then calculate the number of steps, based on the required dt
# then calculate the trajectory
else:
self.traj_x, self.traj_y, self.traj_z = self.model.trajectory_single_point(
self.initial_position, tau_ic, self.crystallization_time, num_t)
self.time = np.linspace(tau_ic, self.crystallization_time, num_t)
self.position = np.zeros((num_t, 3))
self.velocity = np.zeros((num_t, 3))
self.velocity_gradient = np.zeros((num_t, 9))
def spherical(self):
for index, (time, x, y, z) in enumerate(
zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
point = positions.CartesianPoint(x, y, z)
r, theta, phi = point.r, point.theta, point.phi
grad = self.model.gradient_spherical(r, theta, phi, time)
self.position[index, :] = [r, theta, phi]
self.velocity[index, :] = [self.model.u_r(r, theta, time), self.model.u_theta(r, theta, time), self.model.u_phi(r, theta, time)]
self.velocity_gradient[index, :] = grad.flatten()
def cartesian(self):
""" Compute the outputs for cartesian coordinates """
for index, (time, x, y, z) in enumerate(
zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
point = positions.CartesianPoint(x, y, z)
r, theta, phi = point.r, point.theta, point.phi
x, y, z = point.x, point.y, point.z
vel = self.model.velocity(time, [x, y, z]) # self.model.velocity_cartesian(r, theta, phi, time)
grad = self.model.gradient_cartesian(r, theta, phi, time)
self.position[index, :] = [x, y, z]
self.velocity[index, :] = vel[:]
self.velocity_gradient[index, :] = grad.flatten()
def output_spher(self, i):
list_i = i * np.ones_like(self.time)
data_i = pd.DataFrame(data=list_i, columns=["i"])
data_time = pd.DataFrame(data=self.time, columns=["time"])
dt = np.append(np.abs(np.diff(self.time)), [0])
data_dt = pd.DataFrame(data=dt, columns=["dt"])
data_pos = pd.DataFrame(data=self.position, columns=["r", "theta", "phi"])
data_velo = pd.DataFrame(data=self.velocity, columns=["v_r", "v_theta", "v_phi"])
data_strain = pd.DataFrame(data=self.velocity_gradient, columns=["dvr/dr", "dvr/dtheta", "dvr/dphi", "dvr/dtheta", "dvtheta/dtheta", "dvtheta/dphi","dvphi/dr", "dvphi/dtheta", "dvphi/dphi"])
data = pd.concat([data_i, data_time, data_dt, data_pos, data_velo, data_strain], axis=1)
return data
#data.to_csv("tracer.csv", sep=" ", index=False)
def output_cart(self, i):
list_i = i * np.ones_like(self.time)
data_i = pd.DataFrame(data=list_i, columns=["i"])
data_time = pd.DataFrame(data=self.time, columns=["time"])
dt = np.append([0], np.diff(self.time))
data_dt = pd.DataFrame(data=dt, columns=["dt"])
data_pos = pd.DataFrame(data=self.position, columns=["x", "y", "z"])
data_velo = pd.DataFrame(data=self.velocity, columns=["v_x", "v_y", "v_z"])
        data_strain = pd.DataFrame(data=self.velocity_gradient, columns=["dvx/dx", "dvx/dy", "dvx/dz", "dvy/dx", "dvy/dy", "dvy/dz", "dvz/dx", "dvz/dy", "dvz/dz"])
        data = pd.concat([data_i, data_time, data_dt, data_pos, data_velo, data_strain], axis=1)
        return data
#!/usr/bin/env python
import sys, time, code
import numpy as np
import pickle as pickle
from pandas import DataFrame, read_pickle, get_dummies, cut
import statsmodels.formula.api as sm
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from djeval import *
def shell():
vars = globals()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
def fix_colname(cn):
return cn.translate(None, ' ()[],')
msg("Hi, reading yy_df.")
yy_df = read_pickle(sys.argv[1])
# clean up column names
colnames = list(yy_df.columns.values)
colnames = [fix_colname(cn) for cn in colnames]
yy_df.columns = colnames
# change the gamenum and side from being part of the index to being normal columns
yy_df.reset_index(inplace=True)
msg("Getting subset ready.")
# TODO save the dummies along with yy_df
categorical_features = ['opening_feature']
dummies = get_dummies(yy_df[categorical_features])
import os
import numpy as np
import pandas as pd
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_sum(df, window=10):
"""
Wrapper function to estimate rolling sum.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series sum over the past 'window' days.
"""
return df.rolling(window).sum()
def ts_prod(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).prod()
def sma(df, window=10): #simple moving average
"""
Wrapper function to estimate SMA.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series SMA over the past 'window' days.
"""
return df.rolling(window).mean()
def ema(df, n, m): #exponential moving average
"""
Wrapper function to estimate EMA.
:param df: a pandas DataFrame.
:return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
"""
result = df.copy()
for i in range(1,len(df)):
        result.iloc[i] = (m*df.iloc[i] + (n-m)*result.iloc[i-1]) / n
return result
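# Worked example for ema (illustrative): with n=3, m=1 and the series [1, 2, 3], the documented
# recursion ema_t = (m/n)*a_t + ((n-m)/n)*ema_{t-1} gives ema = [1, (2 + 2*1)/3, (3 + 2*4/3)/3]
# = [1, 1.333..., 1.888...]; the first value is simply copied from the input.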
def wma(df, n):
"""
Wrapper function to estimate WMA.
:param df: a pandas DataFrame.
:return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
"""
weights = pd.Series(0.9*np.flipud(np.arange(1,n+1)))
result = pd.Series(np.nan, index=df.index)
for i in range(n-1,len(df)):
result.iloc[i]= sum(df[i-n+1:i+1].reset_index(drop=True)*weights.reset_index(drop=True))
return result
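# Worked example for wma (illustrative): with n=3 the weights are 0.9*[3, 2, 1] = [2.7, 1.8, 0.9]
# applied oldest-to-newest, so for the last three values [1, 2, 3] the result is
# 2.7*1 + 1.8*2 + 0.9*3 = 9.0; note the weights are not normalized to sum to 1.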
def stddev(df, window=10):
"""
Wrapper function to estimate rolling standard deviation.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).std()
def correlation(x, y, window=10):
"""
Wrapper function to estimate rolling corelations.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return x.rolling(window).corr(y)
def covariance(x, y, window=10):
"""
Wrapper function to estimate rolling covariance.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return x.rolling(window).cov(y)
def rolling_rank(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The rank of the last value in the array.
"""
return rankdata(na)[-1]
def ts_rank(df, window=10):
"""
Wrapper function to estimate rolling rank.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series rank over the past window days.
"""
return df.rolling(window).apply(rolling_rank)
def rolling_prod(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The product of the values in the array.
"""
return np.prod(na)
def product(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).apply(rolling_prod)
def ts_min(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).min()
def ts_max(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series max over the past 'window' days.
"""
return df.rolling(window).max()
def delta(df, period=1):
"""
Wrapper function to estimate difference.
:param df: a pandas DataFrame.
:param period: the difference grade.
:return: a pandas DataFrame with today’s value minus the value 'period' days ago.
"""
return df.diff(period)
def delay(df, period=1):
"""
Wrapper function to estimate lag.
:param df: a pandas DataFrame.
:param period: the lag grade.
:return: a pandas DataFrame with lagged time series
"""
return df.shift(period)
def rank(df):
"""
Cross sectional rank
:param df: a pandas DataFrame.
:return: a pandas DataFrame with rank along columns.
"""
#return df.rank(axis=1, pct=True)
return df.rank(pct=True)
def scale(df, k=1):
"""
Scaling time serie.
:param df: a pandas DataFrame.
:param k: scaling factor.
:return: a pandas DataFrame rescaled df such that sum(abs(df)) = k
"""
return df.mul(k).div(np.abs(df).sum())
def ts_argmax(df, window=10):
"""
Wrapper function to estimate which day ts_max(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmax) + 1
def ts_argmin(df, window=10):
"""
Wrapper function to estimate which day ts_min(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
    :return: a pandas DataFrame with the 1-based position, within each window, of the minimum value.
"""
return df.rolling(window).apply(np.argmin) + 1
def decay_linear(df, period=10):
"""
Linear weighted moving average implementation.
:param df: a pandas DataFrame.
:param period: the LWMA period
:return: a pandas DataFrame with the LWMA.
"""
    if isinstance(df, pd.Series):
        df = df.to_frame()  # Series is not supported for the calculations below.
# Clean data
if df.isnull().values.any():
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
df.fillna(value=0, inplace=True)
na_lwma = np.zeros_like(df)
na_lwma[:period, :] = df.iloc[:period, :]
na_series = df.values
divisor = period * (period + 1) / 2
y = (np.arange(period) + 1) * 1.0 / divisor
# Estimate the actual lwma with the actual close.
# The backtest engine should assure to be snooping bias free.
for row in range(period - 1, df.shape[0]):
x = na_series[row - period + 1: row + 1, :]
na_lwma[row, :] = (np.dot(x.T, y))
return pd.DataFrame(na_lwma, index=df.index, columns=['CLOSE'])
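# Hedged sketch (an addition for illustration, not in the original source): decay_linear
# gives the most recent observation the largest weight, with weights decreasing linearly
# and summing to one over the 'period' window. '_demo_decay_linear' is hypothetical.
def _demo_decay_linear():
    toy_close = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0], name='CLOSE')
    # For period=3 the last row equals (1*3 + 2*4 + 3*5) / 6 = 4.33...
    print(decay_linear(toy_close, period=3))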
def highday(df, n): # number of periods between the current point and the maximum of the previous n values of df
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmax()
return result
def lowday(df, n): # number of periods between the current point and the minimum of the previous n values of df
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmin()
return result
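# Hedged sketch (an addition for illustration, not in the original source): with n=3,
# position i looks back at the three previous values df[i-3:i] and reports how many
# periods ago the maximum / minimum of that slice occurred. '_demo_highday_lowday' is hypothetical.
def _demo_highday_lowday():
    toy = pd.Series([1.0, 5.0, 2.0, 4.0, 3.0, 6.0])
    print(highday(toy, 3)) # distance to the max of the preceding 3 values
    print(lowday(toy, 3))  # distance to the min of the preceding 3 values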
def daily_panel_csv_initializer(csv_name): #not used now
if os.path.exists(csv_name)==False:
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY')
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
dataset=0
for date in date_list["TRADE_DATE"]:
stock_list[date]=stock_list["INDUSTRY"]
stock_list.drop("INDUSTRY",axis=1,inplace=True)
stock_list.set_index("TS_CODE", inplace=True)
dataset = pd.DataFrame(stock_list.stack())
dataset.reset_index(inplace=True)
dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
else:
dataset=pd.read_csv(csv_name)
return dataset
def IndustryAverage_vwap():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_vwap.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average vwap data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average vwap data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average vwap data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculate the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = VWAP
result_unaveraged_piece.rename("VWAP_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        # average the per-stock data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VWAP_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')
return result_industryaveraged_df
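# Hedged usage sketch (an addition for illustration, not part of the original module):
# the table returned by IndustryAverage_vwap() is indexed by TRADE_DATE with one column
# per industry, so a per-stock VWAP series is typically industry-neutralized by
# subtracting the matching industry column. 'stock_vwap', 'stock_dates' and
# 'stock_industry' are hypothetical inputs.
def _demo_industry_neutralize_vwap(stock_vwap, stock_dates, stock_industry):
    industry_avg = IndustryAverage_vwap()
    benchmark = industry_avg.loc[stock_dates, stock_industry].values
    return stock_vwap - benchmark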
def IndustryAverage_close():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_close.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average close data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average close data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average close data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculate the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = CLOSE
result_unaveraged_piece.rename("CLOSE_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        # average the per-stock data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["CLOSE_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_low():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_low.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average low data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average low data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average low data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculate the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
result_unaveraged_piece = LOW
result_unaveraged_piece.rename("LOW_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        # average the per-stock data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["LOW_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_volume():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_volume.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average volume data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average volume data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average volume data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculate the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = VOLUME
result_unaveraged_piece.rename("VOLUME_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        # average the per-stock data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VOLUME_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_adv(num):
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_adv{num}.csv".format(num=num))
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average adv{num} data needs not to be updated.".format(num=num))
return result_industryaveraged_df
else:
print("The corresponding industry average adv{num} data needs to be updated.".format(num=num))
first_date_update = date_list_update[0]
except:
print("The corresponding industry average adv{num} data is missing.".format(num=num))
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculate the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = sma(VOLUME, num)
result_unaveraged_piece.rename("ADV{num}_UNAVERAGED".format(num=num),inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        # average the per-stock data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["ADV{num}_UNAVERAGED".format(num=num)].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_adv{num}.csv".format(num=num),encoding='utf-8-sig')
return result_industryaveraged_df
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha048 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha048 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha048 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculate the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) *delta(CLOSE, 1)) / CLOSE
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA048_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        # average the per-stock data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(vwap * 0.728317) + (vwap *(1 - 0.728317))
def IndustryAverage_PreparationForAlpha059():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha059 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha059 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha059 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculate the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = (VWAP * 0.728317) + (VWAP *(1 - 0.728317))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA059_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        # average the per-stock data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA059_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha059.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(close * 0.60733) + (open * (1 - 0.60733))
def IndustryAverage_PreparationForAlpha079():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha079.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha079 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha079 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha079 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculate the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (CLOSE * 0.60733) + (OPEN * (1 - 0.60733))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA079_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        # average the per-stock data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA079_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha079.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#((open * 0.868128) + (high * (1 - 0.868128))
def IndustryAverage_PreparationForAlpha080():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha080.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha080 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha080 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha080 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculate the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
HIGH = quotations_daily_chosen['HIGH']
result_unaveraged_piece = (OPEN * 0.868128) + (HIGH * (1 - 0.868128))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA080_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        # average the per-stock data by industry
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA080_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha080.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#((low * 0.721001) + (vwap * (1 - 0.721001))
def IndustryAverage_PreparationForAlpha097():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha097.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as metrics
from keras.models import Sequential
from keras.layers import Dense, LSTM, Flatten, Dropout
def get_ace_values(temp_list):
'''
This function lists out all permutations of ace values in the array sum_array
For example, if you have 2 aces, there are 4 permutations:
[[1,1], [1,11], [11,1], [11,11]]
These permutations lead to 3 unique sums: [2, 12, 22]
of these 3, only 2 are <=21 so they are returned: [2, 12]
'''
sum_array = np.zeros((2**len(temp_list), len(temp_list)))
# This loop gets the permutations
for i in range(len(temp_list)):
n = len(temp_list) - i
half_len = int(2**n * 0.5)
        for rep in range(int(sum_array.shape[0]/half_len/2)): # shape[0] is the number of rows of the numpy array
sum_array[rep*2**n : rep*2**n+half_len, i] = 1
sum_array[rep*2**n+half_len : rep*2**n+half_len*2, i] = 11
# Only return values that are valid (<=21)
    # return list(set([int(s) for s in np.sum(sum_array, axis=1) if s<=21])) # original: return only the unique ace totals that keep the hand at or under 21
    return [int(s) for s in np.sum(sum_array, axis=1)] # return every ace total as an int (duplicates and totals over 21 included)
def ace_values(num_aces):
'''
Convert num_aces, an int to a list of lists
For example, if num_aces=2, the output should be [[1,11],[1,11]]
I require this format for the get_ace_values function
'''
temp_list = []
for i in range(num_aces):
temp_list.append([1,11])
return get_ace_values(temp_list)
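# Hedged sketch (an addition for illustration, not in the original source): with two aces,
# ace_values enumerates the 2**2 = 4 permutations of (1, 11) and returns their sums,
# duplicates and busting totals included. '_demo_ace_values' is hypothetical.
def _demo_ace_values():
    print(ace_values(1)) # [1, 11]
    print(ace_values(2)) # [2, 12, 12, 22]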
def func(x):
    '''
    Return 1 if the player's starting hand totals exactly 21 (a natural blackjack), otherwise 0.
    '''
    if x == 21:
        return 1
    else:
        return 0
def make_decks(num_decks, card_types):
    '''
    Make a deck -- shuffle the requested number of decks together
    input:
        num_decks -> number of decks
        card_types -> card values of a single suit in one deck
    output:
        new_deck -> shuffled list of card values
    '''
    new_deck = []
    for i in range(num_decks):
        for j in range(4): # four suits: spades, hearts, clubs, diamonds
            new_deck.extend(card_types) # extend() appends every value of the sequence to the end of the list
    random.shuffle(new_deck)
    return new_deck
def total_up(hand):
    '''
    Total up value of hand
    input:
        <list> hand -> the current hand of cards
    output:
        <int> -> the best legal value of the hand
    '''
    aces = 0 # number of aces in the hand
    total = 0 # sum of the non-ace cards
    for card in hand:
        if card != 'A':
            total += card
        else:
            aces += 1
    # Call function ace_values to produce list of possible values for aces in hand
    ace_value_list = ace_values(aces)
    final_totals = [i+total for i in ace_value_list if i+total<=21] # an 'A' counts as 1 or 11; take the largest total that stays at or under 21 -- rule
    if final_totals == []:
        return min(ace_value_list) + total
    else:
        return max(final_totals)
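# Hedged sketch (an addition for illustration, not part of the original script): deal from
# a freshly shuffled single-deck shoe and total a few hands. An ace counts as 11 whenever
# that keeps the hand at or under 21, otherwise as 1. '_demo_deck_and_totals' is hypothetical.
def _demo_deck_and_totals():
    card_types = ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
    shoe = make_decks(1, card_types) # 52 shuffled card values
    print(len(shoe))                 # 52
    print(total_up(['A', 5]))        # 16 (ace counted as 11)
    print(total_up(['A', 5, 10]))    # 16 (ace forced down to 1)
    print(total_up(['A', 'A', 9]))   # 21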
def model_decision_old(model, player_sum, has_ace, dealer_card_num, hit=0, card_count=None):
    '''
    Given the relevant inputs, the function below uses the neural net to make a prediction
    and then based on that prediction, decides whether to hit or stay
    -- feed the player's state into the neural net; hit if the prediction is >= 0.52, otherwise stand
    input:
        model -> the model (normally the NN model)
        player_sum -> the player's current hand total
        has_ace -> whether the player's dealt hand contains an 'A'
        dealer_card_num -> value of the dealer's face-up card
        hit -> whether the player has already hit
        card_count -> the card counter
    return:
        1 -> hit
        0 -> stand
    '''
    # Put the inputs for the neural net into a single, consistent format
    # e.g. [[18 0 0 6]]
    input_array = np.array([player_sum, hit, has_ace, dealer_card_num]).reshape(1, -1) # reshape the 2-D array into one row (1, n)
    cc_array = pd.DataFrame.from_dict([card_count])
    input_array = np.concatenate([input_array, cc_array], axis=1)
    # Pass input_array to the neural net and store the prediction in predict_correct
    # e.g. [[0.10379896]]
    predict_correct = model.predict(input_array)
    if predict_correct >= 0.52:
        return 1
    else:
        return 0
def model_decision(model, card_count, dealer_card_num):
    '''
    Given the relevant inputs, the function below uses the neural net to make a prediction
    and then based on that prediction, decides whether to hit or stay
    -- feed the player's state into the neural net; hit if the prediction is >= 0.52, otherwise stand
    input:
        model -> the model (normally the NN model)
        card_count -> the card counter
        dealer_card_num -> value of the dealer's face-up card
    return:
        1 -> hit
        0 -> stand
    '''
    # Put the inputs for the neural net into a single, consistent format
    cc_array_bust = pd.DataFrame.from_dict([card_count])
    input_array = np.concatenate([cc_array_bust, np.array(dealer_card_num).reshape(1, -1)], axis=1)
    # Pass input_array to the neural net and store the prediction in predict_correct
    # e.g. [[0.10379896]]
    predict_correct = model.predict(input_array)
    if predict_correct >= 0.52:
        return 1
    else:
        return 0
def create_data(type, dealer_card_feature, player_card_feature, player_results, action_results=None, new_stack=None, games_played=None, card_count_list=None, dealer_bust=None):
    '''
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        dealer_card_feature -> dealer's first card for every game
        player_card_feature -> every player's full hand for every game
        player_results -> win/lose results for the players
        action_results -> whether the players hit
        new_stack -> whether this is the first round of a stack
        games_played -> which round of the current stack this game is
        card_count_list -> the card counter
        dealer_bust -> whether the dealer busted
    return:
        model_df -> dealer_card: dealer's face-up card
                    player_total_initial: player one's hand total after the deal
                    Y: player one's result: lose / push / win (-1, 0, 1)
                    lose: player one's result: lose / not lose (1, 0)
                    has_ace: whether player one was dealt an 'A'
                    dealer_card_num: value of the dealer's face-up card
                    correct_action: whether the action taken was the correct decision
                    hit?: whether player one hit after the deal
                    new_stack: whether this is the first round of a stack
                    games_played_with_stack: which round of the current stack this game is
                    dealer_bust: whether the dealer busted
                    blackjack?: whether player one started with 21
                    2 ~ 'A': card counts for this game
    '''
model_df = pd.DataFrame() # 构造数据集
model_df['dealer_card'] = dealer_card_feature # 所有游戏庄家的第一张牌
model_df['player_total_initial'] = [total_up(i[0][0:2]) for i in player_card_feature] # 所有游戏第一个玩家前两张牌的点数和(第一个玩家 -- 作为数据分析对象❗️)
model_df['Y'] = [i[0] for i in player_results] # 所有游戏第一个玩家输赢结果(第一个玩家 -- 作为数据分析对象❗️)
if type == 1 or type == 2:
player_live_action = [i[0] for i in action_results]
model_df['hit?'] = player_live_action # 玩家在发牌后是否要牌
has_ace = []
for i in player_card_feature:
if ('A' in i[0][0:2]): # 玩家一发牌有 ‘A’,has_ace 列表追加一个 1
has_ace.append(1)
else: # 玩家一发牌无 ‘A’,has_ace 列表追加一个 0
has_ace.append(0)
model_df['has_ace'] = has_ace
dealer_card_num = []
for i in model_df['dealer_card']:
if i == 'A': # 庄家第一张牌是 ‘A’,dealer_card_num 列表追加一个 11
dealer_card_num.append(11)
else: # 庄家第一张牌不是 ‘A’,dealer_card_num 列表追加该值
dealer_card_num.append(i)
model_df['dealer_card_num'] = dealer_card_num
lose = []
for i in model_df['Y']:
if i == -1: # 玩家输,lose 列表追加一个 1,e.g. [1, 1, ...]
lose.append(1)
else: # 玩家平局或赢,lose 列表追加一个 0,e.g. [0, 0, ...]
lose.append(0)
model_df['lose'] = lose
if type == 1:
        # If the player hit and lost, then standing would have been the correct decision;
        # if the player stood and lost, then hitting would have been the correct decision;
        # if the player hit and did not lose, then hitting was the correct decision;
        # if the player stood and did not lose, then standing was the correct decision.
correct = []
for i, val in enumerate(model_df['lose']):
if val == 1: # 玩家输
if player_live_action[i] == 1: # 玩家采取要牌动作(玩家一输了 val = 1,玩家二采取了要牌动作 action = 1 有什么关系❓)
correct.append(0)
else:
correct.append(1)
else:
if player_live_action[i] == 1:
correct.append(1)
else:
correct.append(0)
model_df['correct_action'] = correct
# Make a new version of model_df that has card counts ❗️
card_count_df = pd.concat([
pd.DataFrame(new_stack, columns=['new_stack']), # 所有游戏是否是开局第一轮游戏
pd.DataFrame(games_played, columns=['games_played_with_stack']), # 所有游戏是本局内的第几轮
pd.DataFrame.from_dict(card_count_list), # 所有游戏记牌后结果
pd.DataFrame(dealer_bust, columns=['dealer_bust'])], axis=1) # 所有游戏庄家是否爆牌
model_df = pd.concat([model_df, card_count_df], axis=1)
model_df['blackjack?'] = model_df['player_total_initial'].apply(func)
# 将各模型数据保存至 data 文件夹下
# model_df.to_csv('./data/data' + str(type) + '.csv', sep=' ')
# 统计玩家一的所有输、赢、平的次数
# -1.0 199610
# 1.0 99685
# 0.0 13289
# Name: 0, dtype: int64
# 312584
count = pd.DataFrame(player_results)[0].value_counts()
print(count, sum(count))
return model_df
def play_game(type, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards, player_results, action_results, hit_stay=0, multiplier=0, card_count=None, dealer_bust=None, model=None):
    '''
    Play a game of blackjack (after the cards are dealt)
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        players -> number of players
        live_total -> players' hand totals after the deal
        dealer_hand -> dealer's dealt cards (face-up card + hole card)
        player_hands -> each player's two dealt cards
        blackjack -> set(['A', 10])
        dealer_cards -> cards remaining in the shoe
        player_results -> np.zeros((1, players))
        action_results -> np.zeros((1, players))
        hit_stay -> when to take the hit action
        multiplier -> tracks the blackjack payout multiplier
        card_count -> the card counter
        dealer_bust -> whether the dealer busted
        model -> the model (normally the NN model)
    return:
        player_results -> lose / push / win result for every player
        dealer_cards -> cards remaining in the shoe
        live_total -> every player's hand total
        action_results -> whether each player took the "hit" action
        card_count -> the card counter
        dealer_bust -> whether the dealer busted
        multiplier -> the blackjack payout multiplier
    '''
dealer_face_up_card = 0
# Dealer checks for 21
if set(dealer_hand) == blackjack: # 庄家直接二十一点
for player in range(players):
if set(player_hands[player]) != blackjack: # 玩家此时不是二十一点,则结果为 -1 -- 规则❗️
player_results[0, player] = -1
else:
player_results[0, player] = 0
else: # 庄家不是二十一点,各玩家进行要牌、弃牌动作
for player in range(players):
# Players check for 21
if set(player_hands[player]) == blackjack: # 玩家此时直接二十一点,则结果为 1
player_results[0, player] = 1
multiplier = 1.25
else: # 玩家也不是二十一点
if type == 0: # Hit only when we know we will not bust -- 在玩家当前手牌点数不超过 11 时,才决定拿牌
while total_up(player_hands[player]) <= 11:
player_hands[player].append(dealer_cards.pop(0))
card_count[player_hands[player][-1]] += 1 # 记下玩家此时要的牌
if total_up(player_hands[player]) > 21: # 拿完牌后再次确定是否爆牌,爆牌则结果为 -1
player_results[0, player] = -1
break
elif type == 1: # Hit randomly, check for busts -- 以 hit_stay 是否大于 0.5 的方式决定拿牌
if (hit_stay >= 0.5) and (total_up(player_hands[player]) != 21):
player_hands[player].append(dealer_cards.pop(0))
card_count[player_hands[player][-1]] += 1 # 记下玩家此时要的牌
action_results[0, player] = 1
live_total.append(total_up(player_hands[player])) # 玩家要牌后,将点数和记录到 live_total
if total_up(player_hands[player]) > 21: # 拿完牌后再次确定是否爆牌,爆牌则结果为 -1
player_results[0, player] = -1
elif type == 2: # Neural net decides whether to hit or stay
# -- 通过 model_decision 方法给神经网络计算后,决定是否继续拿牌
if 'A' in player_hands[player][0:2]: # 玩家起手有 ‘A’
ace_in_hand = 1
else:
ace_in_hand = 0
if dealer_hand[0] == 'A': # 庄家起手有 ‘A’
dealer_face_up_card = 11
else:
dealer_face_up_card = dealer_hand[0]
while (model_decision_old(model, total_up(player_hands[player]), ace_in_hand, dealer_face_up_card,
hit=action_results[0, player], card_count=card_count) == 1) and (total_up(player_hands[player]) != 21):
player_hands[player].append(dealer_cards.pop(0))
card_count[player_hands[player][-1]] += 1 # 记下玩家此时要的牌
action_results[0, player] = 1
live_total.append(total_up(player_hands[player])) # 玩家要牌后,将点数和记录到 live_total
if total_up(player_hands[player]) > 21: # 拿完牌后再次确定是否爆牌,爆牌则结果为 -1
player_results[0, player] = -1
break
card_count[dealer_hand[-1]] += 1 # 记录庄家第二张发牌
# Dealer hits based on the rules
while total_up(dealer_hand) < 17: # 庄家牌值小于 17,则继续要牌
dealer_hand.append(dealer_cards.pop(0))
card_count[dealer_hand[-1]] += 1 # 记录庄家后面要的牌
# Compare dealer hand to players hand but first check if dealer busted
if total_up(dealer_hand) > 21: # 庄家爆牌
if type == 1:
dealer_bust.append(1) # 记录庄家爆牌
for player in range(players): # 将结果不是 -1 的各玩家设置结果为 1
if player_results[0, player] != -1:
player_results[0, player] = 1
else: # 庄家没爆牌
if type == 1:
dealer_bust.append(0) # 记录庄家没爆牌
for player in range(players): # 将玩家牌点数大于庄家牌点数的玩家结果置为 1
if total_up(player_hands[player]) > total_up(dealer_hand):
if total_up(player_hands[player]) <= 21:
player_results[0, player] = 1
elif total_up(player_hands[player]) == total_up(dealer_hand):
player_results[0, player] = 0
else:
player_results[0, player] = -1
if type == 0:
return player_results, dealer_cards, live_total, action_results, card_count
elif type == 1:
return player_results, dealer_cards, live_total, action_results, card_count, dealer_bust
elif type == 2:
return player_results, dealer_cards, live_total, action_results, multiplier, card_count
def play_stack(type, stacks, num_decks, card_types, players, model=None):
    '''
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        stacks -> number of stacks (games) to play
        num_decks -> number of decks per stack
        card_types -> the card values
        players -> number of players
        model -> a trained model (normally the NN model)
    output:
        dealer_card_feature -> dealer's first card for every game
        player_card_feature -> every player's full hand for every game
        player_results -> lose / push / win result for every player
        action_results -> whether each player took the "hit" action
        new_stack -> whether this is the first round of a stack
        games_played_with_stack -> which round of the current stack this game is
        card_count_list -> the card counter
        dealer_bust -> whether the dealer busted
        bankroll -> chips remaining at the end of the stack
    '''
bankroll = []
dollars = 10000 # 起始资金为 10000
dealer_card_feature = []
player_card_feature = []
player_live_total = []
player_results = []
action_results = []
dealer_bust = []
first_game = True
prev_stack = 0
stack_num_list = []
new_stack = []
card_count_list = []
games_played_with_stack = []
for stack in range(stacks):
games_played = 0 # 记录同局游戏下有几轮
# Make a dict for keeping track of the count for a stack
card_count = {
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
10: 0,
'A': 0
}
# 每新开一局时,temp_new_stack 为 1
# 同局游戏下不同轮次,temp_new_stack 为 0
# 第一局第一轮,temp_new_stack 为 0
if stack != prev_stack:
temp_new_stack = 1
else:
temp_new_stack = 0
blackjack = set(['A', 10])
dealer_cards = make_decks(num_decks, card_types) # 根据给定牌副数洗牌
while len(dealer_cards) > 20: # 牌盒里的牌不大于 20 张就没必要继续用这副牌进行游戏 -- 规则⭐️
curr_player_results = np.zeros((1, players))
curr_action_results = np.zeros((1, players))
dealer_hand = []
player_hands = [[] for player in range(players)]
live_total = []
multiplier = 1
# Record card count
cc_array_bust = pd.DataFrame.from_dict([card_count]) # 直接从字典构建 DataFrame
# Deal FIRST card
for player, hand in enumerate(player_hands): # 先给所有玩家发第一张牌
player_hands[player].append(dealer_cards.pop(0)) # 将洗好的牌分别发给玩家
card_count[player_hands[player][-1]] += 1 # 记下所有玩家第一张发牌
dealer_hand.append(dealer_cards.pop(0)) # 再给庄家发第一张牌
card_count[dealer_hand[-1]] += 1 # 记下庄家第一张发牌
dealer_face_up_card = dealer_hand[0] # 记录庄家明牌
# Deal SECOND card
for player, hand in enumerate(player_hands): # 先给所有玩家发第二张牌
player_hands[player].append(dealer_cards.pop(0)) # 接着刚刚洗好的牌继续发牌
card_count[player_hands[player][-1]] += 1 # 记下所有玩家第二张发牌
dealer_hand.append(dealer_cards.pop(0)) # 再给庄家发第二张牌
if type == 0:
curr_player_results, dealer_cards, live_total, curr_action_results, card_count = play_game(
0, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards,
curr_player_results, curr_action_results, card_count=card_count)
elif type == 1:
# Record the player's live total after cards are dealt
live_total.append(total_up(player_hands[player]))
# 前 stacks/2 局,玩家在发牌后手牌不是 21 点就继续拿牌;
# 后 stacks/2 局,玩家在发牌后手牌不是 21 点不继续拿牌。
if stack < stacks/2:
hit = 1
else:
hit = 0
curr_player_results, dealer_cards, live_total, curr_action_results, card_count, \
dealer_bust = play_game(1, players, live_total, dealer_hand, player_hands, blackjack,
dealer_cards, curr_player_results, curr_action_results,
hit_stay=hit, card_count=card_count, dealer_bust=dealer_bust)
elif type == 2:
# Record the player's live total after cards are dealt
live_total.append(total_up(player_hands[player]))
                curr_player_results, dealer_cards, live_total, curr_action_results, multiplier, \
                    card_count = play_game(2, players, live_total, dealer_hand, player_hands, blackjack,
                                           dealer_cards, curr_player_results, curr_action_results,
                                           multiplier=multiplier, card_count=card_count, model=model)
# Track features
dealer_card_feature.append(dealer_hand[0]) # 将庄家的第一张牌存入新的 list
player_card_feature.append(player_hands) # 将每个玩家当前手牌存入新的 list
player_results.append(list(curr_player_results[0])) # 将各玩家的输赢结果存入新的 list
if type == 1 or type == 2:
player_live_total.append(live_total) # 将 所有玩家发牌后的点数和 以及 采取要牌行动玩家的点数和 存入新的 list
action_results.append(list(curr_action_results[0])) # 将玩家是否采取要牌行动存入新的 list(只要有一个玩家要牌,action = 1)
# Update card count list with most recent game's card count
# 每新开一局时,new_stack 添加一个 1
# 同局游戏下不同轮次,new_stack 添加一个 0
# 第一局第一轮,new_stack 添加一个 0
if stack != prev_stack:
new_stack.append(1)
else: # 记录本次为第一局游戏
new_stack.append(0)
if first_game == True:
first_game = False
else:
games_played += 1
stack_num_list.append(stack) # 记录每次游戏是否是新开局
games_played_with_stack.append(games_played) # 记录每局游戏的次数
card_count_list.append(card_count.copy()) # 记录每次游戏记牌结果
prev_stack = stack # 记录上一局游戏局数
if type == 0:
return dealer_card_feature, player_card_feature, player_results
elif type == 1:
return dealer_card_feature, player_card_feature, player_results, action_results, new_stack, games_played_with_stack, card_count_list, dealer_bust
elif type == 2:
return dealer_card_feature, player_card_feature, player_results, action_results, bankroll
def step(type, model=None, pred_Y_train_bust=None):
    '''
    Record the data in model_df after playing the requested number of stacks
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        model -> a trained model (normally the NN model)
    return:
        model_df -> a DataFrame wrapping the collected data
    '''
if type == 0 or type == 1:
nights = 1
        stacks = 50000 # number of stacks of games to play
    elif type == 2:
        nights = 201
        stacks = 201 # number of stacks of games to play
    bankrolls = []
    players = 1 # number of players
    num_decks = 1 # number of decks
card_types = ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
for night in range(nights):
if type == 0:
dealer_card_feature, player_card_feature, player_results = play_stack(
0, stacks, num_decks, card_types, players)
model_df = create_data(
0, dealer_card_feature, player_card_feature, player_results)
elif type == 1:
dealer_card_feature, player_card_feature, player_results, action_results, new_stack, \
games_played_with_stack, card_count_list, dealer_bust = play_stack(
1, stacks, num_decks, card_types, players)
model_df = create_data(
1, dealer_card_feature, player_card_feature, player_results, action_results,
new_stack, games_played_with_stack, card_count_list, dealer_bust)
elif type == 2:
            dealer_card_feature, player_card_feature, player_results, action_results, bankroll = play_stack(
                2, stacks, num_decks, card_types, players, model)
model_df = create_data(
2, dealer_card_feature, player_card_feature, player_results, action_results)
return model_df
def train_nn_ca(model_df):
    '''
    Train a neural net to play blackjack
    input:
        model_df -> the model DataFrame (normally from the random version)
    return:
        model -> the NN model (predicts whether an action is the correct decision)
        pred_Y_train -> predicted values of correct_action
        actuals -> actual values of correct_action
    '''
# Set up variables for neural net
feature_list = [i for i in model_df.columns if i not in [
'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust', 'dealer_bust_pred', 'new_stack',
'games_played_with_stack', 2, 3, 4, 5, 6, 7, 8, 9, 10, 'A', 'blackjack?']]
    # Store the model data as matrices
    train_X = np.array(model_df[feature_list])
    train_Y = np.array(model_df['correct_action']).reshape(-1, 1) # reshape the 2-D array into one column (n, 1)
# Set up a neural net with 5 layers
model = Sequential()
model.add(Dense(16))
model.add(Dense(128))
model.add(Dense(32))
model.add(Dense(8))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd')
model.fit(train_X, train_Y, epochs=200, batch_size=256, verbose=1)
    # Feed train_X to the neural net and store the predictions in pred_Y_train;
    # train_Y holds the actual outputs, flattened into actuals, e.g.
    # [[0.4260913 ]
    #  [0.3595919 ]
    #  ...
    #  [0.27353495]]
    # [1 0 0 ... 0 1 0]
    pred_Y_train = model.predict(train_X)
    actuals = train_Y[:, -1] # flatten the 2-D array to 1-D
return model, pred_Y_train, actuals
def train_nn_ca2(model_df):
    '''
    Train a neural net to PREDICT BLACKJACK
    Apologies for the name, it started as a model to predict dealer busts
    Then I decided to predict blackjacks instead but neglected to rename it
    input:
        model_df -> the model DataFrame (normally from the random version)
    return:
        model_bust -> the NN model (predicts whether the player starts with 21)
        pred_Y_train_bust -> predicted values of blackjack?
        actuals -> actual values of blackjack?
    '''
# Set up variables for neural net
feature_list = [i for i in model_df.columns if i not in [
'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust',
'dealer_bust_pred','new_stack', 'games_played_with_stack', 'blackjack?']]
train_X_bust = np.array(model_df[feature_list])
train_Y_bust = np.array(model_df['correct_action']).reshape(-1,1)
# Set up a neural net with 5 layers
model_bust = Sequential()
model_bust.add(Dense(train_X_bust.shape[1]))
model_bust.add(Dense(128))
model_bust.add(Dense(32, activation='relu'))
model_bust.add(Dense(8))
model_bust.add(Dense(1, activation='sigmoid'))
model_bust.compile(loss='binary_crossentropy', optimizer='sgd')
model_bust.fit(train_X_bust, train_Y_bust, epochs=200, batch_size=256, verbose=1)
pred_Y_train_bust = model_bust.predict(train_X_bust)
actuals = train_Y_bust[:, -1]
return model_bust, pred_Y_train_bust, actuals
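# Hedged evaluation sketch (an addition, not in the original script): since sklearn.metrics
# and matplotlib are imported above, the nets returned by train_nn_ca / train_nn_ca2 can be
# sanity-checked with an ROC curve on their training predictions. '_plot_roc' is hypothetical.
def _plot_roc(actuals, pred_Y, fname='./img/roc_curve'):
    fpr, tpr, _ = metrics.roc_curve(actuals, np.ravel(pred_Y))
    roc_auc = metrics.auc(fpr, tpr)
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.plot(fpr, tpr, label='ROC AUC = %0.3f' % roc_auc)
    ax.plot([0, 1], [0, 1], 'r--') # chance line
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.legend(loc='lower right')
    plt.savefig(fname=fname, dpi=150)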
def comparison_chart(data, position):
    '''
    Plot the multi-model comparison chart
    input:
        data -> the dataset
        position -> dealer / player
    '''
fig, ax = plt.subplots(figsize=(12,6))
ax.bar(x=data.index-0.3, height=data['random'].values, color='blue', width=0.3, label='Random')
ax.bar(x=data.index, height=data['naive'].values, color='orange', width=0.3, label='Naive')
ax.bar(x=data.index+0.3, height=data['smart'].values, color='red', width=0.3, label='Smart')
ax.set_ylabel('Probability of Tie or Win', fontsize=16)
if position == 'dealer':
ax.set_xlabel("Dealer's Card", fontsize=16)
plt.xticks(np.arange(2, 12, 1.0))
elif position == 'player':
ax.set_xlabel("Player's Hand Value", fontsize=16)
plt.xticks(np.arange(4, 21, 1.0))
plt.legend()
plt.tight_layout()
plt.savefig(fname= './img/' + position + '_card_probs_smart', dpi=150)
def comparison(model_df_naive, model_df_random, model_df_smart):
    '''
    Compare the three models
    input:
        model_df_naive -> naive model data
        model_df_random -> random model data
        model_df_smart -> NN model data
    output:
        ./img/dealer_card_probs_smart -> model comparison: probability that the player does not lose, grouped by the dealer's face-up card
        ./img/player_card_probs_smart -> model comparison: probability that the player does not lose, grouped by the player's dealt hand
        ./img/hit_frequency -> model comparison: hit frequency of the naive vs. NN player, grouped by the player's dealt hand
        ./img/hit_frequency2 -> hit frequency grouped by the dealer's card, for player hands of 12, 13, 14, 15 and 16
    '''
    # Model comparison: probability the player does not lose, grouped by the dealer's face-up card
    # naive model
    data_naive = 1 - (model_df_naive.groupby(by='dealer_card_num').sum()['lose'] /
                      model_df_naive.groupby(by='dealer_card_num').count()['lose'])
    # random model
    data_random = 1 - (model_df_random.groupby(by='dealer_card_num').sum()['lose'] /
                       model_df_random.groupby(by='dealer_card_num').count()['lose'])
    # smart (NN) model
    data_smart = 1 - (model_df_smart.groupby(by='dealer_card_num').sum()['lose'] /
                      model_df_smart.groupby(by='dealer_card_num').count()['lose'])
    data = pd.DataFrame()
    data['naive'] = data_naive
    data['random'] = data_random
    data['smart'] = data_smart
    comparison_chart(data, 'dealer')
    # Model comparison: probability the player does not lose, grouped by the player's dealt hand
    # naive model
    data_naive = 1 - (model_df_naive.groupby(by='player_total_initial').sum()['lose'] /
                      model_df_naive.groupby(by='player_total_initial').count()['lose'])
    # random model
    data_random = 1 - (model_df_random.groupby(by='player_total_initial').sum()['lose'] /
                       model_df_random.groupby(by='player_total_initial').count()['lose'])
    # smart (NN) model
    data_smart = 1 - (model_df_smart.groupby(by='player_total_initial').sum()['lose'] /
                      model_df_smart.groupby(by='player_total_initial').count()['lose'])
    data = pd.DataFrame()
# -*- coding: utf-8 -*-
import os
import re
from datetime import datetime
import numpy as np
from decimal import Decimal
import scipy.io as sio
import pandas as pd
from tqdm import tqdm
import glob
from decimal import Decimal
import datajoint as dj
from pipeline import (reference, subject, acquisition, stimulation, analysis,
intracellular, extracellular, behavior, utilities)
from pipeline import extracellular_path as path
# ================== Dataset ==================
# Fixed-delay
fixed_delay_xlsx = pd.read_excel(
os.path.join(path, 'FixedDelayTask', 'SI_table_2_bilateral_perturb.xlsx'),
index_col =0, usecols='A, P, Q, R, S', skiprows=2, nrows=20)
fixed_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
fixed_delay_xlsx['sex'] = 'Unknown'
fixed_delay_xlsx['sess_type'] = 'Auditory task'
fixed_delay_xlsx['delay_duration'] = 2
# Random-long-delay
random_long_delay_xlsx = pd.read_excel(
os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'),
index_col =0, usecols='A, P, Q, R, S', skiprows=5, nrows=23)
random_long_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
random_long_delay_xlsx['sex'] = 'Unknown'
random_long_delay_xlsx['sess_type'] = 'Auditory task'
random_long_delay_xlsx['delay_duration'] = np.nan
# Random-short-delay
random_short_delay_xlsx = pd.read_excel(
os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'),
index_col =0, usecols='A, F, G, H, I', skiprows=42, nrows=11)
random_short_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
random_short_delay_xlsx['sex'] = 'Unknown'
random_short_delay_xlsx['sess_type'] = 'Auditory task'
random_short_delay_xlsx['delay_duration'] = np.nan
# Tactile-task
tactile_xlsx = pd.read_csv(
os.path.join(path, 'TactileTask', 'Whisker_taskTavle_for_paper.csv'),
index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=30)
tactile_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time']
tactile_xlsx = tactile_xlsx.reindex(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex'])
tactile_xlsx['sess_type'] = 'Tactile task'
tactile_xlsx['delay_duration'] = 1.2
# Sound-task 1.2s
sound12_xlsx = pd.read_csv(
os.path.join(path, 'Sound task 1.2s', 'OppositeTask12_for_paper.csv'),
index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=37)
sound12_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time']
sound12_xlsx = sound12_xlsx.reindex(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex'])
sound12_xlsx['sess_type'] = 'Auditory task'
sound12_xlsx['delay_duration'] = 1.2
# concat all 5
meta_data = pd.concat([fixed_delay_xlsx, random_long_delay_xlsx, random_short_delay_xlsx, tactile_xlsx, sound12_xlsx])
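# --- Illustrative addition (not part of the original ingestion code) ---
# A small clean-up sketch for the concatenated metadata built above: parse the
# date/time columns and drop accidental duplicates before the rows go further down
# the pipeline. The column names are the ones assigned above; everything else
# (helper name, duplicate key) is an assumption.
def tidy_meta_data(df):
    out = df.copy()
    out['date_of_birth'] = pd.to_datetime(out['date_of_birth'], errors='coerce')
    out['session_time'] = pd.to_datetime(out['session_time'], errors='coerce')
    return out.drop_duplicates(subset=['subject_id', 'session_time']).reset_index(drop=True)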
import sys
import numpy as np
import pandas as pd
from loguru import logger
from sklearn import model_selection
from utils import dataset_utils
default_settings = {
'data_definition_file_path': 'dataset.csv',
'folds_num': 5,
'data_random_seed': 1509,
'train_val_fraction': 0.8,
'train_fraction': 0.8,
'split_to_groups': False,
'group_column': '',
'group_ids': None,
'leave_out': False,
'leave_out_column': '',
'leave_out_values': None
}
class DatasetSplitter:
"""
This class is responsible for splitting the dataset into folds
and further splitting each fold into training, validation and test partitions.
Features:
- samples of each internal group in the dataset are split in the same manner between the training,
validation and test partitions.
- samples that belong to a fold's leave-out set appear only in the test partition of that fold.
"""
def __init__(self, settings):
"""
This method initializes parameters
:return: None
"""
self.settings = settings
self.dataset_df = None
self.groups_df_list = None
self.train_df_list = None
self.val_df_list = None
self.test_df_list = None
def load_dataset_file(self):
"""
This method loads dataset file
:return: None
"""
if self.settings['data_definition_file_path']:
logger.info("Loading dataset file {0}".format(self.settings['data_definition_file_path']))
self.dataset_df = dataset_utils.load_dataset_file(self.settings['data_definition_file_path'])
logger.info("Dataset contains {0} entries".format(self.dataset_df.shape[0]))
else:
logger.info("Data definition file path is not specified")
def set_training_dataframe(self,
training_df,
fold_num):
"""
This method sets training dataframe
:param training_df: training dataframe
:param fold_num: fold number to set training dataframe for
:return: None
"""
self.train_df_list[fold_num] = training_df
logger.info("Training dataframe with {0} entries is set for fold {1}".format(training_df.shape[0], fold_num))
def set_validation_dataframe(self,
validation_df,
fold_num):
"""
This method sets the validation dataframe
:param validation_df: validation dataframe
:param fold_num: fold number to set the validation dataframe for
:return: None
"""
self.val_df_list[fold_num] = validation_df
logger.info("Validation dataframe with {0} entries is set for fold {1}".format(validation_df.shape[0], fold_num))
def set_test_dataframe(self,
test_df,
fold_num):
"""
This method sets the test dataframe
:param test_df: test dataframe
:param fold_num: fold number to set the test dataframe for
:return: None
"""
self.test_df_list[fold_num] = test_df
logger.info("Test dataframe with {0} entries is set for fold {1}".format(test_df.shape[0], fold_num))
def set_custom_data_split(self, train_data_files, val_data_files, test_data_files):
"""
This method sets training, validation and test dataframe lists according to custom lists of
training, validation and test files defined in the settings.
:return: None
"""
logger.info("Loading custom lists of training validation and test files")
self.train_df_list = [dataset_utils.load_dataset_file(data_file) for data_file in train_data_files]
self.val_df_list = [dataset_utils.load_dataset_file(data_file) for data_file in val_data_files]
self.test_df_list = [dataset_utils.load_dataset_file(data_file) for data_file in test_data_files]
def split_dataset(self):
"""
This method first splits the dataset into folds
and then splits each fold into training, validation and test partitions
:return: None
"""
# Create lists to hold dataset partitions
self.train_df_list = [None] * self.settings['folds_num']
self.val_df_list = [None] * self.settings['folds_num']
self.test_df_list = [None] * self.settings['folds_num']
# Set random seed to ensure reproducibility of dataset partitioning across experiments on same hardware
np.random.seed(self.settings['data_random_seed'])
# Split dataset to groups
if self.settings['split_to_groups']:
self.split_dataset_to_groups()
else:
self.groups_df_list = [self.dataset_df]
# Permute entries in each group
self.groups_df_list = [group_df.reindex(np.random.permutation(group_df.index)) for group_df in self.groups_df_list]
# Split dataset to folds and training, validation and test partitions for each fold
if self.settings['leave_out']:
# Choose unique leave-out values for each fold
if self.settings['leave_out_values'] is None:
self.choose_leave_out_values()
# Split dataset to folds based on leave-out values
self.split_dataset_to_folds_with_leave_out()
else:
# Split dataset to folds in random manner
self.split_dataset_to_folds_randomly()
def split_dataset_to_groups(self):
"""
# This method splits the dataset into groups based on the values of 'self.group_column'.
# Samples in each group are split in the same manner between the training, validation and test partitions.
# This is important, for example, to ensure that each class (in a classification problem) is represented
# in the training, validation and test partitions.
"""
logger.info("Dividing dataset to groups based on values of '{0}' dataset column".format(self.settings['group_column']))
# Get groups identifiers
if self.settings['group_ids'] is None:
group_ids = self.dataset_df[self.settings['group_column']].unique()
else:
group_ids = self.settings['group_ids']
logger.info("Dataset groups are: {0}".format(group_ids))
# Split dataset to groups
self.groups_df_list = [self.dataset_df[self.dataset_df[self.settings['group_column']] == unique_group_id] for unique_group_id in group_ids]
for group_idx, group_df in enumerate(self.groups_df_list):
logger.info("Group {0} contains {1} samples".format(group_ids[group_idx], group_df.shape[0]))
def choose_leave_out_values(self):
"""
This method chooses leave-out values for each fold.
Leave-out values are calculated from the values of 'self.leave_out_column'.
Dataset entries whose 'self.leave_out_column' value is one of the leave-out values calculated
for a specific fold will appear only in the test partition of that fold.
:return: None
"""
logger.info("Choosing leave-out values for each fold from unique values of '{0}' dataset column".format(self.settings['leave_out_column']))
# Get unique values for dataset leave-out column
unique_values = self.dataset_df[self.settings['leave_out_column']].unique()
logger.info("Unique values for column {0} are: {1}".format(self.settings['leave_out_column'], unique_values))
# Check that number of unique leave-out values are greater or equal to number of folds
if len(unique_values) < self.settings['folds_num']:
logger.error("Number of unique leave-out values are smaller than number of required folds")
sys.exit(1)
# Get list of unique leave-out values for each fold
if self.settings['folds_num'] > 1:
self.settings['leave_out_values'] = np.array_split(unique_values, self.settings['folds_num'])
else:
self.settings['leave_out_values'] = [np.random.choice(unique_values, int(len(unique_values) * (1 - self.settings['train_val_fraction'])), replace=False)]
for fold in range(0, self.settings['folds_num']):
logger.info("Leave out values for fold {0} are: {1}".format(fold, self.settings['leave_out_values'][fold]))
def split_dataset_to_folds_with_leave_out(self):
"""
This method splits the dataset into folds and, for each fold, into training, validation and test partitions based on leave-out values.
Samples in each group are split in the same manner between the training, validation and test partitions.
Leave-out values appear only in the test partition of the corresponding fold.
"""
logger.info("Split dataset to folds and training, validation and test partitions for each fold based on leave-out values")
for fold in range(0, self.settings['folds_num']):
groups_train_df_list = list()
groups_val_df_list = list()
groups_test_df_list = list()
for group_idx, group_df in enumerate(self.groups_df_list):
group_test_df = group_df[group_df[self.settings['leave_out_column']].isin(self.settings['leave_out_values'][fold])]
if group_test_df.shape[0] == 0:
logger.warning("Group {0} hasn't any of leave out values: {1}".format(group_idx, self.settings['leave_out_values'][fold]))
else:
groups_test_df_list.append(group_test_df)
group_train_val_df = group_df[~group_df[self.settings['leave_out_column']].isin(self.settings['leave_out_values'][fold])]
if group_train_val_df.shape[0] == 0:
logger.warning("All samples of group {0} is in one of leave out values: {1}".format(group_idx, self.settings['leave_out_values'][fold]))
else:
train_split_idx = int(group_train_val_df.shape[0] * self.settings['train_fraction'])
groups_train_df_list.append(group_train_val_df.iloc[0:train_split_idx])
groups_val_df_list.append(group_train_val_df.iloc[train_split_idx:])
self.train_df_list[fold] = pd.concat(groups_train_df_list)
self.val_df_list[fold] = pd.concat(groups_val_df_list)
self.test_df_list[fold] = pd.concat(groups_test_df_list)
# Print number of examples in training, validation and test for each fold
self.print_data_split()
def split_dataset_to_folds_randomly(self):
"""
This method splits the dataset into folds and, for each fold, into training, validation and test partitions in a random manner.
Samples in each group are split in the same manner between the training, validation and test partitions.
"""
logger.info("Split dataset to folds and training, validation and test partitions for each fold randomly")
# For one fold regime data will be divided according to training-validation fraction and training fraction
# defined in settings.
# For multiple folds regime data will be divided with use of sklearn module and according to training
# fraction defined in settings
if self.settings['folds_num'] == 1:
groups_train_df_list = list()
groups_val_df_list = list()
groups_test_df_list = list()
for group_df in self.groups_df_list:
train_val_split_idx = int(group_df.shape[0] * self.settings['train_val_fraction'])
group_train_val_df = group_df.iloc[0:train_val_split_idx]
groups_test_df_list.append(group_df.iloc[train_val_split_idx:])
train_split_idx = int(group_train_val_df.shape[0] * self.settings['train_fraction'])
groups_train_df_list.append(group_train_val_df.iloc[0:train_split_idx])
groups_val_df_list.append(group_train_val_df.iloc[train_split_idx:])
self.train_df_list[0] = pd.concat(groups_train_df_list)
self.val_df_list[0] = pd.concat(groups_val_df_list)
self.test_df_list[0] = pd.concat(groups_test_df_list)
else:
# Split each group to multiple folds
kf_list = list()
kf = model_selection.KFold(n_splits=self.settings['folds_num'], shuffle=True, random_state=self.settings['data_random_seed'])
for group_df in self.groups_df_list:
kf_list.append(kf.split(group_df))
# Combine group splits to folds
for fold in range(0, self.settings['folds_num']):
fold_split = [next(kf_list[idx]) for idx in range(len(kf_list))]
groups_train_df_list = list()
groups_val_df_list = list()
groups_test_df_list = list()
for group_idx, group_df in enumerate(self.groups_df_list):
group_train_val_df = group_df.iloc[fold_split[group_idx][0]]
groups_test_df_list.append(group_df.iloc[fold_split[group_idx][1]])
train_split_idx = int(group_train_val_df.shape[0] * self.settings['train_fraction'])
groups_train_df_list.append(group_train_val_df.iloc[0:train_split_idx])
groups_val_df_list.append(group_train_val_df.iloc[train_split_idx:])
self.train_df_list[fold] = pd.concat(groups_train_df_list)
self.val_df_list[fold] = pd.concat(groups_val_df_list)
self.test_df_list[fold] = pd.concat(groups_test_df_list)
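# --- Illustrative usage sketch (not part of the module) ---
# Wires DatasetSplitter to the default settings declared at the top of this file.
# 'dataset.csv' comes from default_settings; the call order below follows the methods
# defined above and is the only assumption made here.
def demo_split():
    splitter = DatasetSplitter(default_settings)
    splitter.load_dataset_file()
    splitter.split_dataset()
    # One training/validation/test triple per fold:
    return splitter.train_df_list[0], splitter.val_df_list[0], splitter.test_df_list[0]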
# -*- encoding: utf-8 -*-
import functools
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import scipy.sparse
import sklearn.utils
from sklearn import preprocessing
from sklearn.compose import make_column_transformer
class InputValidator:
"""
Makes sure the input data complies with Auto-sklearn requirements.
Categorical inputs are encoded via a Label Encoder, if the input
is a dataframe.
This class also performs data integrity checks and flags problems
to the user via informative errors.
"""
def __init__(self) -> None:
self.valid_pd_enc_dtypes = ['category', 'bool']
# If a dataframe was provided, we populate
# this attribute with the column types from the dataframe
# That is, this attribute contains whether autosklearn
# should treat a column as categorical or numerical
# During fit, if the user provided feature_types, the user
# constrain is honored. If not, this attribute is used.
self.feature_types = None # type: Optional[List[str]]
# Whereas autosklearn performed encoding on the dataframe
# We need the target encoder as a decoder mechanism
self.feature_encoder = None
self.target_encoder = None
self.enc_columns = [] # type: List[int]
# During consecutive calls to the validator,
# track the number of outputs of the targets
# We need to make sure y_train/y_test have the
# same dimensionality
self._n_outputs = None
# Add support to make sure that the input to
# autosklearn has consistent dtype through calls.
# That is, once fitted, changes in the input dtype
# are not allowed
self.features_type = None # type: Optional[type]
self.target_type = None # type: Optional[type]
def register_user_feat_type(self, feat_type: Optional[List[str]],
X: Union[pd.DataFrame, np.ndarray]) -> None:
"""
Incorporate information of the feature types when processing a Numpy array.
In case feature types is provided, if using a pd.DataFrame, this utility errors
out, explaining to the user this is contradictory.
"""
if hasattr(X, "iloc") and feat_type is not None:
raise ValueError("When providing a DataFrame to Auto-Sklearn, we extract "
"the feature types from the DataFrame.dtypes. That is, "
"providing the option feat_type to the fit method is not "
"supported when using a Dataframe. Please make sure that the "
"type of each column in your DataFrame is properly set. "
"More details about having the correct data type in your "
"DataFrame can be seen in "
"https://pandas.pydata.org/pandas-docs/stable/reference"
"/api/pandas.DataFrame.astype.html")
elif feat_type is None:
# Nothing to register. No feat type is provided
# or the features are not numpy/list where this is required
return
# Some checks if feat_type is provided
if len(feat_type) != X.shape[1]:
raise ValueError('Array feat_type does not have same number of '
'variables as X has features. %d vs %d.' %
(len(feat_type), X.shape[1]))
if not all([isinstance(f, str) for f in feat_type]):
raise ValueError('Array feat_type must only contain strings.')
for ft in feat_type:
if ft.lower() not in ['categorical', 'numerical']:
raise ValueError('Only `Categorical` and `Numerical` are '
'valid feature types, you passed `%s`' % ft)
# Here we register proactively the feature types for
# Processing Numpy arrays
self.feature_types = feat_type
def validate(
self,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.DataFrame, np.ndarray],
is_classification: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Wrapper for feature/targets validation
Makes sure consistent number of samples within target and
features.
"""
X = self.validate_features(X)
y = self.validate_target(y, is_classification)
if X.shape[0] != y.shape[0]:
raise ValueError(
"The number of samples from the features X={} should match "
"the number of samples from the target y={}".format(
X.shape[0],
y.shape[0]
)
)
return X, y
def validate_features(
self,
X: Union[pd.DataFrame, np.ndarray],
) -> np.ndarray:
"""
Wrapper around sklearn check_array. Translates a pandas
Dataframe to a valid input for sklearn.
"""
# Make sure that once fitted, we don't allow new dtypes
if self.features_type is None:
self.features_type = type(X)
if self.features_type != type(X):
raise ValueError("Auto-sklearn previously received features of type {} "
"yet the current features have type {}. Changing the dtype "
"of inputs to an estimator is not supported.".format(
self.features_type,
type(X)
)
)
# Do not support category/string numpy data. Only numbers
if hasattr(X, "dtype") and not np.issubdtype(X.dtype.type, np.number):
raise ValueError(
"When providing a numpy array to Auto-sklearn, the only valid "
"dtypes are numerical ones. The provided data type {} is not supported."
"".format(
X.dtype.type,
)
)
# Pre-process dataframe to make them numerical
# Also, encode numpy categorical objects
if hasattr(X, "iloc") and not scipy.sparse.issparse(X):
# Pandas validation provide extra user information
X = self._check_and_encode_features(X)
if scipy.sparse.issparse(X):
X.sort_indices()
# sklearn check array will make sure we have the
# correct numerical features for the array
# Also, a numpy array will be created
X = sklearn.utils.check_array(
X,
force_all_finite=False,
accept_sparse='csr'
)
return X
def validate_target(
self,
y: Union[pd.DataFrame, np.ndarray],
is_classification: bool = False,
) -> np.ndarray:
"""
Wrapper around sklearn check_array. Translates a pandas
Dataframe to a valid input for sklearn.
"""
# Make sure that once fitted, we don't allow new dtypes
if self.target_type is None:
self.target_type = type(y)
if self.target_type != type(y):
raise ValueError("Auto-sklearn previously received targets of type {} "
"yet the current target has type {}. Changing the dtype "
"of inputs to an estimator is not supported.".format(
self.target_type,
type(y)
)
)
# Target data as sparse is not supported
if scipy.sparse.issparse(y):
raise ValueError("Unsupported target data provided"
"Input targets to auto-sklearn must not be of "
"type sparse. Please convert the target input (y) "
"to a dense array via scipy.sparse.csr_matrix.todense(). "
)
# No Nan is supported
if np.any(pd.isnull(y)):
raise ValueError("Target values cannot contain missing/NaN values. "
"This is not supported by scikit-learn. "
)
if not hasattr(y, "iloc"):
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Will change shape via np.ravel().",
sklearn.utils.DataConversionWarning, stacklevel=2)
y = np.ravel(y)
# During classification, we do ordinal encoding
# We train a common model for test and train
# If an encoder was ever done for an estimator,
# use it always
# For regression, we default to the check_array in sklearn
# learn. This handles numerical checking and object conversion
# For regression, we expect the user to provide numerical input
# Next check will catch that
if is_classification or self.target_encoder is not None:
y = self._check_and_encode_target(y)
# In code check to make sure everything is numeric
if hasattr(y, "iloc"):
is_number = np.vectorize(lambda x: pd.api.types.is_numeric_dtype(x))
if not np.all(is_number(y.dtypes)):
raise ValueError(
"During the target validation (y_train/y_test) an invalid"
" input was detected. "
"Input dataframe to autosklearn must only contain numerical"
" dtypes, yet it has: {} dtypes.".format(
y.dtypes
)
)
elif not np.issubdtype(y.dtype, np.number):
raise ValueError(
"During the target validation (y_train/y_test) an invalid"
" input was detected. "
"Input to autosklearn must have a numerical dtype, yet it is: {}".format(
y.dtype
)
)
# sklearn check array will make sure we have the
# correct numerical features for the array
# Also, a numpy array will be created
y = sklearn.utils.check_array(
y,
force_all_finite=True,
accept_sparse='csr',
ensure_2d=False,
)
# When translating a dataframe to numpy, make sure we
# honor the ravel requirement
if y.ndim == 2 and y.shape[1] == 1:
y = np.ravel(y)
if self._n_outputs is None:
self._n_outputs = 1 if len(y.shape) == 1 else y.shape[1]
else:
_n_outputs = 1 if len(y.shape) == 1 else y.shape[1]
if self._n_outputs != _n_outputs:
raise ValueError('Number of outputs changed from %d to %d!' %
(self._n_outputs, _n_outputs))
return y
def is_single_column_target(self) -> bool:
"""
Output is encoded with a single column encoding
"""
return self._n_outputs == 1
def _check_and_get_columns_to_encode(
self,
X: pd.DataFrame,
) -> Tuple[List[int], List[str]]:
# Register if a column needs encoding
enc_columns = []
# Also, register the feature types for the estimator
feature_types = []
# Make sure each column is a valid type
for i, column in enumerate(X.columns):
if X[column].dtype.name in self.valid_pd_enc_dtypes:
if hasattr(X, "iloc"):
enc_columns.append(column)
else:
enc_columns.append(i)
feature_types.append('categorical')
# Move away from np.issubdtype as it causes
# TypeError: data type not understood in certain pandas types
elif not is_numeric_dtype(X[column]):
if X[column].dtype.name == 'object':
raise ValueError(
"Input Column {} has invalid type object. "
"Cast it to a valid dtype before using it in Auto-Sklearn. "
"Valid types are numerical, categorical or boolean. "
"You can cast it to a valid dtype using "
"pandas.Series.astype. "
"If working with string objects, the following "
"tutorial illustrates how to work with text data: "
"https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html".format( # noqa: E501
column,
)
)
elif pd.core.dtypes.common.is_datetime_or_timedelta_dtype(
X[column].dtype
):
raise ValueError(
"Auto-sklearn does not support time and/or date datatype as given "
"in column {}. Please convert the time information to a numerical value "
"first. One example on how to do this can be found on "
"https://stats.stackexchange.com/questions/311494/".format(
column,
)
)
else:
raise ValueError(
"Input Column {} has unsupported dtype {}. "
"Supported column types are categorical/bool/numerical dtypes. "
"Make sure your data is formatted in a correct way, "
"before feeding it to Auto-Sklearn.".format(
column,
X[column].dtype.name,
)
)
else:
feature_types.append('numerical')
return enc_columns, feature_types
def _check_and_encode_features(
self,
X: pd.DataFrame,
) -> Union[pd.DataFrame, np.ndarray]:
"""
Interprets a pandas DataFrame and encodes its categorical columns.
Uses .iloc as a safe way to deal with pandas objects.
"""
# Start with the features
enc_columns, feature_types = self._check_and_get_columns_to_encode(X)
# If there is a Nan, we cannot encode it due to a scikit learn limitation
if len(enc_columns) > 0:
if np.any(pd.isnull(X[enc_columns].dropna(axis='columns', how='all'))):
# Ignore all NaN columns, and if still a NaN
# Error out
raise ValueError("Categorical features in a dataframe cannot contain "
"missing/NaN values. The OrdinalEncoder used by "
"Auto-sklearn cannot handle this yet (due to a "
"limitation on scikit-learn being addressed via: "
"https://github.com/scikit-learn/scikit-learn/issues/17123)"
)
elif np.any(pd.isnull(X)):
# After above check it means that if there is a NaN
# the whole column must be NaN
# Make sure it is numerical and let the pipeline handle it
for column in X.columns:
if X[column].isna().all():
X[column] = pd.to_numeric(X[column])
# Make sure we only set this once. It should not change
if not self.feature_types:
self.feature_types = feature_types
# This proc has to handle multiple calls, for X_train
# and X_test scenarios. We have to make sure also that
# data is consistent within calls
if enc_columns:
if self.enc_columns and self.enc_columns != enc_columns:
raise ValueError(
"Changing the column-types of the input data to Auto-Sklearn is not "
"allowed. The estimator previously was fitted with categorical/boolean "
"columns {}, yet, the new input data has categorical/boolean values {}. "
"Please recreate the estimator from scratch when changing the input "
"data. ".format(
self.enc_columns,
enc_columns,
)
)
else:
self.enc_columns = enc_columns
if not self.feature_encoder:
self.feature_encoder = make_column_transformer(
(preprocessing.OrdinalEncoder(), self.enc_columns),
remainder="passthrough"
)
# Mypy redefinition
assert self.feature_encoder is not None
self.feature_encoder.fit(X)
# The column transformer reorders the feature types - we therefore need to change
# it as well
def comparator(cmp1, cmp2):
if (
cmp1 == 'categorical' and cmp2 == 'categorical'
or cmp1 == 'numerical' and cmp2 == 'numerical'
):
return 0
elif cmp1 == 'categorical' and cmp2 == 'numerical':
return -1
elif cmp1 == 'numerical' and cmp2 == 'categorical':
return 1
else:
raise ValueError((cmp1, cmp2))
self.feature_types = sorted(
self.feature_types,
key=functools.cmp_to_key(comparator)
)
if self.feature_encoder:
try:
X = self.feature_encoder.transform(X)
except ValueError as e:
if 'Found unknown categories' in e.args[0]:
# Make the message more informative
raise ValueError(
"During fit, the input features contained categorical values in columns"
"{}, with categories {} which were encoded by Auto-sklearn automatically."
"Nevertheless, a new input contained new categories not seen during "
"training = {}. The OrdinalEncoder used by Auto-sklearn cannot handle "
"this yet (due to a limitation on scikit-learn being addressed via:"
" https://github.com/scikit-learn/scikit-learn/issues/17123)"
"".format(
self.enc_columns,
self.feature_encoder.transformers_[0][1].categories_,
e.args[0],
)
)
else:
raise e
# In code check to make sure everything is numeric
if hasattr(X, "iloc"):
is_number = np.vectorize(lambda x: pd.api.types.is_numeric_dtype(x))
import functools
import json
import os
from multiprocessing.pool import Pool
from typing import List, Tuple, Type, Any
import pprint
import abc
import luigi
import numpy as np
import pandas as pd
import torch
import torchbearer
from torchbearer import Trial
from tqdm import tqdm
import gc
from mars_gym.data.dataset import (
preprocess_interactions_data_frame,
InteractionsDataset,
)
from mars_gym.evaluation.propensity_score import FillPropensityScoreMixin
from mars_gym.evaluation.metrics.fairness import calculate_fairness_metrics
from mars_gym.utils import files
from mars_gym.utils.files import get_test_set_predictions_path, get_params_path
from mars_gym.evaluation.metrics.offpolicy import (
eval_IPS,
eval_CIPS,
eval_SNIPS,
eval_doubly_robust,
)
from mars_gym.evaluation.metrics.rank import (
mean_reciprocal_rank,
average_precision,
precision_at_k,
ndcg_at_k,
personalization_at_k,
prediction_coverage_at_k,
)
from mars_gym.simulation.training import (
TorchModelTraining,
load_torch_model_training_from_task_id,
)
from mars_gym.evaluation.policy_estimator import PolicyEstimatorTraining
from mars_gym.torch.data import FasterBatchSampler, NoAutoCollationDataLoader
from mars_gym.utils.reflection import load_attr, get_attribute_names
from mars_gym.utils.utils import parallel_literal_eval, JsonEncoder
from mars_gym.utils.index_mapping import (
create_index_mapping,
create_index_mapping_from_arrays,
transform_with_indexing,
map_array,
)
class BaseEvaluationTask(luigi.Task, metaclass=abc.ABCMeta):
model_task_class: str = luigi.Parameter(
default="mars_gym.simulation.interaction.InteractionTraining"
)
model_task_id: str = luigi.Parameter()
offpolicy_eval: bool = luigi.BoolParameter(default=False)
task_hash: str = luigi.Parameter(default="none")
@property
def cache_attr(self):
return [""]
@property
def task_name(self):
return self.model_task_id + "_" + self.task_id.split("_")[-1]
@property
def model_training(self) -> TorchModelTraining:
if not hasattr(self, "_model_training"):
class_ = load_attr(self.model_task_class, Type[TorchModelTraining])
self._model_training = load_torch_model_training_from_task_id(
class_, self.model_task_id
)
return self._model_training
@property
def n_items(self):
return self.model_training.n_items
def output(self):
return luigi.LocalTarget(
os.path.join(
files.OUTPUT_PATH,
"evaluation",
self.__class__.__name__,
"results",
self.task_name,
)
)
def cache_cleanup(self):
for a in self.cache_attr:
if hasattr(self, a):
delattr(self, a)
def _save_params(self):
with open(get_params_path(self.output().path), "w") as params_file:
json.dump(
self.param_kwargs, params_file, default=lambda o: dict(o), indent=4
)
class EvaluateTestSetPredictions(FillPropensityScoreMixin, BaseEvaluationTask):
# TODO: consolidate these params into a single dict param
direct_estimator_class: str = luigi.Parameter(default="mars_gym.simulation.training.SupervisedModelTraining")
direct_estimator_negative_proportion: int = luigi.FloatParameter(0)
direct_estimator_batch_size: int = luigi.IntParameter(default=500)
direct_estimator_epochs: int = luigi.IntParameter(default=50)
direct_estimator_extra_params: dict = luigi.DictParameter(default={})
eval_cips_cap: int = luigi.IntParameter(default=15)
policy_estimator_extra_params: dict = luigi.DictParameter(default={})
num_processes: int = luigi.IntParameter(default=os.cpu_count())
fairness_columns: List[str] = luigi.ListParameter(default=[])
rank_metrics: List[str] = luigi.ListParameter(default=[])
only_new_interactions: bool = luigi.BoolParameter(default=False)
only_exist_items: bool = luigi.BoolParameter(default=False)
only_exist_users: bool = luigi.BoolParameter(default=False)
def get_direct_estimator(self, extra_params: dict) -> TorchModelTraining:
assert self.direct_estimator_class is not None
estimator_class = load_attr(
self.direct_estimator_class, Type[TorchModelTraining]
)
attribute_names = get_attribute_names(estimator_class)
params = {
key: value
for key, value in self.model_training.param_kwargs.items()
if key in attribute_names
}
return estimator_class(**{**params, **extra_params})
# TODO: we need to change this
@property
def direct_estimator(self):
if not hasattr(self, "_direct_estimator"):
self._direct_estimator = self.get_direct_estimator(
{**{
"project": self.model_training.project,
"learning_rate": 0.0001,
"test_size": 0.0,
"epochs": self.direct_estimator_epochs,
"batch_size": self.direct_estimator_batch_size,
"loss_function": "bce",
"loss_function_params": {},
"observation": "All Data",
"negative_proportion": self.direct_estimator_negative_proportion,
"policy_estimator_extra_params": {},
"metrics": ["loss"],
"seed": 51,
}, **self.direct_estimator_extra_params}
)
return self._direct_estimator
@property
def policy_estimator(self) -> PolicyEstimatorTraining:
if not hasattr(self, "_policy_estimator"):
self._policy_estimator = PolicyEstimatorTraining(
project=self.model_training.project,
data_frames_preparation_extra_params=self.model_training.data_frames_preparation_extra_params,
**self.policy_estimator_extra_params,
)
return self._policy_estimator
def requires(self):
if self.offpolicy_eval:
return [self.direct_estimator, self.policy_estimator]
return []
@property
def item_column(self) -> str:
return self.model_training.project_config.item_column.name
@property
def available_arms_column(self) -> str:
return self.model_training.project_config.available_arms_column_name
@property
def propensity_score_column(self) -> str:
return self.model_training.project_config.propensity_score_column_name
def get_item_index(self)-> List[str]:
indexed_list = list(self.model_training.index_mapping[self.model_training.project_config.item_column.name].keys())
indexed_list = [x for x in indexed_list if x is not None and str(x) != 'nan']
return indexed_list
def get_catalog(self, df: pd.DataFrame) -> List[str]:
indexed_list = self.get_item_index()
test_list = list(df["sorted_actions"])
test_list.append(indexed_list)
all_items = sum(test_list, [])
unique_items = list(np.unique(all_items))
return unique_items
def run(self):
os.makedirs(self.output().path)
# df: pd.DataFrame = preprocess_interactions_data_frame(
# pd.read_csv(
# get_test_set_predictions_path(self.model_training.output().path)
# ),
# self.model_training.project_config,
# ) # .sample(10000)
df: pd.DataFrame = pd.read_csv(
get_test_set_predictions_path(self.model_training.output().path),
dtype = {self.model_training.project_config.item_column.name : "str"}
) # .sample(10000)
df["sorted_actions"] = parallel_literal_eval(df["sorted_actions"])
df["prob_actions"] = parallel_literal_eval(df["prob_actions"])
df["action_scores"] = parallel_literal_eval(df["action_scores"])
df["action"] = df["sorted_actions"].apply(
lambda sorted_actions: str(sorted_actions[0])
)
with Pool(self.num_processes) as p:
print("Creating the relevance lists...")
# from IPython import embed; embed()
df["relevance_list"] = list(
tqdm(
p.starmap(
_create_relevance_list,
zip(
df["sorted_actions"],
df[self.model_training.project_config.item_column.name],
df[self.model_training.project_config.output_column.name],
),
),
total=len(df),
)
)
if self.model_training.metadata_data_frame is not None:
df = pd.merge(
df,
pd.read_csv(self.model_training.metadata_data_frame_path, dtype = {self.model_training.project_config.item_column.name : "str"})
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 27 10:23:59 2021
@author: alber
"""
import re
import os
import pandas as pd
import numpy as np
import spacy
import pickle
import lightgbm as lgb
import imblearn
from sklearn import preprocessing
from sklearn.semi_supervised import (
LabelPropagation,
LabelSpreading,
SelfTrainingClassifier,
)
from sklearn import metrics
from sklearn.dummy import DummyClassifier
from sklearn.metrics import classification_report
# from nltk.corpus import stopwords
# from nltk import ngrams
from nltk.stem.snowball import SnowballStemmer
# from sentence_transformers import SentenceTransformer, util
from imblearn.over_sampling import SMOTE, BorderlineSMOTE, ADASYN
from statsmodels.stats.inter_rater import cohens_kappa
from common.tools import get_files, file_presistance
from common.config import (
PATH_POEMS, PATH_RESULTS, PATH_AFF_LEXICON, PATH_GROUND_TRUTH
)
nlp = spacy.load("es_core_news_md")
stemmer = SnowballStemmer("spanish")
def _getReport(
y_test, y_pred, y_pred_proba, target_names, using_affective = "yes",
semantic_model = "", classification_model = ""
):
"""
TODO
Parameters
----------
y_test : TYPE
DESCRIPTION.
y_pred : TYPE
DESCRIPTION.
target_names : TYPE
DESCRIPTION.
using_affective : TYPE, optional
DESCRIPTION. The default is "yes".
semantic_model : TYPE, optional
DESCRIPTION. The default is "".
classification_model : TYPE, optional
DESCRIPTION. The default is "".
Returns
-------
df_metrics_iter : TYPE
DESCRIPTION.
"""
### 1. Standard Metrics
report = classification_report(
y_test, y_pred, target_names = target_names, output_dict = True
)
df_metrics_iter = pd.DataFrame(
{
'category': [category],
'using_affective': [using_affective],
'semantic_model': [semantic_model],
'classification_model': [classification_model],
'n_class_0': [report[f'{category}_0']['support']],
'n_class_1': [report[f'{category}_1']['support']],
'precision_class_0': [report[f'{category}_0']['precision']],
'precision_class_1': [report[f'{category}_1']['precision']],
'recall_class_0': [report[f'{category}_0']['recall']],
'recall_class_1': [report[f'{category}_1']['recall']],
'f1_class_0': [report[f'{category}_0']['f1-score']],
'f1_class_1': [report[f'{category}_1']['f1-score']],
'precision_weighted': [report['weighted avg']['precision']],
'recall_weighted': [report['weighted avg']['recall']],
'f1_weighted': [report['weighted avg']['f1-score']]
}
)
### 2. Cohen's Kappa
# Make Dataframe
df = pd.DataFrame({"A": y_test, "B": y_pred})
# Switch it to three columns A's answer, B's answer and count of that combination
df = df.value_counts().reset_index()
# Check compliance
if len(df) < 4:
df_aux = pd.DataFrame({'A': [0.0, 1.0, 0.0, 1.0],
'B': [0.0, 0.0, 1.0, 1.0]
})
df = df.merge(df_aux, how="outer").fillna(0)
# Make square
square = df.pivot(columns="A",index="B").values
# Get Kappa
dct_kappa = cohens_kappa(square)
kappa_max = dct_kappa['kappa_max']
kappa = dct_kappa['kappa']
df_metrics_iter['kappa'] = [kappa]
df_metrics_iter['kappa_max'] = [kappa_max]
### 3. AUC
y_pred_proba = np.asarray([x if str(x) != 'nan' else 0.0 for x in y_pred_proba])
fpr, tpr, thresholds = metrics.roc_curve(
y_test, y_pred_proba, pos_label=1
)
auc = metrics.auc(fpr, tpr)
df_metrics_iter['auc'] = [auc]
return df_metrics_iter
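# --- Illustrative call sketch (not part of the original analysis) ---
# _getReport builds its report keys from the module-level name 'category', so that name
# must be bound before the call. The tiny arrays below are fabricated solely to show the
# expected shapes and are not real annotations.
def demo_get_report():
    global category
    category = 'example_category'
    y_test = np.array([0, 1, 0, 1, 0, 1])
    y_pred = np.array([0, 1, 1, 1, 0, 0])
    y_pred_proba = np.array([0.2, 0.9, 0.6, 0.8, 0.1, 0.4])
    return _getReport(y_test, y_pred, y_pred_proba,
                      target_names=[f'{category}_0', f'{category}_1'])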
# =============================================================================
# 1. Prepare Data
# =============================================================================
### Load Sonnets Features
# Load Data
file_to_read = open(f"{PATH_RESULTS}/dct_sonnets_input_v5", "rb")
dct_sonnets = pickle.load(file_to_read)
file_to_read.close()
# Only DISCO
if False:
dct_sonnets = {x:y for x,y in dct_sonnets.items() if x <= 4085}
# Sonnet Matrix
list_original_sentence = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5'
]
list_semantic_models = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5',
# 'enc_text_model_hg_bert_max',
# 'enc_text_model_hg_bert_span',
# 'enc_text_model_hg_bert_median',
'enc_text_model_hg_bert_avg_w',
# 'enc_text_model_hg_bert_sp_max',
# 'enc_text_model_hg_bert_sp_span',
# 'enc_text_model_hg_bert_sp_median',
'enc_text_model_hg_bert_sp_avg_w',
# 'enc_text_model_hg_ro_max',
# 'enc_text_model_hg_ro_span',
# 'enc_text_model_hg_ro_median',
# 'enc_text_model_hg_ro_avg_w'
]
# General Variables
dct_metrics_all_models = {}
df_meta = pd.concat(
[
pd.DataFrame({"index": [item["index"]], "text": [item["text"]]})
for key, item in dct_sonnets.items()
]
)
df_affective = pd.concat([item["aff_features"] for key, item in dct_sonnets.items()]).fillna(0)
# Load psycho names
df_names = pd.read_csv(f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
list_names = list(df_names["es_name"].values)
list_aff = [
"concreteness",
"context availability",
"anger",
"arousal",
"disgust",
"fear",
"happinness",
"imageability",
"sadness",
"valence",
]
### Load Ground Truth
if False:
df_gt = pd.read_csv(f"{PATH_GROUND_TRUTH}/poems_corpus_all.csv")
df_gt = df_gt[df_gt['index'].isin(list(dct_sonnets.keys()))]
df_gt = df_gt.rename(columns={"text": "text_original"})
df_gt.columns = [str(x).rstrip().lstrip() for x in list(df_gt.columns)]
### Get Subsample from GT
df_add = pd.DataFrame()
for category in list_names:
if category in list_aff:
continue
try:
df_iter = df_gt.groupby(category).apply(lambda s: s.sample(2))
except:
continue
df_add = df_add.append(df_iter)
df_add = df_add.drop_duplicates()
# New GT (without data used in training)
df_gt = df_gt[~df_gt["index"].isin(df_add["index"])].copy()
df_add.to_csv("train_dataset.csv", index=False)
df_gt.to_csv("test_dataset.csv", index=False)
else:
df_add = pd.read_csv("train_dataset.csv")
# import sys
# sys.path.append('JEMIPYC')
# from array_check_function_global import df,dfn,dfv,dfx,dfnx,dfvx
import pandas as pd
import numpy as np
tab = '__'
# Non-extended versions: the number of parameters is not limited -- pass 2, 3, or as many as you want.
# ex) df(A,B,C,D,...,Z...)
# A single parameter also works.
def df(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
blank = pd.DataFrame(blank,columns=[tab])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = xx
else:
df_concat = pd.concat([df_concat,blank,xx], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
def dfn(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
tabn = '{'+str(i+1)+'}'
blank = pd.DataFrame(blank,columns=[tabn])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = pd.concat([xx,blank], axis=1)
else:
df_concat = pd.concat([df_concat,xx,blank], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
def dfv(*x):
    pd.reset_option('display.max_columns')
    pd.reset_option('display.max_rows')
    # The last positional argument is the list of labels, not data,
    # so only the first len(x) - 1 arguments are rendered.
    leng = len(x)
    df_concat = []
    for i in range(leng - 1):
        row = len(x[0])
        blank = [''] * row
        vname = x[-1][i]
        tabv = "<(" + str(vname) + ")"
        blank = pd.DataFrame(blank, columns=[tabv])
        xx = pd.DataFrame(x[i])
        if i == 0:
            df_concat = pd.concat([xx, blank], axis=1)
        else:
            df_concat = pd.concat([df_concat, xx, blank], axis=1)
    df_concat.replace(np.nan, '', inplace=True)
    display(df_concat)
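# --- Illustrative usage sketch (not part of the module) ---
# The helpers above are meant for notebook display (they rely on IPython's display()),
# so the example only builds two small arrays and shows them side by side.
def demo_df_helpers():
    a = np.arange(6).reshape(3, 2)
    b = np.arange(6, 12).reshape(3, 2)
    df(a, b)                 # blocks separated by an empty '__' column
    dfn(a, b)                # blocks followed by {1}, {2} marker columns
    dfv(a, b, ['a', 'b'])    # last argument labels each block: <(a), <(b)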
# extension
def dfx(*x):
pd.set_option('display.max_columns', None)
import os
import pandas as pd
import matplotlib.pyplot as plt
import datapackage as dp
import plotly.io as pio
import plotly.offline as offline
from plots import (
hourly_plot,
stacked_plot,
price_line_plot,
price_scatter_plot,
merit_order_plot,
filling_level_plot,
)
results = [r for r in os.listdir("results") if "plots" not in r]
country = "DE"
# shadow prices
sorted = {}
unsorted = {}
for r in results:
path = os.path.join("results", r, "output", "shadow_prices.csv")
sprices = pd.read_csv(path, index_col=[0], parse_dates=True)[
country + "-electricity"
]
sorted[r] = sprices.sort_values().values
unsorted[r] = sprices.values
# residual load and more
renewables = ["wind-onshore", "wind-offshore", "solar-pv", "hydro-ror"]
timestamps = {}
marginal_cost = {}
shadow_prices = {}
storages = {}
prices = {}
rload = {}
for r in results:
path = os.path.join("results", r, "output", country + "-electricity.csv")
country_electricity_df = pd.read_csv(path, index_col=[0], parse_dates=True)
country_electricity_df["rload"] = country_electricity_df[
("-").join([country, "electricity-load"])
] - country_electricity_df[
[("-").join([country, i]) for i in renewables]
].sum(
axis=1
)
rload[r] = country_electricity_df["rload"].values
timestamps[r] = country_electricity_df.index
if country == "DE":
path = os.path.join("results", r, "input", "datapackage.json")
input_datapackage = dp.Package(path)
dispatchable = input_datapackage.get_resource("dispatchable")
df = pd.DataFrame(dispatchable.read(keyed=True))
df = df.set_index("name")
# select all storages and sum up
storage = [
ss
for ss in [
"DE-" + s for s in ["hydro-phs", "hydro-reservoir", "battery"]
]
if ss in country_electricity_df.columns
]
storages[r] = country_electricity_df[storage].sum(axis=1)
marginal_cost[r] = df
path = os.path.join("results", r, "output", "shadow_prices.csv")
shadow_prices[r] = pd.read_csv(path, index_col=[0], parse_dates=True)[
"DE-electricity"
]
storages[r] = pd.concat([storages[r], shadow_prices[r]], axis=1)
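# --- Illustrative addition (not part of the original script, which delegates its
# plotting to the imported helpers from 'plots') ---
# A simple price-duration curve drawn from the 'sorted' dict of ordered shadow prices
# built above. The output path, axis labels and figure styling are assumptions.
def plot_price_duration(sorted_prices, out_path="results/plots/price_duration.png"):
    fig, ax = plt.subplots(figsize=(10, 5))
    for scenario, prices in sorted_prices.items():
        ax.plot(prices, label=scenario)
    ax.set_xlabel("Hours of the year (sorted)")
    ax.set_ylabel("Shadow price")
    ax.legend()
    fig.savefig(out_path, dpi=150)
    return fig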
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from itertools import product
from sklearn.model_selection import TimeSeriesSplit
import vectorbt as vbt
from vectorbt.generic import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
df = pd.DataFrame({
'a': [1, 2, 3, 4, np.nan],
'b': [np.nan, 4, 3, 2, 1],
'c': [1, 2, np.nan, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
group_by = np.array(['g1', 'g1', 'g2'])
@njit
def i_or_col_pow_nb(i_or_col, x, pow):
return np.power(x, pow)
@njit
def pow_nb(x, pow):
return np.power(x, pow)
@njit
def nanmean_nb(x):
return np.nanmean(x)
@njit
def i_col_nanmean_nb(i, col, x):
return np.nanmean(x)
@njit
def i_nanmean_nb(i, x):
return np.nanmean(x)
@njit
def col_nanmean_nb(col, x):
return np.nanmean(x)
# ############# accessors.py ############# #
class TestAccessors:
def test_shuffle(self):
pd.testing.assert_series_equal(
df['a'].vbt.shuffle(seed=seed),
pd.Series(
np.array([2.0, np.nan, 3.0, 1.0, 4.0]),
index=df['a'].index,
name=df['a'].name
)
)
np.testing.assert_array_equal(
df['a'].vbt.shuffle(seed=seed).values,
nb.shuffle_1d_nb(df['a'].values, seed=seed)
)
pd.testing.assert_frame_equal(
df.vbt.shuffle(seed=seed),
pd.DataFrame(
np.array([
[2., 2., 2.],
[np.nan, 4., 1.],
[3., 3., 2.],
[1., np.nan, 1.],
[4., 1., np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_value",
[-1, 0., np.nan],
)
def test_fillna(self, test_value):
pd.testing.assert_series_equal(df['a'].vbt.fillna(test_value), df['a'].fillna(test_value))
pd.testing.assert_frame_equal(df.vbt.fillna(test_value), df.fillna(test_value))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.bshift(test_n), df['a'].shift(-test_n))
np.testing.assert_array_equal(
df['a'].vbt.bshift(test_n).values,
nb.bshift_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.bshift(test_n), df.shift(-test_n))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.fshift(test_n), df['a'].shift(test_n))
np.testing.assert_array_equal(
df['a'].vbt.fshift(test_n).values,
nb.fshift_1d_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.fshift(test_n), df.shift(test_n))
def test_diff(self):
pd.testing.assert_series_equal(df['a'].vbt.diff(), df['a'].diff())
np.testing.assert_array_equal(df['a'].vbt.diff().values, nb.diff_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.diff(), df.diff())
def test_pct_change(self):
pd.testing.assert_series_equal(df['a'].vbt.pct_change(), df['a'].pct_change(fill_method=None))
np.testing.assert_array_equal(df['a'].vbt.pct_change().values, nb.pct_change_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.pct_change(), df.pct_change(fill_method=None))
def test_ffill(self):
pd.testing.assert_series_equal(df['a'].vbt.ffill(), df['a'].ffill())
pd.testing.assert_frame_equal(df.vbt.ffill(), df.ffill())
def test_product(self):
assert df['a'].vbt.product() == df['a'].product()
np.testing.assert_array_equal(df.vbt.product(), df.product())
def test_cumsum(self):
pd.testing.assert_series_equal(df['a'].vbt.cumsum(), df['a'].cumsum())
pd.testing.assert_frame_equal(df.vbt.cumsum(), df.cumsum())
def test_cumprod(self):
pd.testing.assert_series_equal(df['a'].vbt.cumprod(), df['a'].cumprod())
pd.testing.assert_frame_equal(df.vbt.cumprod(), df.cumprod())
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_min(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_min(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window),
df.rolling(test_window).min()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_max(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_max(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window),
df.rolling(test_window).max()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_mean(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_mean(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window),
df.rolling(test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [0, 1]))
)
def test_rolling_std(self, test_window, test_minp, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df['a'].rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df.rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window),
df.rolling(test_window).std()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust",
list(product([1, 2, 3, 4, 5], [1, None], [False, True]))
)
def test_ewm_mean(self, test_window, test_minp, test_adjust):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window),
df.ewm(span=test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [False, True], [0, 1]))
)
def test_ewm_std(self, test_window, test_minp, test_adjust, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window),
df.ewm(span=test_window).std()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_min(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_min(minp=test_minp),
df['a'].expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(minp=test_minp),
df.expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(),
df.expanding().min()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_max(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_max(minp=test_minp),
df['a'].expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(minp=test_minp),
df.expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(),
df.expanding().max()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_mean(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_mean(minp=test_minp),
df['a'].expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(minp=test_minp),
df.expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(),
df.expanding().mean()
)
@pytest.mark.parametrize(
"test_minp,test_ddof",
list(product([1, 3], [0, 1]))
)
def test_expanding_std(self, test_minp, test_ddof):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df['a'].expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df.expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(),
df.expanding().std()
)
def test_apply_along_axis(self):
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=0),
df.apply(pow_nb, args=(2,), axis=0, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=1),
df.apply(pow_nb, args=(2,), axis=1, raw=True)
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_apply(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb),
df.rolling(test_window).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(3, i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[2.75, 2.75, 2.75],
[np.nan, np.nan, np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_apply(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df['a'].expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df.expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb),
df.expanding().apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[2.0, 2.0, 2.0],
[2.2857142857142856, 2.2857142857142856, 2.2857142857142856],
[2.4, 2.4, 2.4],
[2.1666666666666665, 2.1666666666666665, 2.1666666666666665]
]),
index=df.index,
columns=df.columns
)
)
def test_groupby_apply(self):
pd.testing.assert_series_equal(
df['a'].vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df['a'].groupby(np.asarray([1, 1, 2, 2, 3])).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df.groupby(np.asarray([1, 1, 2, 2, 3])).agg({
'a': lambda x: nanmean_nb(x.values),
'b': lambda x: nanmean_nb(x.values),
'c': lambda x: nanmean_nb(x.values)
}), # any clean way to do column-wise grouping in pandas?
)
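# (Note: df.groupby(keys).agg(func) with a single callable is applied to each column
# separately, so it would be an equivalent, more compact way to build the expected frame
# above; the per-column dict is kept as written for clarity.)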
def test_groupby_apply_on_matrix(self):
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2., 2., 2.],
[2.8, 2.8, 2.8],
[1., 1., 1.]
]),
index=pd.Int64Index([1, 2, 3], dtype='int64'),
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_freq",
['1h', '3d', '1w'],
)
def test_resample_apply(self, test_freq):
pd.testing.assert_series_equal(
df['a'].vbt.resample_apply(test_freq, i_col_nanmean_nb),
df['a'].resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply(test_freq, i_col_nanmean_nb),
df.resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply('3d', i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2.28571429, 2.28571429, 2.28571429],
[2., 2., 2.]
]),
index=pd.DatetimeIndex(['2018-01-01', '2018-01-04'], dtype='datetime64[ns]', freq='3D'),
columns=df.columns
)
)
def test_applymap(self):
@njit
def mult_nb(i, col, x):
return x * 2
pd.testing.assert_series_equal(
df['a'].vbt.applymap(mult_nb),
df['a'].map(lambda x: x * 2)
)
pd.testing.assert_frame_equal(
df.vbt.applymap(mult_nb),
df.applymap(lambda x: x * 2)
)
def test_filter(self):
@njit
def greater_nb(i, col, x):
return x > 2
pd.testing.assert_series_equal(
df['a'].vbt.filter(greater_nb),
df['a'].map(lambda x: x if x > 2 else np.nan)
)
pd.testing.assert_frame_equal(
df.vbt.filter(greater_nb),
df.applymap(lambda x: x if x > 2 else np.nan)
)
def test_apply_and_reduce(self):
@njit
def every_nth_nb(col, a, n):
return a[::n]
@njit
def sum_nb(col, a, b):
return np.nansum(a) + b
assert df['a'].vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)) == \
df['a'].iloc[::2].sum() + 3
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)),
df.iloc[::2].sum().rename('apply_and_reduce') + 3
)
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(
every_nth_nb, sum_nb, apply_args=(2,),
reduce_args=(3,), wrap_kwargs=dict(time_units=True)),
(df.iloc[::2].sum().rename('apply_and_reduce') + 3) * day_dt
)
def test_reduce(self):
@njit
def sum_nb(col, a):
return np.nansum(a)
assert df['a'].vbt.reduce(sum_nb) == df['a'].sum()
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb),
df.sum().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, wrap_kwargs=dict(time_units=True)),
df.sum().rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, group_by=group_by),
pd.Series([20.0, 6.0], index=['g1', 'g2']).rename('reduce')
)
@njit
def argmax_nb(col, a):
a = a.copy()
a[np.isnan(a)] = -np.inf
return np.argmax(a)
assert df['a'].vbt.reduce(argmax_nb, to_idx=True) == df['a'].idxmax()
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True),
df.idxmax().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True, flatten=True, group_by=group_by),
pd.Series(['2018-01-02', '2018-01-02'], dtype='datetime64[ns]', index=['g1', 'g2']).rename('reduce')
)
@njit
def min_and_max_nb(col, a):
out = np.empty(2)
out[0] = np.nanmin(a)
out[1] = np.nanmax(a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([np.nanmin(df['a']), np.nanmax(df['a'])], index=['min', 'max'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
df.apply(lambda x: pd.Series(np.asarray([np.nanmin(x), np.nanmax(x)]), index=['min', 'max']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True, group_by=group_by,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame([[1.0, 1.0], [4.0, 2.0]], index=['min', 'max'], columns=['g1', 'g2'])
)
@njit
def argmin_and_argmax_nb(col, a):
# nanargmin and nanargmax
out = np.empty(2)
_a = a.copy()
_a[np.isnan(_a)] = np.inf
out[0] = np.argmin(_a)
_a = a.copy()
_a[np.isnan(_a)] = -np.inf
out[1] = np.argmax(_a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.Series([df['a'].idxmin(), df['a'].idxmax()], index=['idxmin', 'idxmax'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
df.apply(lambda x: pd.Series(np.asarray([x.idxmin(), x.idxmax()]), index=['idxmin', 'idxmax']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='C', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-02', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='F', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-04', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
def test_squeeze_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.squeeze_grouped(i_col_nanmean_nb, group_by=group_by),
pd.DataFrame([
[1.0, 1.0],
[3.0, 2.0],
[3.0, np.nan],
[3.0, 2.0],
[1.0, 1.0]
], index=df.index, columns=['g1', 'g2'])
)
def test_flatten_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='C'),
pd.DataFrame([
[1.0, 1.0],
[np.nan, np.nan],
[2.0, 2.0],
[4.0, np.nan],
[3.0, np.nan],
[3.0, np.nan],
[4.0, 2.0],
[2.0, np.nan],
[np.nan, 1.0],
[1.0, np.nan]
], index=np.repeat(df.index, 2), columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='F'),
pd.DataFrame([
[1.0, 1.0],
[2.0, 2.0],
[3.0, np.nan],
[4.0, 2.0],
[np.nan, 1.0],
[np.nan, np.nan],
[4.0, np.nan],
[3.0, np.nan],
[2.0, np.nan],
[1.0, np.nan]
], index=np.tile(df.index, 2), columns=['g1', 'g2'])
)
@pytest.mark.parametrize(
"test_name,test_func,test_func_nb",
[
('min', lambda x, **kwargs: x.min(**kwargs), nb.nanmin_nb),
('max', lambda x, **kwargs: x.max(**kwargs), nb.nanmax_nb),
('mean', lambda x, **kwargs: x.mean(**kwargs), nb.nanmean_nb),
('median', lambda x, **kwargs: x.median(**kwargs), nb.nanmedian_nb),
('std', lambda x, **kwargs: x.std(**kwargs, ddof=0), nb.nanstd_nb),
('count', lambda x, **kwargs: x.count(**kwargs), nb.nancnt_nb),
('sum', lambda x, **kwargs: x.sum(**kwargs), nb.nansum_nb)
],
)
def test_funcs(self, test_name, test_func, test_func_nb):
# numeric
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack()),
test_func(df['c'])
], index=['g1', 'g2']).rename(test_name)
)
np.testing.assert_array_equal(test_func(df).values, test_func_nb(df.values))
pd.testing.assert_series_equal(
test_func(df.vbt, wrap_kwargs=dict(time_units=True)),
test_func(df).rename(test_name) * day_dt
)
# boolean
bool_ts = df == df
assert test_func(bool_ts['a'].vbt) == test_func(bool_ts['a'])
pd.testing.assert_series_equal(
test_func(bool_ts.vbt),
test_func(bool_ts).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(bool_ts.vbt, wrap_kwargs=dict(time_units=True)),
test_func(bool_ts).rename(test_name) * day_dt
)
@pytest.mark.parametrize(
"test_name,test_func",
[
('idxmin', lambda x, **kwargs: x.idxmin(**kwargs)),
('idxmax', lambda x, **kwargs: x.idxmax(**kwargs))
],
)
def test_arg_funcs(self, test_name, test_func):
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack())[0],
test_func(df['c'])
], index=['g1', 'g2'], dtype='datetime64[ns]').rename(test_name)
)
def test_describe(self):
pd.testing.assert_series_equal(
df['a'].vbt.describe(),
df['a'].describe()
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=None),
df.describe(percentiles=None)
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=[]),
df.describe(percentiles=[])
)
test_against = df.describe(percentiles=np.arange(0, 1, 0.1))
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1)),
test_against
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1), group_by=group_by),
pd.DataFrame({
'g1': df[['a', 'b']].stack().describe(percentiles=np.arange(0, 1, 0.1)).values,
'g2': df['c'].describe(percentiles=np.arange(0, 1, 0.1)).values
}, index=test_against.index)
)
def test_drawdown(self):
pd.testing.assert_series_equal(
df['a'].vbt.drawdown(),
df['a'] / df['a'].expanding().max() - 1
)
pd.testing.assert_frame_equal(
df.vbt.drawdown(),
df / df.expanding().max() - 1
)
def test_drawdowns(self):
assert type(df['a'].vbt.drawdowns) is vbt.Drawdowns
assert df['a'].vbt.drawdowns.wrapper.freq == df['a'].vbt.wrapper.freq
assert df['a'].vbt.drawdowns.wrapper.ndim == df['a'].ndim
assert df.vbt.drawdowns.wrapper.ndim == df.ndim
def test_to_mapped_array(self):
np.testing.assert_array_equal(
df.vbt.to_mapped_array().values,
np.array([1., 2., 3., 4., 4., 3., 2., 1., 1., 2., 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().col_arr,
np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().idx_arr,
np.array([0, 1, 2, 3, 1, 2, 3, 4, 0, 1, 3, 4])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).values,
np.array([1., 2., 3., 4., np.nan, np.nan, 4., 3., 2., 1., 1., 2., np.nan, 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).col_arr,
np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).idx_arr,
np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
)
def test_zscore(self):
pd.testing.assert_series_equal(
df['a'].vbt.zscore(),
(df['a'] - df['a'].mean()) / df['a'].std(ddof=0)
)
pd.testing.assert_frame_equal(
df.vbt.zscore(),
(df - df.mean()) / df.std(ddof=0)
)
def test_split(self):
splitter = TimeSeriesSplit(n_splits=2)
(train_df, train_indexes), (test_df, test_indexes) = df['a'].vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, 1.0],
[2.0, 2.0],
[3.0, 3.0],
[np.nan, 4.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
train_indexes[i],
target[i]
)
pd.testing.assert_frame_equal(
test_df,
pd.DataFrame(
np.array([
[4.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
test_indexes[i],
target[i]
)
(train_df, train_indexes), (test_df, test_indexes) = df.vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 1.0, np.nan, 1.0],
[2.0, 4.0, 2.0, 2.0, 4.0, 2.0],
[3.0, 3.0, np.nan, 3.0, 3.0, np.nan],
[np.nan, np.nan, np.nan, 4.0, 2.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.MultiIndex.from_tuples([
(0, 'a'),
(0, 'b'),
(0, 'c'),
(1, 'a'),
(1, 'b'),
(1, 'c')
], names=['split_idx', None])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
train_indexes[i],
target[i]
)
pd.testing.assert_frame_equal(
test_df,
pd.DataFrame(
np.array([
[4.0, 2.0, 2.0, np.nan, 1.0, 1.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.MultiIndex.from_tuples([
(0, 'a'),
(0, 'b'),
(0, 'c'),
(1, 'a'),
(1, 'b'),
(1, 'c')
], names=['split_idx', None])
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
test_indexes[i],
target[i]
)
def test_range_split(self):
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(n=2)[0],
pd.DataFrame(
np.array([
[1., 4.],
[2., np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(n=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(range_len=2)[0],
pd.DataFrame(
np.array([
[1., 2., 3., 4.],
[2., 3., 4., np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1, 2, 3], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02', '2018-01-03'], dtype='datetime64[ns]', name='split_1', freq=None),
pd.DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', name='split_2', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_3', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(range_len=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(range_len=2, n=3)[0],
pd.DataFrame(
np.array([
[1., 3., 4.],
[2., 4., np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1, 2], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', name='split_1', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_2', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(range_len=2, n=3)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(range_len=3, n=2)[0],
pd.DataFrame(
np.array([
[1., 3.],
[2., 4.],
[3., np.nan]
]),
index=pd.RangeIndex(start=0, stop=3, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-03', '2018-01-04', '2018-01-05'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(range_len=3, n=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df.vbt.range_split(n=2)[0],
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 4.0, 2.0, 2.0],
[2.0, 4.0, 2.0, np.nan, 1.0, 1.0]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.MultiIndex.from_arrays([
pd.Int64Index([0, 0, 0, 1, 1, 1], dtype='int64', name='split_idx'),
pd.Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df.vbt.range_split(n=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df.vbt.range_split(start_idxs=[0, 1], end_idxs=[2, 3])[0],
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 2.0, 4.0, 2.0],
[2.0, 4.0, 2.0, 3.0, 3.0, np.nan],
[3.0, 3.0, np.nan, 4.0, 2.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=3, step=1),
columns=pd.MultiIndex.from_arrays([
pd.Int64Index([0, 0, 0, 1, 1, 1], dtype='int64', name='split_idx'),
pd.Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df.vbt.range_split(start_idxs=[0, 1], end_idxs=[2, 3])[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df.vbt.range_split(start_idxs=df.index[[0, 1]], end_idxs=df.index[[2, 3]])[0],
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 2.0, 4.0, 2.0],
[2.0, 4.0, 2.0, 3.0, 3.0, np.nan],
[3.0, 3.0, np.nan, 4.0, 2.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=3, step=1),
columns=pd.MultiIndex.from_arrays([
pd.Int64Index([0, 0, 0, 1, 1, 1], dtype='int64', name='split_idx'),
pd.Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df.vbt.range_split(start_idxs=df.index[[0, 1]], end_idxs=df.index[[2, 3]])[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df.vbt.range_split(start_idxs=df.index[[0]], end_idxs=df.index[[2, 3]])[0],
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 1.0, np.nan, 1.0],
[2.0, 4.0, 2.0, 2.0, 4.0, 2.0],
[3.0, 3.0, np.nan, 3.0, 3.0, np.nan],
[np.nan, np.nan, np.nan, 4.0, 2.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.MultiIndex.from_arrays([
pd.Int64Index([0, 0, 0, 1, 1, 1], dtype='int64', name='split_idx'),
pd.Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df.vbt.range_split(start_idxs=df.index[[0]], end_idxs=df.index[[2, 3]])[1][i],
target[i]
)
with pytest.raises(Exception) as e_info:
df.vbt.range_split()
with pytest.raises(Exception) as e_info:
df.vbt.range_split(start_idxs=[0, 1])
with pytest.raises(Exception) as e_info:
df.vbt.range_split(end_idxs=[2, 4])
with pytest.raises(Exception) as e_info:
df.vbt.range_split(min_len=10)
with pytest.raises(Exception) as e_info:
df.vbt.range_split(n=10)
def test_rolling_split(self):
(df1, indexes1), (df2, indexes2), (df3, indexes3) = df['a'].vbt.rolling_split(
window_len=4, set_lens=(1, 1), left_to_right=False)
pd.testing.assert_frame_equal(
df1,
pd.DataFrame(
np.array([
[1.0, 2.0],
[2.0, 3.0]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02', '2018-01-03'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes1[i],
target[i]
)
pd.testing.assert_frame_equal(
df2,
pd.DataFrame(
np.array([
[3.0, 4.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-03'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes2[i],
target[i]
)
pd.testing.assert_frame_equal(
df3,
pd.DataFrame(
np.array([
[4.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes3[i],
target[i]
)
(df1, indexes1), (df2, indexes2), (df3, indexes3) = df['a'].vbt.rolling_split(
window_len=4, set_lens=(1, 1), left_to_right=True)
pd.testing.assert_frame_equal(
df1,
pd.DataFrame(
np.array([
[1.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes1[i],
target[i]
)
pd.testing.assert_frame_equal(
df2,
pd.DataFrame(
np.array([
[2.0, 3.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-03'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes2[i],
target[i]
)
pd.testing.assert_frame_equal(
df3,
pd.DataFrame(
np.array([
[3.0, 4.0],
[4.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes3[i],
target[i]
)
(df1, indexes1), (df2, indexes2), (df3, indexes3) = df['a'].vbt.rolling_split(
window_len=4, set_lens=(0.25, 0.25), left_to_right=[False, True])
pd.testing.assert_frame_equal(
df1,
pd.DataFrame(
np.array([
[1.0, 2.0],
[2.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes1[i],
target[i]
)
pd.testing.assert_frame_equal(
df2,
pd.DataFrame(
np.array([
[3.0, 3.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-03'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-03'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
indexes2[i],
target[i]
)
pd.testing.assert_frame_equal(
df3,
pd.DataFrame(
np.array([
[4.0, 4.0],
[np.nan, np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None)
import numpy as np
import pandas as pd
import spacy
from spacy.lang.de.stop_words import STOP_WORDS
from nltk.tokenize import sent_tokenize
from itertools import groupby
import copy
import re
import sys
import textstat
# Method to create a matrix that contains only zeros and an index column starting at 0
def create_matrix_index_zeros(rows, columns):
arr = np.zeros((rows, columns))
for r in range(0, rows):
arr[r, 0] = r
return arr
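# (Equivalent vectorized form, for reference: arr = np.zeros((rows, columns)); arr[:, 0] = np.arange(rows))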
# Method to get all authors with a given number of texts. Used in chapter 5.1 to get a corpus with 100 texts for 25
# authors
def get_balanced_df_all_authors(par_df, par_num_text):
author_count = par_df["author"].value_counts()
author_list = []
df_balanced_text = pd.DataFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text'])
for i in range(0, len(author_count)):
if author_count[i] >= par_num_text and not author_count.index[i] == "Gast-Rezensent":
author_list.append(author_count.index[i])
texts = [par_num_text for i in range(0, len(author_list))]  # one counter per selected author so the early exit below can trigger
for index, row in par_df.iterrows():
if row['author'] in author_list:
if texts[author_list.index(row['author'])] != 0:
d = {'author': [row['author']], 'genres': [row['genres']],
'release_date': [row['release_date']], 'text': [row['text']]}
df_balanced_text = df_balanced_text.append(pd.DataFrame.from_dict(d), ignore_index=True)
texts[author_list.index(row['author'])] -= 1
if sum(texts) == 0:
break
# Label encoding and delete author column after
dic_author_mapping = author_encoding(df_balanced_text)
df_balanced_text['label_encoded'] = get_encoded_author_vector(df_balanced_text, dic_author_mapping)[:, 0]
df_balanced_text.drop("author", axis=1, inplace=True)
# Print author mapping in file
original_stdout = sys.stdout
with open('author_mapping.txt', 'w') as f:
sys.stdout = f
print(dic_author_mapping)
sys.stdout = original_stdout
for i in range(0, len(author_list)):
print(f"Autor {i+1}: {par_num_text - texts[i]} Texte")
return df_balanced_text
# Method to get a specific number of authors with a given number of texts. Used later on to get results for different
# combinations of authors and texts
def get_balanced_df_by_texts_authors(par_df, par_num_text, par_num_author):
author_count = par_df["author"].value_counts()
author_list = []
df_balanced_text = pd.DataFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text'])
loop_count, loops = 0, par_num_author
while loop_count < loops:
if author_count[loop_count] >= par_num_text and not author_count.index[loop_count] == "Gast-Rezensent":
author_list.append(author_count.index[loop_count])
# Skip the Author "Gast-Rezensent" if its not the last round and increase the loops by 1
elif author_count.index[loop_count] == "Gast-Rezensent":
loops += 1
loop_count += 1
texts = [par_num_text for i in range(0, len(author_list))]
for index, row in par_df.iterrows():
if row['author'] in author_list:
if texts[author_list.index(row['author'])] != 0:
d = {'author': [row['author']], 'genres': [row['genres']],
'release_date': [row['release_date']], 'text': [row['text']]}
df_balanced_text = df_balanced_text.append(pd.DataFrame.from_dict(d), ignore_index=True)
texts[author_list.index(row['author'])] -= 1
if sum(texts) == 0:
break
# Label encoding and delete author column after
dic_author_mapping = author_encoding(df_balanced_text)
df_balanced_text['label_encoded'] = get_encoded_author_vector(df_balanced_text, dic_author_mapping)[:, 0]
df_balanced_text.drop("author", axis=1, inplace=True)
# Print author mapping in file
original_stdout = sys.stdout
with open('author_mapping.txt', 'w') as f:
sys.stdout = f
print(dic_author_mapping)
sys.stdout = original_stdout
for i in range(0, len(author_list)):
print(f"Autor {i+1}: {par_num_text - texts[i]} Texte")
return df_balanced_text
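# Illustrative call (names are placeholders): build a corpus of 25 authors with 100 texts each
# from a raw review DataFrame that has 'author', 'genres', 'release_date' and 'text' columns:
# df_corpus = get_balanced_df_by_texts_authors(df_reviews, 100, 25)
# The author -> label mapping is written to author_mapping.txt as a side effect.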
# Feature extraction of the feature described in chapter 5.6.1
def get_bow_matrix(par_df):
nlp = spacy.load("de_core_news_sm")
d_bow = {}
d_bow_list = []
function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"]
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not
word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos]
for word in tokens:
try:
d_bow["bow:"+word.lemma_.lower()] += 1
except KeyError:
d_bow["bow:"+word.lemma_.lower()] = 1
d_bow_list.append(copy.deepcopy(d_bow))
d_bow.clear()
return pd.DataFrame(d_bow_list)
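# Note: building the frame from one dict per document leaves NaN where a lemma does not occur
# in a document; downstream feature matrices will usually want .fillna(0) before model training.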
# Feature extraction of the feature described in chapter 5.6.2
def get_word_n_grams(par_df, n):
nlp = spacy.load("de_core_news_sm")
d_word_ngram = {}
d_word_ngram_list = []
function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"]
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not
word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos]
tokens = [token.lemma_.lower() for token in tokens]
for w in range(0, len(tokens)):
if w + n <= len(tokens):
try:
d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] += 1
except KeyError:
d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] = 1
d_word_ngram_list.append(copy.deepcopy(d_word_ngram))
d_word_ngram.clear()
return pd.DataFrame(d_word_ngram_list)
# Feature extraction of the feature described in chapter 5.6.3
def get_word_count(par_df):
arr_wordcount = np.zeros((len(par_df), 1))
nlp = spacy.load("de_core_news_sm")
only_words = []
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
if not t.is_punct and not t.is_space:
only_words.append(t)
arr_wordcount[index] = len(only_words)
only_words.clear()
return pd.DataFrame(data=arr_wordcount, columns=["word_count"])
# Feature extraction of the feature described in chapter 5.6.4 with some variations
# Count all word lengths individually
def get_word_length_matrix(par_df):
nlp = spacy.load("de_core_news_sm")
d_word_len = {}
d_word_len_list = []
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not word.is_digit]
for word in tokens:
try:
d_word_len["w_len:"+str(len(word.text))] += 1
except KeyError:
d_word_len["w_len:"+str(len(word.text))] = 1
d_word_len_list.append(copy.deepcopy(d_word_len))
d_word_len.clear()
return pd.DataFrame(d_word_len_list)
# Count word lengths and set 2 intervals
def get_word_length_matrix_with_interval(par_df, border_1, border_2):
arr_wordcount_with_interval = np.zeros((len(par_df), border_1 + 2))
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for word in tokens:
if len(word.text) <= border_1 and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, len(word.text) - 1] += 1
elif border_1 < len(
word.text) <= border_2 and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -2] += 1
elif not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -1] += 1
word_length_labels = [str(i) for i in range(1, border_1+1)]
word_length_labels.append(f"{border_1+1}-{border_2}")
word_length_labels.append(f">{border_2}")
return pd.DataFrame(data=arr_wordcount_with_interval, columns=word_length_labels)
# Count word lengths and sum all above a defined margin
def get_word_length_matrix_with_margin(par_df, par_margin):
arr_wordcount_with_interval = np.zeros((len(par_df), par_margin + 1))
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for word in tokens:
if len(word.text) <= par_margin and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, len(word.text) - 1] += 1
elif par_margin < len(word.text) and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -1] += 1
word_length_labels = [str(i) for i in range(1, par_margin+1)]
word_length_labels.append(f">{par_margin}")
return pd.DataFrame(data=arr_wordcount_with_interval, columns=word_length_labels)
# Count the average word length of the article
def get_average_word_length(par_df):
arr_avg_word_len_vector = np.zeros((len(par_df), 1))
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
symbol_sum = 0
words = 0
tokens = nlp(row['text'])
for word in tokens:
if not word.is_punct and not word.is_space and not word.is_digit:
symbol_sum += len(word.text)
words += 1
arr_avg_word_len_vector[index, 0] = symbol_sum / words
return pd.DataFrame(data=arr_avg_word_len_vector, columns=["avg_word_length"])
# Feature extraction of the feature described in chapter 5.6.5
def get_yules_k(par_df):
d = {}
nlp = spacy.load("de_core_news_sm")
arr_yulesk = np.zeros((len(par_df), 1))
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
if not t.is_punct and not t.is_space and not t.is_digit:
w = t.lemma_.lower()
try:
d[w] += 1
except KeyError:
d[w] = 1
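# Yule's characteristic K as implemented here: K = 10**4 * (S2 - S1) / S1**2, with
# S1 = number of distinct lemmas and S2 = sum over frequency classes of
# (number of lemmas with that frequency) * frequency**2.
# (The classical definition of Yule's K uses the total token count for S1.)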
s1 = float(len(d))
s2 = sum([len(list(g)) * (freq ** 2) for freq, g in groupby(sorted(d.values()))])
try:
k = 10000 * (s2 - s1) / (s1 * s1)
arr_yulesk[index] = k
except ZeroDivisionError:
pass
d.clear()
return pd.DataFrame(data=arr_yulesk, columns=["yulesk"])
# Feature extraction of the feature described in chapter 5.6.6
# Get a vector of all special characters
def get_special_char_label_vector(par_df):
nlp = spacy.load("de_core_news_sm")
special_char_label_vector = []
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
chars = ' '.join([c for c in t.text])
chars = nlp(chars)
for c in chars:
if c.is_punct and c.text not in special_char_label_vector:
special_char_label_vector.append(c.text)
return special_char_label_vector
# Get a matrix of all special character by a given vector of special chars
def get_special_char_matrix(par_df, par_special_char_label_vector):
nlp = spacy.load("de_core_news_sm")
arr_special_char = np.zeros((len(par_df), len(par_special_char_label_vector)))
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
chars = ' '.join([c for c in t.text])
chars = nlp(chars)
for c in chars:
if c.text in par_special_char_label_vector:
arr_special_char[index, par_special_char_label_vector.index(c.text)] += 1
return arr_special_char
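# Typical two-step use (illustrative): build the shared label vector once, then the count matrix:
# special_chars = get_special_char_label_vector(df_corpus)
# arr_counts = get_special_char_matrix(df_corpus, special_chars)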
# Feature extraction of the feature described in chapter 5.6.7
# Get the char-affix-n-grams by a defined n
def get_char_affix_n_grams(par_df, n):
d_prefix_list, d_suffix_list, d_space_prefix_list, d_space_suffix_list = [], [], [], []
d_prefix, d_suffix, d_space_prefix, d_space_suffix = {}, {}, {}, {}
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for w in range(0, len(tokens)):
# Prefix
if len(tokens[w].text) >= n + 1:
try:
d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] += 1
except KeyError:
d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] = 1
# Suffix
if len(tokens[w].text) >= n + 1:
try:
d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] += 1
except KeyError:
d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] = 1
d_prefix_list.append(copy.deepcopy(d_prefix))
d_suffix_list.append(copy.deepcopy(d_suffix))
d_prefix.clear()
d_suffix.clear()
for i in range(0, len(row['text'])):
if row['text'][i] == " " and i + n <= len(row['text']) and i - n >= 0:
# Space-prefix
try:
d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] += 1
except KeyError:
d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] = 1
# Space-suffix
try:
d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] += 1
except KeyError:
d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] = 1
d_space_prefix_list.append(copy.deepcopy(d_space_prefix))
d_space_suffix_list.append(copy.deepcopy(d_space_suffix))
d_space_prefix.clear()
d_space_suffix.clear()
df_pre = pd.DataFrame(d_prefix_list)
df_su = pd.DataFrame(d_suffix_list)
df_s_pre = pd.DataFrame(d_space_prefix_list)
df_s_su = pd.DataFrame(d_space_suffix_list)
df_affix = pd.concat([df_pre, df_su, df_s_pre, df_s_su], axis=1)
return df_affix
# Get the char-word-n-grams by a defined n
def get_char_word_n_grams(par_df, n):
d_whole_word_list, d_mid_word_list, d_multi_word_list = [], [], []
d_whole_word, d_mid_word, d_multi_word = {}, {}, {}
match_list = []
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for w in range(0, len(tokens)):
# Whole-word
if len(tokens[w].text) == n:
try:
d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] += 1
except KeyError:
d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] = 1
# Mid-word
if len(tokens[w].text) >= n + 2:
for i in range(1, len(tokens[w].text) - n):
try:
d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] += 1
except KeyError:
d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] = 1
d_whole_word_list.append(copy.deepcopy(d_whole_word))
d_mid_word_list.append(copy.deepcopy(d_mid_word))
d_whole_word.clear()
d_mid_word.clear()
# Multi-word
# ignore special characters
trimmed_text = re.sub(r'[\s]+', ' ', re.sub(r'[^\w ]+', '', row['text']))
match_list.clear()
for i in range(1, n - 1):
regex = r"\w{" + str(i) + r"}\s\w{" + str(n - 1 - i) + r"}"
match_list += re.findall(regex, trimmed_text.lower())
for match in match_list:
try:
d_multi_word["c" + str(n) + "_mw: " + match] += 1
except KeyError:
d_multi_word["c" + str(n) + "_mw: " + match] = 1
d_multi_word_list.append(copy.deepcopy(d_multi_word))
d_multi_word.clear()
df_ww = pd.DataFrame(d_whole_word_list)
df_miw = pd.DataFrame(d_mid_word_list)
df_mw = pd.DataFrame(d_multi_word_list)
from __future__ import division
import configparser
import logging
import os
import re
import time
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy.interpolate as itp
from joblib import Parallel
from joblib import delayed
from matplotlib import pyplot as plt
from pyplanscoring.core.dicomparser import ScoringDicomParser
from pyplanscoring.core.dosimetric import read_scoring_criteria, constrains, Competition2016
from pyplanscoring.core.dvhcalculation import Structure, prepare_dvh_data, calc_dvhs_upsampled, save_dicom_dvhs, load
from pyplanscoring.core.dvhdoses import get_dvh_max
from pyplanscoring.core.geometry import get_axis_grid, get_interpolated_structure_planes
from pyplanscoring.core.scoring import DVHMetrics, Scoring, Participant
# TODO extract constrains from analytical curves
class CurveCompare(object):
"""
Statistical analysis of the DVH volume (%) error histograms. Volume (cm^3) differences
(numerical - analytical) are calculated for points on the DVH curve sampled every 10 cGy and
then normalized to the structure's total volume (cm^3) to give the error in volume (%).
"""
def __init__(self, a_dose, a_dvh, calc_dose, calc_dvh, structure_name='', dose_grid='', gradient=''):
self.calc_data = ''
self.ref_data = ''
self.a_dose = a_dose
self.a_dvh = a_dvh
self.cal_dose = calc_dose
self.calc_dvh = calc_dvh
self.sampling_size = 10/100.0
self.dose_samples = np.arange(0, len(calc_dvh)/100, self.sampling_size) # The DVH curve sampled at every 10 cGy
self.ref_dvh = itp.interp1d(a_dose, a_dvh, fill_value='extrapolate')
self.calc_dvh = itp.interp1d(calc_dose, calc_dvh, fill_value='extrapolate')
self.delta_dvh = self.calc_dvh(self.dose_samples) - self.ref_dvh(self.dose_samples)
self.delta_dvh_pp = (self.delta_dvh / a_dvh[0]) * 100
# prepare the DVH data dicts used by get_constrains()
# (prepare_dvh_data is assumed here to accept dose samples plus cumulative volumes,
# standing in for the private _prepare_dvh_data helper referenced previously)
self.calc_dvh_dict = prepare_dvh_data(self.dose_samples, self.calc_dvh(self.dose_samples))
self.ref_dvh_dict = prepare_dvh_data(self.dose_samples, self.ref_dvh(self.dose_samples))
# title data
self.structure_name = structure_name
self.dose_grid = dose_grid
self.gradient = gradient
def stats(self):
df = pd.DataFrame(self.delta_dvh_pp, columns=['delta_pp'])
print(df.describe())
@property
def stats_paper(self):
stats = {}
stats['min'] = self.delta_dvh_pp.min().round(1)
stats['max'] = self.delta_dvh_pp.max().round(1)
stats['mean'] = self.delta_dvh_pp.mean().round(1)
stats['std'] = self.delta_dvh_pp.std(ddof=1).round(1)
return stats
@property
def stats_delta_cc(self):
stats = {}
stats['min'] = self.delta_dvh.min().round(1)
stats['max'] = self.delta_dvh.max().round(1)
stats['mean'] = self.delta_dvh.mean().round(1)
stats['std'] = self.delta_dvh.std(ddof=1).round(1)
return stats
def get_constrains(self, constrains_dict):
# evaluate the same constraint set on the reference and the calculated DVH (used by calc_data_all)
ref_constrains = eval_constrains_dict(self.ref_dvh_dict, constrains_dict)
calc_constrains = eval_constrains_dict(self.calc_dvh_dict, constrains_dict)
return ref_constrains, calc_constrains
def eval_range(self, lim=0.2):
t1 = self.delta_dvh < -lim
t2 = self.delta_dvh > lim
ok = np.sum(np.logical_or(t1, t2))
pp = ok / len(self.delta_dvh) * 100
print('pp %1.2f - %i of %i ' % (pp, ok, self.delta_dvh.size))
def plot_results(self, ref_label='Analytical', calc_label='Calculated', title=''):
fig, ax = plt.subplots()
ref = self.ref_dvh(self.dose_samples)
calc = self.calc_dvh(self.dose_samples)
ax.plot(self.dose_samples, ref, label=ref_label)
ax.plot(self.dose_samples, calc, label=calc_label)
ax.set_ylabel('volume [cc]')
ax.set_xlabel('Dose [Gy]')
ax.set_title(title)
ax.legend(loc='best')
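# Minimal usage sketch for CurveCompare (names are illustrative: a_dose/a_dvh hold the analytical
# DVH and dhist/chist a calculated DVH of the same structure):
#
# cmp = CurveCompare(a_dose, a_dvh, dhist, chist, structure_name='Sphere_30_0')
# print(cmp.stats_paper)   # min/max/mean/std of the volume error in percentage points
# cmp.eval_range(lim=0.2)  # how many sampled points differ by more than 0.2 cc
# cmp.plot_results('Analytical', 'Calculated', 'Sphere_30_0 DVH comparison')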
def test_real_dvh():
rs_file = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/RS.1.2.246.352.71.4.584747638204.248648.20170123083029.dcm'
rd_file = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/RD.1.2.246.352.71.7.584747638204.1750110.20170123082607.dcm'
rp = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/RP.1.2.246.352.71.5.584747638204.952069.20170122155706.dcm'
# dvh_file = r'/media/victor/TOURO Mobile/COMPETITION 2017/Send to Victor - Jan10 2017/Norm Res with CT Images/RD.1.2.246.352.71.7.584747638204.1746016.20170110164605.dvh'
f = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/PlanIQ Criteria TPS PlanIQ matched str names - TXT Fromat - Last mod Jan23.txt'
constrains_all, scores_all, criteria = read_scoring_criteria(f)
dose = ScoringDicomParser(filename=rd_file)
struc = ScoringDicomParser(filename=rs_file)
structures = struc.GetStructures()
ecl_DVH = dose.GetDVHs()
plt.style.use('ggplot')
st = time.time()
dvhs = {}
for structure in structures.values():
for end_cap in [False]:
if structure['id'] in ecl_DVH:
# if structure['id'] in [37, 38]:
if structure['name'] in list(scores_all.keys()):
ecl_dvh = ecl_DVH[structure['id']]['data']
ecl_dmax = ecl_DVH[structure['id']]['max'] * 100 # to cGy
struc_teste = Structure(structure, end_cap=end_cap)
# struc['planes'] = struc_teste.planes
# dicompyler_dvh = get_dvh(structure, dose)
fig, ax = plt.subplots()
fig.set_figheight(12)
fig.set_figwidth(20)
dhist, chist = struc_teste.calculate_dvh(dose, up_sample=True)
max_dose = get_dvh_max(chist)
ax.plot(dhist, chist, label='Up sampled - Dmax: %1.1f cGy' % max_dose)
# note: fig.hold() was removed in matplotlib 3.x; axes retain previously plotted lines by default
ax.plot(ecl_dvh, label='Eclipse - Dmax: %1.1f cGy' % ecl_dmax)
dvh_data = prepare_dvh_data(dhist, chist)
txt = structure['name'] + ' volume (cc): %1.1f - end_cap: %s ' % (
ecl_dvh[0], str(end_cap))
ax.set_title(txt)
# nup = get_dvh_max(dicompyler_dvh['data'])
# plt.plot(dicompyler_dvh['data'], label='Software DVH - Dmax: %1.1f cGy' % nup)
ax.legend(loc='best')
ax.set_xlabel('Dose (cGy)')
ax.set_ylabel('volume (cc)')
fname = txt + '.png'
fig.savefig(fname, format='png', dpi=100)
dvhs[structure['name']] = dvh_data
end = time.time()
print('Total elapsed Time (min): ', (end - st) / 60)
def test_spacing(root_path):
"""
# Test PlanIQ RS-DICOM data: flag structures whose z planes are not equally spaced.
:param root_path: root path
"""
root_path = r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/STRUCTURES'
structure_files = [os.path.join(root, name) for root, dirs, files in os.walk(root_path) for name in files if
name.endswith(('.dcm', '.DCM'))]
eps = 0.001
test_result = {}
for f in structure_files:
structures = ScoringDicomParser(filename=f).GetStructures()
for key in structures:
try:
all_z = np.array([z for z in structures[key]['planes'].keys()], dtype=float)
all_sorted_diff = np.diff(np.sort(all_z))
test = (abs((all_sorted_diff - all_sorted_diff[0])) > eps).any()
test_result[structures[key]['name']] = test
except Exception:
print('Error in key:', key)
b = {key: value for key, value in test_result.items() if value == True}
return test_result
def test_planes_spacing(sPlanes):
eps = 0.001
all_z = np.array([z for z in sPlanes], dtype=float)
all_sorted_diff = np.diff(np.sort(all_z))
test = (abs((all_sorted_diff - all_sorted_diff[0])) > eps).any()
return test, all_sorted_diff
def test_upsampled_z_spacing(sPlanes):
z = 0.1
ordered_keys = [z for z, sPlane in sPlanes.items()]
ordered_keys.sort(key=float)
ordered_planes = np.array(ordered_keys, dtype=float)
z_interp_positions, dz = get_axis_grid(z, ordered_planes)
hi_res_structure = get_interpolated_structure_planes(sPlanes, z_interp_positions)
ordered_keys = [z for z, sPlane in hi_res_structure.items()]
ordered_keys.sort(key=float)
t, p = test_planes_spacing(hi_res_structure)
assert t is False
def eval_constrains_dict(dvh_data_tmp, constrains_dict):
mtk = DVHMetrics(dvh_data_tmp)
values_tmp = OrderedDict()
for ki in constrains_dict.keys():
cti = mtk.eval_constrain(ki, constrains_dict[ki])
values_tmp[ki] = cti
return values_tmp
def get_analytical_curve(an_curves_obj, file_structure_name, column):
an_curve_i = an_curves_obj[file_structure_name.split('_')[0]]
dose_an = an_curve_i['Dose (cGy)'].values
an_dvh = an_curve_i[column].values # check nonzero
idx = np.nonzero(an_dvh) # remove 0 volumes from DVH
dose_range, cdvh = dose_an[idx], an_dvh[idx]
return dose_range, cdvh
def calc_data(row, dose_files_dict, structure_dict, constrains, calculation_options):
idx, values = row[0], row[1]
s_name = values['Structure name']
voxel = str(values['Dose Voxel (mm)'])
gradient = values['Gradient direction']
dose_file = dose_files_dict[gradient][voxel]
struc_file = structure_dict[s_name]
# get structure and dose
dicom_dose = ScoringDicomParser(filename=dose_file)
struc = ScoringDicomParser(filename=struc_file)
structures = struc.GetStructures()
structure = structures[2]
# set end cap by 1/2 slice thickness
calculation_options['end_cap'] = structure['thickness'] / 2.0
# set up sampled structure
struc_teste = Structure(structure, calculation_options)
dhist, chist = struc_teste.calculate_dvh(dicom_dose)
dvh_data = struc_teste.get_dvh_data()
# Setup DVH metrics class and get DVH DATA
metrics = DVHMetrics(dvh_data)
values_constrains = OrderedDict()
for k in constrains.keys():
ct = metrics.eval_constrain(k, constrains[k])
values_constrains[k] = ct
values_constrains['Gradient direction'] = gradient
# Get data
return pd.Series(values_constrains, name=voxel), s_name
def calc_data_all(row, dose_files_dict, structure_dict, constrains, an_curves, col_grad_dict, delta_mm=(0.2, 0.2, 0.2),
end_cap=True, up_sample=True):
idx, values = row[0], row[1]
s_name = values['Structure name']
voxel = str(values['Dose Voxel (mm)'])
gradient = values['Gradient direction']
dose_file = dose_files_dict[gradient][voxel]
struc_file = structure_dict[s_name]
# get structure and dose
dicom_dose = ScoringDicomParser(filename=dose_file)
struc = ScoringDicomParser(filename=struc_file)
structures = struc.GetStructures()
structure = structures[2]
# set up sampled structure
struc_teste = Structure(structure)
struc_teste.set_delta(delta_mm)
dhist, chist = struc_teste.calculate_dvh(dicom_dose)
# get its columns from spreadsheet
column = col_grad_dict[gradient][voxel]
adose_range, advh = get_analytical_curve(an_curves, s_name, column)
# use CurveCompare class to eval similarity from calculated and analytical curves
cmp = CurveCompare(adose_range, advh, dhist, chist, s_name, voxel, gradient)
ref_constrains, calc_constrains = cmp.get_constrains(constrains)
ref_constrains['Gradient direction'] = gradient
calc_constrains['Gradient direction'] = gradient
ref_series = pd.Series(ref_constrains, name=voxel)
calc_series = pd.Series(calc_constrains, name=voxel)
return ref_series, calc_series, s_name, cmp
def test11(delta_mm=(0.2, 0.2, 0.1), plot_curves=False):
# TEST DICOM DATA
structure_files = ['/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Spheres/Sphere_02_0.dcm',
'/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cylinders/Cylinder_02_0.dcm',
'/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cylinders/RtCylinder_02_0.dcm',
'/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cones/Cone_02_0.dcm',
'/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cones/RtCone_02_0.dcm']
structure_name = ['Sphere_02_0', 'Cylinder_02_0', 'RtCylinder_02_0', 'Cone__02_0', 'RtCone_02_0']
dose_files = [
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_0-4_0-2_0-4_mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_1mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_2mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_3mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_0-4_0-2_0-4_mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_1mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_2mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_3mm_Aligned.dcm']
# Structure Dict
structure_dict = dict(zip(structure_name, structure_files))
# dose dict
dose_files_dict = {
'Z(AP)': {'0.4x0.2x0.4': dose_files[0], '1': dose_files[1], '2': dose_files[2], '3': dose_files[3]},
'Y(SI)': {'0.4x0.2x0.4': dose_files[4], '1': dose_files[5], '2': dose_files[6], '3': dose_files[7]}}
sheets = ['Sphere', 'Cylinder', 'RtCylinder', 'Cone', 'RtCone']
col_grad_dict = {'Z(AP)': {'0.4x0.2x0.4': 'AP 0.2 mm', '1': 'AP 1 mm', '2': 'AP 2 mm', '3': 'AP 3 mm'},
'Y(SI)': {'0.4x0.2x0.4': 'SI 0.2 mm', '1': 'SI 1 mm', '2': 'SI 2 mm', '3': 'SI 3 mm'}}
# grab analytical data
sheet = 'Analytical'
ref_path = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/analytical_data.xlsx'
df = pd.read_excel(ref_path, sheet_name=sheet)
mask = df['CT slice spacing (mm)'] == '0.2mm'
df = df.loc[mask]
# Constrains to get data
# Constrains
constrains = OrderedDict()
constrains['Total_Volume'] = True
constrains['min'] = 'min'
constrains['max'] = 'max'
constrains['mean'] = 'mean'
constrains['D99'] = 99
constrains['D95'] = 95
constrains['D5'] = 5
constrains['D1'] = 1
constrains['Dcc'] = 0.03
# Get all analytical curves
out = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/analytical_dvh.obj'
an_curves = load(out)
res = Parallel(n_jobs=-1, verbose=11)(
delayed(calc_data_all)(row,
dose_files_dict,
structure_dict,
constrains,
an_curves,
col_grad_dict,
delta_mm=delta_mm) for row in df.iterrows())
ref_results = [d[0] for d in res]
calc_results = [d[1] for d in res]
sname = [d[2] for d in res]
curves = [d[3] for d in res]
df_ref_results = pd.concat(ref_results, axis=1).T.reset_index()
df_calc_results = pd.concat(calc_results, axis=1).T.reset_index()
df_ref_results['Structure name'] = sname
df_calc_results['Structure name'] = sname
ref_num = df_ref_results[df_ref_results.columns[1:-2]]
calc_num = df_calc_results[df_calc_results.columns[1:-2]]
delta = ((calc_num - ref_num) / ref_num) * 100
res = OrderedDict()
lim = 3
for col in delta:
count = np.sum(np.abs(delta[col]) > lim)
rg = np.array([round(delta[col].min(), 2), round(delta[col].max(), 2)])
res[col] = {'count': count, 'range': rg}
test_table = pd.DataFrame(res).T
print(test_table)
if plot_curves:
for c in curves:
c.plot_results()
plt.show()
def test22(delta_mm=(0.1, 0.1, 0.1), up_sample=True, plot_curves=True):
ref_data = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/analytical_data.xlsx'
struc_dir = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/STRUCTURES'
dose_grid_dir = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS'
#
# ref_data = r'D:\Dropbox\Plan_Competit
st = 2
snames = ['Sphere_10_0', 'Sphere_20_0', 'Sphere_30_0',
'Cylinder_10_0', 'Cylinder_20_0', 'Cylinder_30_0',
'RtCylinder_10_0', 'RtCylinder_20_0', 'RtCylinder_30_0',
'Cone_10_0', 'Cone_20_0', 'Cone_30_0',
'RtCone_10_0', 'RtCone_20_0', 'RtCone_30_0']
structure_path = [os.path.join(struc_dir, f + '.dcm') for f in snames]
structure_dict = dict(zip(snames, structure_path))
dose_files = [os.path.join(dose_grid_dir, f) for f in [
'Linear_AntPost_1mm_Aligned.dcm',
'Linear_AntPost_2mm_Aligned.dcm',
'Linear_AntPost_3mm_Aligned.dcm',
'Linear_SupInf_1mm_Aligned.dcm',
'Linear_SupInf_2mm_Aligned.dcm',
'Linear_SupInf_3mm_Aligned.dcm']]
# dose dict
dose_files_dict = {
'Z(AP)': {'1': dose_files[0], '2': dose_files[1], '3': dose_files[2]},
'Y(SI)': {'1': dose_files[3], '2': dose_files[4], '3': dose_files[5]}}
col_grad_dict = {'Z(AP)': {'0.4x0.2x0.4': 'AP 0.2 mm', '1': 'AP 1 mm', '2': 'AP 2 mm', '3': 'AP 3 mm'},
'Y(SI)': {'0.4x0.2x0.4': 'SI 0.2 mm', '1': 'SI 1 mm', '2': 'SI 2 mm', '3': 'SI 3 mm'}}
# grab analytical data
out = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/analytical_dvh.obj'
an_curves = load(out)
    df = pd.read_excel(ref_data, sheet_name='Analytical')
    dfi = df.loc[40:]
mask0 = dfi['Structure Shift'] == 0
dfi = dfi.loc[mask0]
    # Constraints used to extract the DVH metrics
constrains = OrderedDict()
constrains['Total_Volume'] = True
constrains['min'] = 'min'
constrains['max'] = 'max'
constrains['mean'] = 'mean'
constrains['D99'] = 99
constrains['D95'] = 95
constrains['D5'] = 5
constrains['D1'] = 1
constrains['Dcc'] = 0.03
# GET CALCULATED DATA
# backend = 'threading'
res = Parallel(n_jobs=-1, verbose=11)(
delayed(calc_data_all)(row,
dose_files_dict,
structure_dict,
constrains,
an_curves,
col_grad_dict,
delta_mm=delta_mm,
up_sample=up_sample) for row in dfi.iterrows())
ref_results = [d[0] for d in res]
calc_results = [d[1] for d in res]
sname = [d[2] for d in res]
curves = [d[3] for d in res]
df_ref_results = pd.concat(ref_results, axis=1).T.reset_index()
df_calc_results = pd.concat(calc_results, axis=1).T.reset_index()
df_ref_results['Structure name'] = sname
df_calc_results['Structure name'] = sname
ref_num = df_ref_results[df_ref_results.columns[1:-2]]
calc_num = df_calc_results[df_calc_results.columns[1:-2]]
delta = ((calc_num - ref_num) / ref_num) * 100
res = OrderedDict()
lim = 3
for col in delta:
count = np.sum(np.abs(delta[col]) > lim)
rg = np.array([round(delta[col].min(), 2), round(delta[col].max(), 2)])
res[col] = {'count': count, 'range': rg}
    test_table = pd.DataFrame(res)
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD
"""
Toolset working with yahoo finance data
Module includes functions for easy access to YahooFinance data
"""
import urllib.request
import numpy as np
import requests # interaction with the web
import os # file system operations
import yaml # human-friendly data format
import re # regular expressions
import pandas as pd # pandas... the best time series library out there
import datetime as dt # date and time functions
import io
from .extra import ProgressBar
dateTimeFormat = "%Y%m%d %H:%M:%S"
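# Example of the format in use (illustrative):
#   dt.datetime(2014, 1, 31, 9, 30, 0).strftime(dateTimeFormat)  ->  '20140131 09:30:00'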
def parseStr(s):
''' convert string to a float or string '''
f = s.strip()
if f[0] == '"':
return f.strip('"')
elif f=='N/A':
return np.nan
else:
try: # try float conversion
prefixes = {'M':1e6, 'B': 1e9}
prefix = f[-1]
if prefix in prefixes: # do we have a Billion/Million character?
return float(f[:-1])*prefixes[prefix]
else: # no, convert to float directly
return float(f)
except ValueError: # failed, return original string
return s
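# Quick sketch of how parseStr handles the Yahoo CSV field formats (for reference only):
#   parseStr('"AAPL"')  -> 'AAPL'    (quoted strings are unquoted)
#   parseStr('N/A')     -> nan       (missing values become np.nan)
#   parseStr('2.5B')    -> 2.5e9     (Million/Billion suffixes are expanded)
#   parseStr('12.34')   -> 12.34     (plain numbers are cast to float)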
def getQuote(symbols):
"""
get current yahoo quote
Parameters
-----------
symbols : list of str
list of ticker symbols
Returns
-----------
DataFrame , data is row-wise
"""
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
if not isinstance(symbols,list):
symbols = [symbols]
header = ['symbol','last','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])
data = dict(list(zip(header,[[] for i in range(len(header))])))
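    # builds {'symbol': [], 'last': [], ...} - one empty list per requested field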
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
    try:
        lines = urllib.request.urlopen(urlStr).readlines()
    except Exception as e:
        print("Failed to download:\n{0}".format(e))
        return pd.DataFrame(columns=header)  # return early so the loop below never sees an undefined 'lines'
for line in lines:
fields = line.decode().strip().split(',')
#print fields, len(fields)
for i,field in enumerate(fields):
data[header[i]].append( parseStr(field))
idx = data.pop('symbol')
    return pd.DataFrame(data, index=idx)
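# Usage sketch (note: Yahoo retired this CSV quote endpoint, so the call only works
# against a mirror of the old API):
#   quotes = getQuote(['AAPL', 'MSFT'])
#   print(quotes[['last', 'PE', 'market_cap']])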
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
                # pandas.read_csv uses the first line of the file for column headings by default
                dr = pd.read_csv(csvfile)  # comma is the default delimiter
        except csv.Error as e:
            sys.exit('file: %s, %s' % (filename, e))
print(dr)
        self.sci_name = dr.loc[:, 'Scientific Name']
        self.com_name = dr.loc[:, 'Common Name']
        self.taxa = dr.loc[:, 'Taxa']
        self.order = dr.loc[:, 'Order']
        self.usfws_id = dr.loc[:, 'USFWS Species ID (ENTITY_ID)']
        self.body_wgt = dr.loc[:, 'BW (g)']
        self.diet_item = dr.loc[:, 'Food item']
        self.h2o_cont = dr.loc[:, 'Water content of diet']
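# Minimal usage sketch (assumes tests/TEDSpeciesProperties.csv ships alongside this module):
#   props = TedSpeciesProperties()
#   props.read_species_properties()
#   print(props.com_name.head(), props.diet_item.head())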
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
        self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
        self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth_min")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
        self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
        self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
        self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth_max")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
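        # Worked example of the convention (illustrative): 'dbt_mamm_1inmill_mort' is the
        # dose-based toxicity value for mammals at the dose expected to kill 1-in-a-million
        # animals; the matching '_wgt' attribute below carries the test animal body weight.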
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
        self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
        self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = pd.Series([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = pd.Series([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = pd.Series([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = pd.Series([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = pd.Series([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = pd.Series([], dtype="float", name="cbt_inv_food_behav_noec")
self.cbt_inv_food_behav_loec = pd.Series([], dtype="float", name="cbt_inv_food_behav_loec")
self.cbt_inv_food_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_noec")
self.cbt_inv_food_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_loec")
self.cbt_inv_food_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_food_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw))
self.cbt_inv_soil_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inmill_mort")
self.cbt_inv_soil_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inten_mort")
self.cbt_inv_soil_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_soil_low_lc50")
self.cbt_inv_soil_sub_direct = pd.Series([], dtype="float", name="cbt_inv_soil_sub_direct")
self.cbt_inv_soil_grow_noec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_noec")
self.cbt_inv_soil_grow_loec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_loec")
self.cbt_inv_soil_repro_noec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_noec")
self.cbt_inv_soil_repro_loec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_loec")
self.cbt_inv_soil_behav_noec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_noec")
self.cbt_inv_soil_behav_loec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_loec")
self.cbt_inv_soil_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_noec")
self.cbt_inv_soil_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_loec")
self.cbt_inv_soil_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_soil_sub_indirect")
# application rate-based toxicity (arbt) : mammals (lbs active ingredient/Acre)
self.arbt_mamm_mort = pd.Series([], dtype="float", name="arbt_mamm_mort")
self.arbt_mamm_growth = pd.Series([], dtype="float", name="arbt_mamm_growth")
self.arbt_mamm_repro = pd.Series([], dtype="float", name="arbt_mamm_repro")
self.arbt_mamm_behav = pd.Series([], dtype="float", name="arbt_mamm_behav")
self.arbt_mamm_sensory = pd.Series([], dtype="float", name="arbt_mamm_sensory")
# application rate-based toxicity (arbt) : birds (lbs active ingredient/Acre)
self.arbt_bird_mort = pd.Series([], dtype="float", name="arbt_bird_mort")
self.arbt_bird_growth = pd.Series([], dtype="float", name="arbt_bird_growth")
self.arbt_bird_repro = pd.Series([], dtype="float", name="arbt_bird_repro")
self.arbt_bird_behav = pd.Series([], dtype="float", name="arbt_bird_behav")
self.arbt_bird_sensory = pd.Series([], dtype="float", name="arbt_bird_sensory")
# application rate-based toxicity (arbt) : reptiles (lbs active ingredient/Acre)
self.arbt_reptile_mort = pd.Series([], dtype="float", name="arbt_reptile_mort")
self.arbt_reptile_growth = pd.Series([], dtype="float", name="arbt_reptile_growth")
self.arbt_reptile_repro = pd.Series([], dtype="float", name="arbt_reptile_repro")
self.arbt_reptile_behav = pd.Series([], dtype="float", name="arbt_reptile_behav")
self.arbt_reptile_sensory = pd.Series([], dtype="float", name="arbt_reptile_sensory")
# application rate-based toxicity (arbt) : invertebrates (lbs active ingredient/Acre)
self.arbt_inv_1inmill_mort = pd.Series([], dtype="float", name="arbt_inv_1inmill_mort")
self.arbt_inv_1inten_mort = pd.Series([], dtype="float", name="arbt_inv_1inten_mort")
self.arbt_inv_sub_direct = pd.Series([], dtype="float", name="arbt_inv_sub_direct")
self.arbt_inv_sub_indirect = pd.Series([], dtype="float", name="arbt_inv_sub_indirect")
self.arbt_inv_growth = pd.Series([], dtype="float", name="arbt_inv_growth")
self.arbt_inv_repro = pd.Series([], dtype="float", name="arbt_inv_repro")
self.arbt_inv_behav = pd.Series([], dtype="float", name="arbt_inv_behav")
        self.arbt_inv_sensory = pd.Series([], dtype="float", name="arbt_inv_sensory")
import pandas as pd
import json
import streamlit as st
import plotly.express as px
from PIL import Image
# Open the JSON file of country codes
with open("kode_negara_lengkap.json") as file:
    data = json.load(file)
# Turn the JSON data into a DataFrame
df_json = pd.DataFrame(data)
# Read the CSV file of crude oil production data
df = pd.read_csv('produksi_minyak_mentah.csv')
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn import svm
from sklearn import metrics
def classification(latent_code, random_seed=42, ten_fold=False):
    tumour_type = pd.read_csv('data/PANCAN/GDC-PANCAN_both_samples_tumour_type.tsv', sep='\t', index_col=0)
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Fill with non-category results in a ValueError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
# PH 32624: Error when using a lot of indices to unstack.
# The error occurred only, if a lot of indices are used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
# PH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
# PH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self, using_array_manager):
# GH7401
df = DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
right = DataFrame(vals, columns=cols, index=idx)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
cols = right.columns[[1, 2, 3, 5]]
right[cols] = right[cols].astype(df["C"].dtype)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index4(self):
# GH4862
vals = [
["Hg", np.nan, np.nan, 680585148],
["U", 0.0, np.nan, 680585148],
["Pb", 7.07e-06, np.nan, 680585148],
["Sn", 2.3614e-05, 0.0133, 680607017],
["Ag", 0.0, 0.0133, 680607017],
["Hg", -0.00015, 0.0133, 680607017],
]
df = DataFrame(
vals,
columns=["agent", "change", "dosage", "s_id"],
index=[17263, 17264, 17265, 17266, 17267, 17268],
)
left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
vals = [
[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
]
idx = MultiIndex(
levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=["s_id", "dosage"],
)
cols = MultiIndex(
levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, "agent"],
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
tm.assert_frame_equal(left.unstack(), right)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) MultiIndex bug
def test_unstack_nan_index5(self):
# GH9497 - multiple unstack with nulls
df = DataFrame(
{
"1st": [1, 2, 1, 2, 1, 2],
"2nd": date_range("2014-02-01", periods=6, freq="D"),
"jim": 100 + np.arange(6),
"joe": (np.random.randn(6) * 10).round(2),
}
)
df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan
left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
assert left.notna().values.sum() == 2 * len(df)
for col in ["jim", "joe"]:
for _, r in df.iterrows():
key = r["1st"], (col, r["2nd"], r["3rd"])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)])
ecols = MultiIndex.from_tuples([(t, "A")])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
tm.assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(
np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex,
)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(
df.columns.to_numpy(), names=df.columns.names
)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples(
[("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
names=["Upper", "Lower"],
)
for multiindex_columns in (
[0, 1, 2, 3, 4],
[0, 1, 2, 3],
[0, 1, 2, 4],
[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[0, 1],
[0, 2],
[0, 3],
[0],
[2],
[4],
):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame(
[[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
index=MultiIndex(
levels=[[0, 1], ["u", "x", "y", "z"]],
codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, "Lower"],
),
columns=Index(["B", "C"], name="Upper"),
dtype=df.dtypes[0],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("labels", [list("yxz"), list("yxy")])
def test_stack_preserve_categorical_dtype(self, ordered, labels):
# GH13854
cidx = pd.CategoricalIndex(labels, categories=list("xyz"), ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"labels,data",
[
(list("xyz"), [10, 11, 12, 13, 14, 15]),
(list("zyx"), [14, 15, 12, 13, 10, 11]),
],
)
def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data):
# GH-36991
cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
midx = MultiIndex.from_product([cidx, cidx2])
df = DataFrame([sorted(data)], columns=midx)
result = df.stack([0, 1])
s_cidx = pd.CategoricalIndex(sorted(labels), ordered=ordered)
expected = Series(data, index=MultiIndex.from_product([[0], s_cidx, cidx2]))
tm.assert_series_equal(result, expected)
def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(["a", "a", "b", "c"])
df = DataFrame({"A": cat, "B": cat})
result = df.stack()
index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
expected = Series(
pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, columns",
[
([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
],
)
def test_stack_multi_columns_non_unique_index(self, index, columns):
# GH-28301
df = DataFrame(index=index, columns=columns).fillna(1)
stacked = df.stack()
new_index = MultiIndex.from_tuples(stacked.index.to_numpy())
expected = DataFrame(
stacked.to_numpy(), index=new_index, columns=stacked.columns
)
tm.assert_frame_equal(stacked, expected)
stacked_codes = np.asarray(stacked.index.codes)
expected_codes = np.asarray(new_index.codes)
tm.assert_numpy_array_equal(stacked_codes, expected_codes)
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"])
df = DataFrame(
{
"A": pd.array([0, 1, None], dtype="Int64"),
"B": pd.Categorical(["a", "a", "b"]),
},
index=index,
)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
expected_dtypes = Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, "baz"])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"])
df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
df.columns.name = "foo"
expected = DataFrame(
[[3, 1, 2, 0]],
columns=MultiIndex.from_tuples(
[("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
),
)
expected.index.name = "bar"
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = Series(["a", "b", "c", "a"], dtype="object")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value="d")
expected = DataFrame(
{"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
# GH 18338
df = DataFrame(
{
"timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")],
"a": ["a"],
"b": ["b"],
"c": ["c"],
},
columns=["timestamp", "a", "b", "c"],
)
result = df.set_index(["a", "b"]).unstack()
expected = DataFrame(
[[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]],
index=Index(["a"], name="a"),
columns=MultiIndex(
levels=[["timestamp", "c"], ["b"]],
codes=[[0, 1], [0, 0]],
names=[None, "b"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_timezone_aware_values():
# GH 19420
ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York")
df = DataFrame({"A": ts}, index=["a", "b", "c"])
result = df.stack()
expected = Series(
ts,
index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_stack_empty_frame(dropna):
# GH 36113
expected = Series(index=MultiIndex([[], []], [[], []]), dtype=np.float64)
result = DataFrame(dtype=np.float64).stack(dropna=dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_stack_unstack_empty_frame(dropna, fill_value):
# GH 36113
result = (
DataFrame(dtype=np.int64).stack(dropna=dropna).unstack(fill_value=fill_value)
)
expected = DataFrame(dtype=np.int64)
tm.assert_frame_equal(result, expected)
def test_unstack_single_index_series():
# GH 36113
msg = r"index must be a MultiIndex to unstack.*"
with pytest.raises(ValueError, match=msg):
Series(dtype=np.int64).unstack()
def test_unstacking_multi_index_df():
# see gh-30740
df = DataFrame(
{
"name": ["Alice", "Bob"],
"score": [9.5, 8],
"employed": [False, True],
"kids": [0, 0],
"gender": ["female", "male"],
}
)
df = df.set_index(["name", "employed", "kids", "gender"])
df = df.unstack(["gender"], fill_value=0)
expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0)
result = df.unstack(["employed", "kids"], fill_value=0)
expected = DataFrame(
[[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]],
index=Index(["Alice", "Bob"], name="name"),
columns=MultiIndex.from_tuples(
[
("score", "female", False, 0),
("score", "female", True, 0),
("score", "male", False, 0),
("score", "male", True, 0),
],
names=[None, "gender", "employed", "kids"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_positional_level_duplicate_column_names():
# https://github.com/pandas-dev/pandas/issues/36353
columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"])
df = DataFrame([[1, 1, 1, 1]], columns=columns)
result = df.stack(0)
new_columns = Index(["y", "z"], name="a")
new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"])
expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns)
tm.assert_frame_equal(result, expected)
class TestStackUnstackMultiLevel:
def test_unstack(self, multiindex_year_month_day_dataframe_random_data):
# just check that it works for now
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack()
unstacked.unstack()
# test that ints work
ymd.astype(int).unstack()
        # test that int32 works
ymd.astype(np.int32).unstack()
@pytest.mark.parametrize(
"result_rows,result_columns,index_product,expected_row",
[
(
[[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]],
["ix1", "ix2", "col1", "col2", "col3", "col4"],
2,
[None, None, 30.0, None],
),
(
[[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
2,
[None, None, 30.0],
),
(
[[1, 1, None, None, 30.0], [2, None, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
None,
[None, None, 30.0],
),
],
)
def test_unstack_partial(
self, result_rows, result_columns, index_product, expected_row
):
# check for regressions on this issue:
# https://github.com/pandas-dev/pandas/issues/19351
        # make sure DataFrame.unstack() works when it's run on a subset of the DataFrame
# and the Index levels contain values that are not present in the subset
result = DataFrame(result_rows, columns=result_columns).set_index(
["ix1", "ix2"]
)
result = result.iloc[1:2].unstack("ix2")
expected = DataFrame(
[expected_row],
columns=MultiIndex.from_product(
[result_columns[2:], [index_product]], names=[None, "ix2"]
),
index=Index([2], name="ix1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples(
[(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)]
)
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
def test_stack(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
# regular roundtrip
unstacked = ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
unlexsorted = ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
# columns unsorted
unstacked = ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
# more than 2 levels in the columns
unstacked = ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
        result = ymd.unstack(0).stack(-2)
        expected = ymd.unstack(0).stack(0)
        tm.assert_frame_equal(result, expected)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert left.index.is_unique is False
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(
np.arange(12).reshape(4, 3),
index=list("abab"),
columns=["1st", "2nd", "3rd"],
)
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd", "3rd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile(np.arange(3), 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ["1st", "2nd", "1st"]
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ("a", 2), ("b", 1), ("a", 1), ("b", 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(
levels=[["a", "b"], [1, 2], ["1st", "2nd"]],
codes=[
np.tile(np.arange(2).repeat(3), 2),
np.repeat([1, 0, 1], [3, 6, 3]),
np.tile([0, 1, 0], 4),
],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thu,Dinner,No,3.0,1
Thu,Lunch,No,117.32,44
Thu,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(["day", "time", "smoker"])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T
df["foo", "four"] = "foo"
df = df.sort_index(level=1, axis=1)
stacked = df.stack()
result = df["foo"].stack().sort_index()
tm.assert_series_equal(stacked["foo"], result, check_names=False)
assert result.name is None
assert stacked["bar"].dtype == np.float_
def test_unstack_bug(self):
df = DataFrame(
{
"state": ["naive", "naive", "naive", "active", "active", "active"],
"exp": ["a", "b", "b", "b", "a", "a"],
"barcode": [1, 2, 3, 4, 1, 3],
"v": ["hi", "hi", "bye", "bye", "bye", "peace"],
"extra": np.arange(6.0),
}
)
result = df.groupby(["state", "exp", "barcode", "v"]).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack()
assert unstacked.index.name == "first"
assert unstacked.columns.names == ["exp", "second"]
restacked = unstacked.stack()
assert restacked.index.names == frame.index.names
@pytest.mark.parametrize("method", ["stack", "unstack"])
def test_stack_unstack_wrong_level_name(
self, method, multiindex_dataframe_random_data
):
# GH 18303 - wrong level name should raise
frame = multiindex_dataframe_random_data
# A DataFrame with flat axes:
df = frame.loc["foo"]
with pytest.raises(KeyError, match="does not match index name"):
getattr(df, method)("mistake")
if method == "unstack":
# Same on a Series:
s = df.iloc[:, 0]
with pytest.raises(KeyError, match="does not match index name"):
getattr(s, method)("mistake")
def test_unstack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.unstack("second")
expected = frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
def test_stack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack("second")
result = unstacked.stack("exp")
expected = frame.unstack().stack(0)
tm.assert_frame_equal(result, expected)
result = frame.stack("exp")
expected = frame.stack()
tm.assert_series_equal(result, expected)
def test_stack_unstack_multiple(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
expected = ymd.unstack("year").unstack("month")
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = ymd["A"]
s_unstacked = s.unstack(["year", "month"])
tm.assert_frame_equal(s_unstacked, expected["A"])
restacked = unstacked.stack(["year", "month"])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sort_index(level=0)
tm.assert_frame_equal(restacked, ymd)
assert restacked.index.names == ymd.index.names
# GH #451
unstacked = ymd.unstack([1, 2])
expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
unstacked = ymd.unstack([2, 1])
expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])
def test_stack_names_and_numbers(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
# Can't use mixture of names and numbers to stack
with pytest.raises(ValueError, match="level should contain"):
unstacked.stack([0, "month"])
def test_stack_multiple_out_of_bounds(
self, multiindex_year_month_day_dataframe_random_data
):
# nlevels == 3
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
with pytest.raises(IndexError, match="Too many levels"):
unstacked.stack([2, 3])
with pytest.raises(IndexError, match="not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH4342
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period",
)
idx2 = Index(["A", "B"] * 3, name="str")
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period"
)
expected = DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"]
)
expected.columns.name = "str"
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"],
freq="M",
name="period2",
)
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period1"
)
e_cols = pd.PeriodIndex(
["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"],
freq="M",
name="period2",
)
expected = DataFrame(
[
[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan],
],
index=e_idx,
columns=e_cols,
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH4342
idx1 = pd.PeriodIndex(
["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"],
freq="M",
name="period2",
)
value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]}
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1")
e_2 = pd.PeriodIndex(
["2013-10", "2013-12", "2014-02", "2013-10", "2013-12", "2014-02"],
freq="M",
name="period2",
)
e_cols = MultiIndex.from_arrays(["A A A B B B".split(), e_2])
expected = DataFrame(
[[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], index=e_1, columns=e_cols
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(
["2014-01", "2014-02", "2014-01", "2014-02"], freq="M", name="period1"
)
e_2 = pd.PeriodIndex(
["2013-10", "2013-12", "2014-02"], freq="M", name="period2"
)
e_cols = MultiIndex.from_arrays(["A A B B".split(), e_1])
expected = DataFrame(
[[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], index=e_2, columns=e_cols
)
tm.assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
# bug when some uniques are not present in the data GH#3170
id_col = ([1] * 3) + ([2] * 3)
name = (["a"] * 3) + (["b"] * 3)
date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame({"ID": id_col, "NAME": name, "DATE": date, "VAR1": var1})
multi = df.set_index(["DATE", "ID"])
multi.columns.name = "Params"
unst = multi.unstack("ID")
down = unst.resample("W-THU").mean()
rs = down.stack("ID")
xp = unst.loc[:, ["VAR1"]].resample("W-THU").mean().stack("ID")
xp.columns.name = "Params"
tm.assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH#3997
df = DataFrame({"A": ["a1", "a2"], "B": ["b1", "b2"], "C": [1, 1]})
df = df.set_index(["A", "B"])
stacked = df.unstack().stack(dropna=False)
assert len(stacked) > len(stacked.dropna())
stacked = df.unstack().stack(dropna=True)
tm.assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(
index=[
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]],
)
df.index.names = ["a", "b", "c"]
df.columns.names = ["d", "e"]
# it works!
df.unstack(["b", "c"])
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl GH#2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame(
{
"A": np.random.randint(100, size=NUM_ROWS),
"B": np.random.randint(300, size=NUM_ROWS),
"C": np.random.randint(-7, 7, size=NUM_ROWS),
"D": np.random.randint(-19, 19, size=NUM_ROWS),
"E": np.random.randint(3000, size=NUM_ROWS),
"F": np.random.randn(NUM_ROWS),
}
)
idf = df.set_index(["A", "B", "C", "D", "E"])
# it works! is sufficient
idf.unstack("E")
def test_unstack_unobserved_keys(self):
# related to GH#2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, codes)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
assert len(result.columns) == 4
recons = result.stack()
tm.assert_frame_equal(recons, df)
@pytest.mark.slow
def test_unstack_number_of_levels_larger_than_int32(self):
# GH#20601
df = DataFrame(
np.random.randn(2 ** 16, 2), index=[np.arange(2 ** 16), np.arange(2 ** 16)]
)
with pytest.raises(ValueError, match="int32 overflow"):
df.unstack()
def test_stack_order_with_unsorted_levels(self):
# GH#16323
def manual_compare_stacked(df, df_stacked, lev0, lev1):
assert all(
df.loc[row, col] == df_stacked.loc[(row, col[lev0]), col[lev1]]
for row in df.index
for col in df.columns
)
# deep check for 1-row case
for width in [2, 3]:
levels_poss = itertools.product(
itertools.permutations([0, 1, 2], width), repeat=2
)
for levels in levels_poss:
columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(columns=columns, data=[range(4)])
for stack_lev in range(2):
df_stacked = df.stack(stack_lev)
manual_compare_stacked(df, df_stacked, stack_lev, 1 - stack_lev)
# check multi-row case
mi = MultiIndex(
levels=[["A", "C", "B"], ["B", "A", "C"]],
codes=[np.repeat(range(3), 3), np.tile(range(3), 3)],
)
df = DataFrame(
columns=mi, index=range(5), data=np.arange(5 * len(mi)).reshape(5, -1)
)
manual_compare_stacked(df, df.stack(0), 0, 1)
def test_stack_unstack_unordered_multiindex(self):
# GH# 18265
values = np.arange(5)
data = np.vstack(
[
[f"b{x}" for x in values], # b0, b1, ..
[f"a{x}" for x in values], # a0, a1, ..
]
)
df = DataFrame(data.T, columns=["b", "a"])
df.columns.name = "first"
second_level_dict = {"x": df}
multi_level_df = pd.concat(second_level_dict, axis=1)
multi_level_df.columns.names = ["second", "first"]
df = multi_level_df.reindex(sorted(multi_level_df.columns), axis=1)
result = df.stack(["first", "second"]).unstack(["first", "second"])
expected = DataFrame(
[["a0", "b0"], ["a1", "b1"], ["a2", "b2"], ["a3", "b3"], ["a4", "b4"]],
index=[0, 1, 2, 3, 4],
columns=MultiIndex.from_tuples(
[("a", "x"), ("b", "x")], names=["first", "second"]
),
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_types(
self, multiindex_year_month_day_dataframe_random_data
):
# GH#403
ymd = multiindex_year_month_day_dataframe_random_data
ymd["E"] = "foo"
ymd["F"] = 2
unstacked = ymd.unstack("month")
assert unstacked["A", 1].dtype == np.float64
assert unstacked["E", 1].dtype == np.object_
assert unstacked["F", 1].dtype == np.float64
def test_unstack_group_index_overflow(self):
codes = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(
levels=[level] * 8 + [[0, 1]],
codes=[codes] * 8 + [np.arange(2).repeat(500)],
)
s = Series(np.arange(1000), index=index)
result = s.unstack()
assert result.shape == (500, 2)
# test roundtrip
stacked = result.stack()
tm.assert_series_equal(s, stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(
levels=[[0, 1]] + [level] * 8,
codes=[np.arange(2).repeat(500)] + [codes] * 8,
)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
assert result.shape == (500, 2)
# put it in middle
index = MultiIndex(
levels=[level] * 4 + [[0, 1]] + [level] * 4,
codes=([codes] * 4 + [np.arange(2).repeat(500)] + [codes] * 4),
)
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
assert result.shape == (500, 2)
def test_unstack_with_missing_int_cast_to_float(self, using_array_manager):
# https://github.com/pandas-dev/pandas/issues/37115
df = DataFrame(
{
"a": ["A", "A", "B"],
"b": ["ca", "cb", "cb"],
"v": [10] * 3,
}
).set_index(["a", "b"])
# add another int column to get 2 blocks
df["is_"] = 1
if not using_array_manager:
assert len(df._mgr.blocks) == 2
result = df.unstack("b")
result[("is_", "ca")] = result[("is_", "ca")].fillna(0)
expected = DataFrame(
[[10.0, 10.0, 1.0, 1.0], [np.nan, 10.0, 0.0, 1.0]],
index=Index(["A", "B"], dtype="object", name="a"),
columns=MultiIndex.from_tuples(
[("v", "ca"), ("v", "cb"), ("is_", "ca"), ("is_", "cb")],
names=[None, "b"],
),
)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
expected[("v", "cb")] = expected[("v", "cb")].astype("int64")
expected[("is_", "cb")] = expected[("is_", "cb")].astype("int64")
tm.assert_frame_equal(result, expected)
def test_unstack_with_level_has_nan(self):
# GH 37510
df1 = DataFrame(
{
"L1": [1, 2, 3, 4],
"L2": [3, 4, 1, 2],
"L3": [1, 1, 1, 1],
"x": [1, 2, 3, 4],
}
)
df1 = df1.set_index(["L1", "L2", "L3"])
new_levels = ["n1", "n2", "n3", None]
df1.index = df1.index.set_levels(levels=new_levels, level="L1")
df1.index = df1.index.set_levels(levels=new_levels, level="L2")
result = df1.unstack("L3")[("x", 1)].sort_index().index
expected = MultiIndex(
levels=[["n1", "n2", "n3", None], ["n1", "n2", "n3", None]],
codes=[[0, 1, 2, 3], [2, 3, 0, 1]],
names=["L1", "L2"],
)
tm.assert_index_equal(result, expected)
def test_stack_nan_in_multiindex_columns(self):
# GH#39481
df = DataFrame(
np.zeros([1, 5]),
columns=MultiIndex.from_tuples(
[
(0, None, None),
(0, 2, 0),
(0, 2, 1),
(0, 3, 0),
(0, 3, 1),
],
),
)
result = df.stack(2)
expected = DataFrame(
[[0.0, np.nan, np.nan], [np.nan, 0.0, 0.0], [np.nan, 0.0, 0.0]],
index=Index([(0, None), (0, 0), (0, 1)]),
columns=Index([(0, None), (0, 2), (0, 3)]),
)
tm.assert_frame_equal(result, expected)
def test_multi_level_stack_categorical(self):
# GH 15239
midx = MultiIndex.from_arrays(
[
["A"] * 2 + ["B"] * 2,
pd.Categorical(list("abab")),
pd.Categorical(list("ccdd")),
]
)
df = DataFrame(np.arange(8).reshape(2, 4), columns=midx)
result = df.stack([1, 2])
expected = DataFrame(
[
[0, np.nan],
[np.nan, 2],
[1, np.nan],
[np.nan, 3],
[4, np.nan],
[np.nan, 6],
[5, np.nan],
[np.nan, 7],
],
columns=["A", "B"],
index=MultiIndex.from_arrays(
[
[0] * 4 + [1] * 4,
pd.Categorical(list("aabbaabb")),
pd.Categorical(list("cdcdcdcd")),
]
),
)
tm.assert_frame_equal(result, expected)
def test_stack_nan_level(self):
# GH 9406
df_nan = DataFrame(
np.arange(4).reshape(2, 2),
columns=MultiIndex.from_tuples(
[("A", np.nan), ("B", "b")], names=["Upper", "Lower"]
),
index=Index([0, 1], name="Num"),
dtype=np.float64,
)
result = df_nan.stack()
expected = DataFrame(
[[0.0, np.nan], [np.nan, 1], [2.0, np.nan], [np.nan, 3.0]],
columns=Index(["A", "B"], name="Upper"),
index=MultiIndex.from_tuples(
[(0, np.nan), (0, "b"), (1, np.nan), (1, "b")], names=["Num", "Lower"]
),
)
tm.assert_frame_equal(result, expected)
def test_unstack_categorical_columns(self):
# GH 14018
        idx = MultiIndex.from_product([["A"], [0, 1]])
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
def usgs_myb_year(years, current_year_str):
"""
    Sets the data column for the year of interest and checks that the chosen
    year falls inside the range covered by the data file.
    :param years: string, year range separated by a hyphen, e.g. "2014-2018"
    :param current_year_str: string, year of interest
    :return: string, column name of the form "year_N"
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
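# Illustrative example (added comment, not part of the upstream source):
# with a covered range of "2014-2018", usgs_myb_year("2014-2018", "2016")
# returns "year_3" because 2016 is the third year in the range; a year
# outside the range is only logged and the function implicitly returns None.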
def usgs_myb_name(USGS_Source):
"""
    Takes the USGS source name and parses it so it can be used in other parts
    of Flow-By-Activity.
    :param USGS_Source: string, usgs source name
    :return: string, lower-case source name with spaces inserted between words
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
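# Illustrative example (added comment, not part of the upstream source):
# a source name such as "USGS_MYB_SandGravelConstruction" (hypothetical value)
# is split on "_", the third token is kept, a space is inserted before each
# capital letter and the result is lower-cased, giving
# "sand gravel construction".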
def usgs_myb_static_variables():
"""
    Populates the Flow-By-Activity data values that are the same
    for all USGS_MYB files.
    :return: dict, common FBA field values
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
    :param value_string: string to strip digits from
    :return: string, value_string with all digits removed
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
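# Illustrative example (added comment, not part of the upstream source):
# usgs_myb_remove_digits("Quantity3") returns "Quantity"; the same
# str.maketrans('', '', digits) pattern is used inline in several of the
# parse functions below to strip footnote markers from row labels.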
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
    :param args: dictionary, arguments specified when running
    flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
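# Hedged usage sketch (added comment, not part of the upstream source): the
# call/parse pair is normally driven by flowsa's flowbyactivity.py, but the
# chain is roughly:
#     resp = requests.get(asbestos_myb_url)   # hypothetical URL and request
#     df_data = usgs_asbestos_call(resp=resp, year="2016")
#     fba = usgs_asbestos_parse(df_list=[df_data],
#                               source="USGS_MYB_Asbestos", year="2016")
# where "USGS_MYB_Asbestos" is an assumed source name and the workbook behind
# the URL must contain the 'T1' sheet that usgs_asbestos_call expects.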
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_bauxite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, as shipped:":
prod = "import"
elif df.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_beryllium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_beryllium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["United States6", "Mine shipments1",
"Imports for consumption, beryl2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year)
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"Imports for consumption, beryl2":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_boron_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data.loc[8:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data.loc[21:22]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_three = pd.DataFrame(df_raw_data.loc[27:28]).reindex()
df_data_three = df_data_three.reset_index()
del df_data_three["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_two.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_three.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['boron'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
del df_data_three[col]
frames = [df_data_one, df_data_two, df_data_three]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_boron_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["B2O3 content", "Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['boron'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "B2O3 content" or \
df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
if df.iloc[index]["Production"].strip() == "Colemanite:4":
des = "Colemanite"
elif df.iloc[index]["Production"].strip() == "Ulexite:4":
des = "Ulexite"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if des == name:
data['FlowName'] = name + " " + product
else:
data['FlowName'] = name + " " + product + " " + des
data["Description"] = des
data["ActivityProducedBy"] = name
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_chromium_call(*, resp, year, **_):
""""
Convert response for calling url to pandas dataframe,
begin parsing df into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:24]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['chromium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_chromium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary2", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary2":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_clay_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data_ball = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T3')
df_data_ball = pd.DataFrame(df_raw_data_ball.loc[19:19]).reindex()
df_data_ball = df_data_ball.reset_index()
del df_data_ball["index"]
df_raw_data_bentonite = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4 ')
df_data_bentonite = pd.DataFrame(
df_raw_data_bentonite.loc[28:28]).reindex()
df_data_bentonite = df_data_bentonite.reset_index()
del df_data_bentonite["index"]
df_raw_data_common = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T5 ')
df_data_common = pd.DataFrame(df_raw_data_common.loc[40:40]).reindex()
df_data_common = df_data_common.reset_index()
del df_data_common["index"]
df_raw_data_fire = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T6 ')
df_data_fire = pd.DataFrame(df_raw_data_fire.loc[12:12]).reindex()
df_data_fire = df_data_fire.reset_index()
del df_data_fire["index"]
df_raw_data_fuller = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7 ')
df_data_fuller = pd.DataFrame(df_raw_data_fuller.loc[17:17]).reindex()
df_data_fuller = df_data_fuller.reset_index()
del df_data_fuller["index"]
df_raw_data_kaolin = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8 ')
df_data_kaolin = pd.DataFrame(df_raw_data_kaolin.loc[18:18]).reindex()
df_data_kaolin = df_data_kaolin.reset_index()
del df_data_kaolin["index"]
df_raw_data_export = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T13')
df_data_export = pd.DataFrame(df_raw_data_export.loc[6:15]).reindex()
df_data_export = df_data_export.reset_index()
del df_data_export["index"]
df_raw_data_import = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T14')
df_data_import = pd.DataFrame(df_raw_data_import.loc[6:13]).reindex()
df_data_import = df_data_import.reset_index()
del df_data_import["index"]
df_data_ball.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_bentonite.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_common.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fire.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fuller.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_kaolin.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_export.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_import.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_ball["type"] = "Ball clay"
df_data_bentonite["type"] = "Bentonite"
df_data_common["type"] = "Common clay"
df_data_fire["type"] = "Fire clay"
df_data_fuller["type"] = "Fuller’s earth"
df_data_kaolin["type"] = "Kaolin"
df_data_export["type"] = "export"
df_data_import["type"] = "import"
col_to_use = ["Production", "type"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['clay'], year))
for col in df_data_import.columns:
if col not in col_to_use:
del df_data_import[col]
del df_data_export[col]
for col in df_data_ball.columns:
if col not in col_to_use:
del df_data_ball[col]
del df_data_bentonite[col]
del df_data_common[col]
del df_data_fire[col]
del df_data_fuller[col]
del df_data_kaolin[col]
frames = [df_data_import, df_data_export, df_data_ball, df_data_bentonite,
df_data_common, df_data_fire, df_data_fuller, df_data_kaolin]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_clay_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ball clay", "Bentonite", "Fire clay", "Kaolin",
"Fuller’s earth", "Total", "Grand total",
"Artificially activated clay and earth",
"Clays, not elsewhere classified",
"Clays, not elsewhere classified"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["type"].strip() == "import":
product = "imports"
elif df.iloc[index]["type"].strip() == "export":
product = "exports"
else:
product = "production"
if str(df.iloc[index]["Production"]).strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if product == "production":
data['FlowName'] = \
df.iloc[index]["type"].strip() + " " + product
data["Description"] = df.iloc[index]["type"].strip()
data["ActivityProducedBy"] = df.iloc[index]["type"].strip()
else:
data['FlowName'] = \
df.iloc[index]["Production"].strip() + " " + product
data["Description"] = df.iloc[index]["Production"].strip()
data["ActivityProducedBy"] = \
df.iloc[index]["Production"].strip()
col_name = usgs_myb_year(YEARS_COVERED['clay'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)" or \
str(df.iloc[index][col_name]) == "(2)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_cobalt_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:11]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[23:23]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "space_6", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['cobalt'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_cobalt_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["United Statese, 16, 17", "Mine productione",
"Imports for consumption", "Exports"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"United Statese, 16, 17":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Exports":
prod = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['cobalt'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
remove_rows = ["(18)", "(2)"]
if data["FlowAmount"] not in remove_rows:
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_copper_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[30:31]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Unit"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['copper'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_copper_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
if product == "Total":
prod = "production"
elif product == "Exports, refined":
prod = "exports"
elif product == "Imports, refined":
prod = "imports"
data["ActivityProducedBy"] = "Copper; Mine"
data['FlowName'] = name + " " + prod
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['copper'], year)
data["Description"] = "Copper; Mine"
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_diatomite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) == 10:
df_data_one.columns = ["Production", "year_1", "space_2", "year_2",
"space_3", "year_3", "space_4", "year_4",
"space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['diatomite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_diatomite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports2", "Imports for consumption2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption2":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand metric tons"
col_name = usgs_myb_year(YEARS_COVERED['diatomite'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_feldspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:8]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['feldspar'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_feldspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports, feldspar:4":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:4":
prod = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, feldspar:e, 2":
prod = "production"
elif df.iloc[index]["Production"].strip() == "Nepheline syenite:":
prod = "production"
des = "Nepheline syenite"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['feldspar'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
if name == des:
data['FlowName'] = name + " " + prod
else:
data['FlowName'] = name + " " + prod + " " + des
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_fluorspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
if year in YEARS_COVERED['fluorspar_inports']:
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_raw_data_three = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7')
df_raw_data_four = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_data_one = pd.DataFrame(df_raw_data_one.loc[5:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if year in YEARS_COVERED['fluorspar_inports']:
df_data_two = pd.DataFrame(df_raw_data_two.loc[7:8]).reindex()
df_data_three = pd.DataFrame(df_raw_data_three.loc[19:19]).reindex()
df_data_four = pd.DataFrame(df_raw_data_four.loc[11:11]).reindex()
if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "not_1", "space_2",
"not_2", "space_3", "not_3", "space_4",
"not_4", "space_5", "year_4", "space_6",
"year_5"]
if len(df_data_three.columns) == 9:
df_data_three.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
df_data_four.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
    if len(df_data_one.columns) == 13:
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['fluorspar'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
if year in YEARS_COVERED['fluorspar_inports']:
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
for col in df_data_three.columns:
if col not in col_to_use:
del df_data_three[col]
for col in df_data_four.columns:
if col not in col_to_use:
del df_data_four[col]
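    # Tag each frame so the parse step can tell the T1 rows apart from the
    # import table (T2) and the aluminum fluoride / cryolite tables (T7, T8).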
df_data_one["type"] = "data_one"
if year in YEARS_COVERED['fluorspar_inports']:
# aluminum fluoride
# cryolite
df_data_two["type"] = "data_two"
df_data_three["type"] = "Aluminum Fluoride"
df_data_four["type"] = "Cryolite"
frames = [df_data_one, df_data_two, df_data_three, df_data_four]
else:
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_fluorspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3", "Total", "Hydrofluoric acid",
"Metallurgical", "Production"]
prod = ""
name = usgs_myb_name(source)
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:3":
prod = "exports"
des = name
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "imports"
des = name
elif df.iloc[index]["Production"].strip() == "Fluorosilicic acid:":
prod = "production"
des = "Fluorosilicic acid:"
if str(df.iloc[index]["type"]).strip() == "data_two":
prod = "imports"
des = df.iloc[index]["Production"].strip()
elif str(df.iloc[index]["type"]).strip() == \
"Aluminum Fluoride" or \
str(df.iloc[index]["type"]).strip() == "Cryolite":
prod = "imports"
des = df.iloc[index]["type"].strip()
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['fluorspar'], year)
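                # "W" marks a value withheld in the source table, so it is
                # recorded as WITHDRAWN_KEYWORD instead of a number.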
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gallium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:7]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 11:
for x in range(11, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gallium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gallium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production, primary crude", "Metal"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, primary crude":
product = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Kilograms"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
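                # "--" in the workbook is treated as zero; a blank cell (read
                # in as NaN) is treated as withheld.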
if str(df.iloc[index][col_name]).strip() == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_garnet_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:5]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 13:
for x in range(13, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
del df_data_two[col_name]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['garnet'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_garnet_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption: 3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Crude production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['garnet'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gold_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) == 13:
df_data.columns = ["Production", "Space", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gold'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gold_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports, refined bullion",
"Imports for consumption, refined bullion"]
dataframe = pd.DataFrame()
product = "production"
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
elif df.iloc[index]["Production"].strip() == \
"Exports, refined bullion":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, refined bullion":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "kilograms"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gold'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_graphite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit", "space_6",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['graphite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_graphite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantiy", "Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gypsum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 11:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gypsum'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_gypsum_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gypsum'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iodine_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:10]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "unit", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['iodine'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iodine_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Quantity, for consumption", "Exports2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:2":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iron_ore_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:25]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['ironore'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iron_ore_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Gross weight", "Quantity"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data['FlowName'] = "Iron Ore " + product
data["Description"] = "Iron Ore"
data["ActivityProducedBy"] = "Iron Ore"
col_name = usgs_myb_year(YEARS_COVERED['ironore'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_kyanite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[4:13]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['kyanite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_kyanite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['kyanite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Exports of kyanite concentrate:3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, all kyanite minerals:3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lead_url_helper(*, year, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
if int(year) < 2013:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2016-lead.xls')
elif int(year) < 2014:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2017-lead.xls')
else:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/s3fs-public/media/files/myb1-2018-lead-advrel.xlsx')
url = build_url
return [url]
def usgs_lead_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[8:15]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
elif int(year) > 2013:
modified_sy = "2014-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
else:
col_to_use.append(usgs_myb_year(YEARS_COVERED['lead'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_lead_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Primary lead, refined content, "
"domestic ores and base bullion",
"Secondary lead, lead content",
"Lead ore and concentrates", "Lead in base bullion"]
import_export = ["Exports, lead content:",
"Imports for consumption, lead content:"]
dataframe = pd.DataFrame()
product = "production"
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() in import_export:
if df.iloc[index]["Production"].strip() == \
"Exports, lead content:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, lead content:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["ActivityProducedBy"] = df.iloc[index]["Production"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_name = usgs_myb_year(modified_sy, year)
elif int(year) > 2013:
modified_sy = "2014-2018"
col_name = usgs_myb_year(modified_sy, year)
else:
col_name = usgs_myb_year(YEARS_COVERED['lead'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lime_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[16:16]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data_two.loc[28:32]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_1.columns) > 12:
for x in range(12, len(df_data_1.columns)):
col_name = "Unnamed: " + str(x)
del df_data_1[col_name]
del df_data_2[col_name]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['lime'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_lime_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Total", "Quantity"]
import_export = ["Exports:7", "Imports for consumption:7"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
prod = "production"
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:7":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:7":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['lime'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
if product.strip() == "Total":
data['FlowName'] = name + " " + prod
elif product.strip() == "Quantity":
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lithium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 11:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['lithium'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_lithium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Exports3", "Imports3", "Production"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['lithium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == "Imports3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_magnesium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:15]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['magnesium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_magnesium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary", "Primary", "Exports", "Imports for consumption"]
dataframe = pd.DataFrame()
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary" or \
df.iloc[index]["Production"].strip() == "Primary":
product = "production" + " " + \
df.iloc[index]["Production"].strip()
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['magnesium'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_manganese_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['manganese'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_manganese_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Exports", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_ma_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_data = pd.DataFrame(df_raw_data.loc[6:7]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 9:
for x in range(9, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
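    # Sheet T2 appears to pair a quantity ("quality_...") and a value column
    # for each year; only the "quality_" column for the requested year is kept.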
    if len(df_data.columns) == 9:
        df_data.columns = ["Product", "space_1", "quality_year_1", "space_2",
                           "value_year_1", "space_3",
                           "quality_year_2", "space_4", "value_year_2"]
col_to_use = ["Product"]
col_to_use.append("quality_"
+ usgs_myb_year(YEARS_COVERED['manufacturedabrasive'],
year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_ma_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Silicon carbide"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Product"].strip().translate(remove_digits)
if product in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data['FlowName'] = "Silicon carbide"
data["ActivityProducedBy"] = "Silicon carbide"
data["Unit"] = "Metric Tons"
col_name = ("quality_"
+ usgs_myb_year(
YEARS_COVERED['manufacturedabrasive'], year))
col_name_array = col_name.split("_")
data["Description"] = product + " " + col_name_array[0]
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_mica_call(*, resp, source, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param source: source
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[4:6]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
name = usgs_myb_name(source)
des = name
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "Unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['mica'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_mica_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['mica'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Production, sold or used by producers:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_molybdenum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['molybdenum'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_molybdenum_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Imports for consumption", "Exports"]
dataframe = pd.DataFrame()
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['molybdenum'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_nickel_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T10')
df_data_1 = pd.DataFrame(df_raw_data.loc[36:36]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_2 = pd.DataFrame(df_raw_data_two.loc[11:16]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_1.columns) > 11:
for x in range(11, len(df_data_1.columns)):
col_name = "Unnamed: " + str(x)
del df_data_1[col_name]
    if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
if len(df_data_2.columns) == 12:
df_data_2.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['nickel'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_nickel_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ores and concentrates3",
"United States, sulfide ore, concentrate"]
import_export = ["Exports:", "Imports for consumption:"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
prod = "production"
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['nickel'], year)
if product.strip() == \
"United States, sulfide ore, concentrate":
data["Description"] = \
"United States, sulfide ore, concentrate Nickel"
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
elif product.strip() == "Ores and concentrates":
data["Description"] = "Ores and concentrates Nickel"
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(4)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_niobium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:19]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 13:
for x in range(13, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit_1", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['niobium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_niobium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Total imports, Nb content", "Total exports, Nb content"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['niobium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['niobium'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_peat_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
"""Calls the excel sheet for nickel and removes extra columns"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:18]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 12:
for x in range(12, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "Unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['peat'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_peat_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Exports", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['peat'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "import"
elif df.iloc[index]["Production"].strip() == "Exports":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_perlite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:6]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data_one.loc[20:25]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
df_data_two.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['perlite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
frames = [df_data_one, df_data_two]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_perlite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Mine production2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['perlite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Mine production2":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "import"
elif df.iloc[index]["Production"].strip() == "Exports:3":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_phosphate_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:9]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data_one.loc[19:21]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
if len(df_data_one.columns) > 12:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
del df_data_two[col_name]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "unit", "space_1", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
df_data_two.columns = ["Production", "unit", "space_1", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['phosphate'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
frames = [df_data_one, df_data_two]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_phosphate_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Gross weight", "Quantity, gross weight"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['phosphate'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Marketable production:":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "import"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_platinum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data.loc[4:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[18:30]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
    if len(df_data_1.columns) == 13:
df_data_1.columns = ["Production", "space_6", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5",
"year_5"]
df_data_2.columns = ["Production", "space_6", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5",
"year_5"]
    elif len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5",
"year_5"]
df_data_2.columns = ["Production", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['platinum'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_platinum_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Palladium, Pd content",
"Platinum, includes coins, Pt content",
"Platinum, Pt content",
"Iridium, Ir content", "Osmium, Os content",
"Rhodium, Rh content", "Ruthenium, Ru content",
"Iridium, osmium, and ruthenium, gross weight",
"Rhodium, Rh content"]
dataframe = pd.DataFrame()
for df in df_list:
previous_name = ""
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports, refined:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, refined:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Mine production:2":
product = "production"
name_array = df.iloc[index]["Production"].strip().split(",")
if product == "production":
name_array = previous_name.split(",")
previous_name = df.iloc[index]["Production"].strip()
name = name_array[0]
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "kilograms"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['platinum'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_potash_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data_one.loc[17:23]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
if len(df_data_one.columns) > 12:
for x in range(12, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
del df_data_two[col_name]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
df_data_two.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['potash'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
frames = [df_data_one, df_data_two]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_potash_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["K2O equivalent"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = | pd.DataFrame() | pandas.DataFrame |
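# Note: the usgs_*_parse functions above grow their output with DataFrame.append()
# inside the row loop. As a minimal sketch (not the flowsa implementation itself),
# the same accumulation can be done with a record list and a single constructor call,
# which avoids the append() deprecation in pandas >= 1.4; the field names below are
# illustrative only.
import pandas as pd

def collect_rows_sketch(rows):
    records = []
    for row in rows:
        # each `row` is assumed to be a dict like {"SourceName": "...", "FlowAmount": "1.0"}
        records.append(dict(row))
    return pd.DataFrame.from_records(records)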
import numpy as np
import pandas as pd
import pytest
from rulelist.datastructure.attribute.nominal_attribute import activation_nominal, NominalAttribute
class TestNominalAttribute(object):
def test_normal(self):
dictdata = {"column1" : np.array(["below50" if i < 50 else "above49" for i in range(100)]),
"column2" : np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_name = "column1"
input_max_operators = 1
input_minsupp = 0
expected_number_items = 2
expected_cardinality_operator = {1: 2}
output_attribute = NominalAttribute(input_name, test_dataframe[input_name], input_max_operators,input_minsupp)
actual_number_items= len(output_attribute.items)
actual_cardinality_operator = output_attribute.cardinality_operator
pd.testing.assert_series_equal(output_attribute.values, test_dataframe[input_name])
assert expected_number_items == actual_number_items
assert expected_cardinality_operator == actual_cardinality_operator
def test_onlyonevalue(self):
dictdata = {"column1" : np.array(["below100" for i in range(100)]),
"column2" : np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_name = "column1"
input_max_operators = 1
input_minsupp = 0
expected_number_items = 1
expected_cardinality_operator = {1: 1}
expected_n_cutpoints = 3
output_attribute = NominalAttribute(input_name, test_dataframe[input_name], input_max_operators,input_minsupp)
actual_number_items= len(output_attribute.items)
actual_cardinality_operator = output_attribute.cardinality_operator
pd.testing.assert_series_equal(output_attribute.values, test_dataframe[input_name])
assert expected_number_items == actual_number_items
assert expected_cardinality_operator == actual_cardinality_operator
class TestActivationNominal(object):
def test_left_interval(self):
dictdata = {"column1" : np.array(["below50" if i < 50 else "above49" for i in range(100)]),
"column2" : np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_attribute_name = "column1"
input_category = "below50"
expected_vector = pd.Series(name= "column1", data = [True if i < 50 else False for i in range(100)])
actual_vector = activation_nominal(test_dataframe,input_attribute_name,input_category)
| pd.testing.assert_series_equal(actual_vector, expected_vector, check_exact=True) | pandas.testing.assert_series_equal |
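# Note: a self-contained illustration of the pd.testing.assert_series_equal check used
# in the tests above; the Series name has to match as well as the boolean values.
import pandas as pd

left = pd.Series(name="column1", data=[True, True, False])
right = pd.Series(name="column1", data=[True, True, False])
pd.testing.assert_series_equal(left, right, check_exact=True)  # passes silently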
#! -*- coding: utf-8 -*-
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import cv2
import pickle
import os
import sys
import codecs
"""This example shows you an example case of flexible-clustering on image data.
In this example, it uses sub data from cifar-10 image collection.
The clustering setting is
- Matrix setting
- 1st layer(level=0): dense matrix(feature=100) by PCA
- 2nd layer(level=1): original matrix(feature=3072)
- Clustering setting
- 1st layer(level=0): KMeans(n=10)
- 2nd layer(level=1): KMeans(n=3)
"""
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
ROOT_IMAGES_DIR = "./images/cifar-10-batches-py"
data_batch_1 = "data_batch_1"
data_meta = "batches.meta"
image_file = unpickle(os.path.join(ROOT_IMAGES_DIR, data_batch_1))
meta_file = unpickle(os.path.join(ROOT_IMAGES_DIR, data_meta))
import sys
sys.path.append("..")
from flexible_clustering_tree.interface import FlexibleClustering
from flexible_clustering_tree.models import FeatureMatrixObject, MultiFeatureMatrixObject, ClusteringOperator, MultiClusteringOperator
label_index2label = {i: label for i, label in enumerate(meta_file[b'label_names'])}
matrix_index2label = {i: str(label_index2label[label_index]) for i, label_index in enumerate(image_file[b'labels'])}
original_feature_matrix = image_file[b'data']
limit_of_sample = 1000
sampled_original_feature_matrix = original_feature_matrix[:limit_of_sample]
sampled_matrix_index2label = {i: str(label_index2label[label_index])
for i, label_index in enumerate(image_file[b'labels']) if i < limit_of_sample}
# feature decomposition with PCA. We set this matrix as 1st layer(level=0)
from sklearn.decomposition import PCA
dense_sampled_original_feature_matrix = PCA(n_components=100).fit_transform(sampled_original_feature_matrix)
f_obj_1st = FeatureMatrixObject(0, dense_sampled_original_feature_matrix)
# set matrix object
f_obj_2nd = FeatureMatrixObject(1, sampled_original_feature_matrix)
multi_f_obj = MultiFeatureMatrixObject([f_obj_1st, f_obj_2nd], sampled_matrix_index2label)
# set clustering algorithm
from sklearn.cluster import KMeans
from hdbscan import HDBSCAN
c_obj_1st = ClusteringOperator(level=0, n_cluster=10, instance_clustering=KMeans(n_clusters=10))
c_obj_2nd = ClusteringOperator(level=1, n_cluster=3, instance_clustering=KMeans(n_clusters=3))
multi_c_obj = MultiClusteringOperator([c_obj_1st, c_obj_2nd])
# run flexible clustering with max depth = 5
flexible_clustering_runner = FlexibleClustering(max_depth=3)
index2cluster_id = flexible_clustering_runner.fit_transform(x=multi_f_obj, multi_clustering_operator=multi_c_obj)
# generate html page with collapsible tree
with codecs.open("animal_example.html", "w") as f:
f.write(flexible_clustering_runner.clustering_tree.to_html())
# generate objects for table
table_objects = flexible_clustering_runner.clustering_tree.to_objects()
import pandas
print( | pandas.DataFrame(table_objects['cluster_information']) | pandas.DataFrame |
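# Note: a small follow-up sketch for inspecting the clustering result printed above;
# it assumes index2cluster_id behaves like a mapping from sample index to cluster id,
# as suggested by its name and the fit_transform() call.
import pandas as pd

assignments = pd.Series(dict(index2cluster_id), name="cluster_id")
print(assignments.value_counts())  # samples per leaf cluster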
from tensorflow.keras.callbacks import TensorBoard
import tensorflow as tf
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras import backend as K
from sqlalchemy import create_engine
from sklearn.preprocessing import StandardScaler, normalize
import pandas as pd
import numpy as np
import constants
#this file is responsible for getting and formatting the data used to create a new prediction.
#connect to the database and load in the model
db = constants.DATABASES['production']
engine_string = "postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}".format(
user = db['USER'],
password = db['PASSWORD'],
host = db['HOST'],
port = db['PORT'],
database = db['NAME']
)
model = tf.keras.models.load_model('model_predictFutureCandle.model')
#function to normalize inputs
def scale_linear_bycolumn(rawpoints, high=1.0, low=0.0):
mins = np.min(rawpoints, axis=0)
maxs = np.max(rawpoints, axis=0)
rng = maxs - mins
return high - (((high - low) * (maxs - rawpoints)) / rng)
#pulls in the data for the next prediction and formats it.
def prepareData(curr_Pair):
#pull in the data and format it by taking the mean between the asking and bid price
engine = create_engine(engine_string)
data = | pd.read_sql_table(curr_Pair, engine) | pandas.read_sql_table |
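# Note: a hedged sketch of the "mean of the asking and bid price" step described in the
# comment above; the column names 'askclose' and 'bidclose' are assumptions and are not
# taken from the actual database schema.
def add_mid_price(df):
    df = df.copy()
    df["mid"] = (df["askclose"] + df["bidclose"]) / 2.0
    return df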
# %%
import os
import sys
from collections import Counter
from datetime import datetime, timedelta
from glob import glob
from pathlib import Path
from zipfile import ZipFile
# data wrangling
import geopandas as gpd
import pandas as pd
import numpy as np
import requests
from urllib.error import HTTPError
# data maniuplation
from convertbng.util import convert_lonlat
# logging
from shapely.geometry import Point
import con_checks as con
import geo_checks as geo
# %%
timestr = datetime.now().strftime("%Y_%m_%d")
src_home = Path('./OpenNaPTAN/src/')
data_home = Path('./OpenNaPTAN/data/')
base_path = (f'{os.getcwd()}')
download_home = str(os.path.join(Path.home(), "Downloads"))
naptan_csv_url = 'http://naptan.app.dft.gov.uk/DataRequest/Naptan.ashx?format=csv'
naptan_xml_url = 'http://naptan.app.dft.gov.uk/Datarequest/naptan.ashx'
# config options
pd.set_option('display.max_columns', None)
| pd.set_option('display.max_rows', 5) | pandas.set_option |
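# Note: a minimal sketch of downloading the CSV export referenced by naptan_csv_url and
# loading it with pandas; that the download is a zip containing a 'Stops.csv' member is
# an assumption about the export format.
import io

def fetch_naptan_stops(url=naptan_csv_url):
    resp = requests.get(url, timeout=120)
    resp.raise_for_status()
    with ZipFile(io.BytesIO(resp.content)) as zf:
        with zf.open("Stops.csv") as fh:
            return pd.read_csv(fh, low_memory=False)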
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from datetime import date, datetime, timedelta
import numpy
import pandas as pd
import pymongo
from pandas import DataFrame
from czsc.Data.financial_mean import financial_dict
from czsc.Utils import util_log_info
from czsc.Utils.trade_date import util_get_real_date, trade_date_sse, util_date_valid, util_date_stamp, \
util_date_str2int, util_date_int2str
# uri = 'mongodb://localhost:27017/factor'
# client = pymongo.MongoClient(uri)
from czsc.Setting import CLIENT
QA_DATABASE = CLIENT.quantaxis
FACTOR_DATABASE = CLIENT.factor
def util_code_tostr(code):
"""
explanation:
        Convert all Shanghai/Shenzhen stock codes to 6-digit strings. When data passes through
        csv/Excel, a code such as 000001 may be coerced by Office into the number 1.
        JoinQuant, MyQuant (juejin), Wind and Tinysoft code formats are also supported.
    params:
        * code ->
            meaning: stock code
            type: str
            allowed values: []
"""
if isinstance(code, int):
return "{:>06d}".format(code)
if isinstance(code, str):
        # JoinQuant code format: '600000.XSHG'
        # MyQuant (juejin) code format: 'SHSE.600000'
        # Wind code format: '600000.SH'
        # Tinysoft code format: 'SH600000'
        code = code.upper()  # codes are stored in upper case in the database
if len(code) == 6:
return code
if len(code) == 8:
            # Tinysoft data
return code[-6:]
if len(code) == 9:
return code[:6]
if len(code) == 11:
if code[0] in ["S"]:
return code.split(".")[1]
return code.split(".")[0]
        raise ValueError("Invalid stock code format")
if isinstance(code, list):
return util_code_tostr(code[0])
def util_code_tolist(code, auto_fill=True):
"""
explanation:
        Convert code ==> list
    params:
        * code ->
            meaning: stock code
            type: str
            allowed values: []
        * auto_fill->
            meaning: whether to zero-pad to 6 digits (for stocks/indexes/ETFs; not applicable to futures) (default: {True})
            type: bool
            allowed values: [True]
"""
if isinstance(code, str):
if auto_fill:
return [util_code_tostr(code)]
else:
return [code.upper()]
elif isinstance(code, list):
if auto_fill:
return [util_code_tostr(item) for item in code]
else:
return [item.upper() for item in code]
def now_time():
    return str(util_get_real_date(str(date.today() - timedelta(days=1)), trade_date_sse, -1)) + \
        ' 17:00:00' if datetime.now().hour < 15 else str(util_get_real_date(
        str(date.today()), trade_date_sse, -1)) + ' 15:00:00'
def fetch_future_day(
code,
start=None,
end=None,
format='pandas',
collections=QA_DATABASE.future_day
):
"""
:param code:
:param start:
:param end:
:param format:
:param collections:
:return: pd.DataFrame
columns = ["code", "date", "open", "close", "high", "low", "position", "price", "trade"]
"""
start = '1990-01-01' if start is None else str(start)[0:10]
end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10]
code = util_code_tolist(code, auto_fill=False)
if util_date_valid(end):
_data = []
cursor = collections.find(
{
'code': {
'$in': code
},
"date_stamp":
{
"$lte": util_date_stamp(end),
"$gte": util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000
)
if format in ['dict', 'json']:
return [data for data in cursor]
for item in cursor:
_data.append(
[
str(item['code']),
float(item['open']),
float(item['high']),
float(item['low']),
float(item['close']),
float(item['position']),
float(item['price']),
float(item['trade']),
item['date']
]
)
        # multiple output formats
if format in ['n', 'N', 'numpy']:
_data = numpy.asarray(_data)
elif format in ['list', 'l', 'L']:
_data = _data
elif format in ['P', 'p', 'pandas', 'pd']:
_data = DataFrame(
_data,
columns=[
'code',
'open',
'high',
'low',
'close',
'position',
'price',
'trade',
'date'
]
).drop_duplicates()
_data['date'] = pd.to_datetime(_data['date'])
_data = _data.set_index('date', drop=False)
else:
logging.error(
"Error fetch_future_day format parameter %s is none of \"P, p, pandas, pd , n, N, numpy !\" "
% format
)
return _data
else:
logging.warning('Something wrong with date')
def fetch_financial_report(code=None, start=None, end=None, report_date=None, ltype='EN', db=QA_DATABASE):
"""
    Fetch professional financial report data.
    :params
        code: stock code or list of codes
        report_date: 8-digit integer date
        ltype: how column names are displayed ('CH'/'CN' or 'EN')
    :return
        DataFrame indexed by report_date and code
"""
if isinstance(code, str):
code = [code]
if isinstance(report_date, str):
report_date = [util_date_str2int(report_date)]
elif isinstance(report_date, int):
report_date = [report_date]
elif isinstance(report_date, list):
report_date = [util_date_str2int(item) for item in report_date]
collection = db.financial
num_columns = [item[:3] for item in list(financial_dict.keys())]
CH_columns = [item[3:] for item in list(financial_dict.keys())]
EN_columns = list(financial_dict.values())
filter = {}
projection = {"_id": 0}
try:
if code is not None:
filter.update(
code={
'$in': code
}
)
if start or end:
start = '1990-01-01' if start is None else str(start)[0:10]
end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10]
if not util_date_valid(end):
util_log_info('Something wrong with end date {}'.format(end))
return
if not util_date_valid(start):
util_log_info('Something wrong with start date {}'.format(start))
return
filter.update(
report_date={
"$lte": util_date_str2int(end),
"$gte": util_date_str2int(start)
}
)
elif report_date is not None:
filter.update(
report_date={
'$in': report_date
}
)
collection.create_index([('report_date', -1), ('code', 1)])
data = [
item for item in collection.find(
filter=filter,
projection=projection,
batch_size=10000,
# sort=[('report_date', -1)]
)
]
if len(data) > 0:
res_pd = pd.DataFrame(data)
if ltype in ['CH', 'CN']:
cndict = dict(zip(num_columns, CH_columns))
cndict['code'] = 'code'
cndict['report_date'] = 'report_date'
res_pd.columns = res_pd.columns.map(lambda x: cndict[x])
            elif ltype == 'EN':
endict = dict(zip(num_columns, EN_columns))
endict['code'] = 'code'
endict['report_date'] = 'report_date'
try:
res_pd.columns = res_pd.columns.map(lambda x: endict[x])
except Exception as e:
print(e)
if res_pd.report_date.dtype == numpy.int64:
res_pd.report_date = pd.to_datetime(
res_pd.report_date.apply(util_date_int2str)
)
else:
res_pd.report_date = pd.to_datetime(res_pd.report_date)
return res_pd.replace(-4.039810335e+34,
numpy.nan).set_index(
['report_date',
'code'],
# drop=False
)
else:
return None
except Exception as e:
raise e
def fetch_future_bi_day(
code,
start=None,
end=None,
limit=2,
format='pandas',
collections=FACTOR_DATABASE.future_bi_day
):
"""
:param code:
:param start:
:param end:
:param limit: 如果有limit,直接按limit的数量取
:param format:
:param collections:
:return: pd.DataFrame
columns = ["code", "date", "value", "fx_mark"]
"""
code = util_code_tolist(code, auto_fill=False)
filter = {
'code': {
'$in': code
}
}
projection = {"_id": 0}
if start or end:
start = '1990-01-01' if start is None else str(start)[0:10]
end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10]
if not util_date_valid(end):
logging.warning('Something wrong with date')
return
filter.update(
date_stamp={
"$lte": util_date_stamp(end),
"$gte": util_date_stamp(start)
}
)
cursor = collections.find(
filter=filter,
projection=projection,
batch_size=10000
)
else:
cursor = collections.find(
filter=filter,
projection=projection,
limit=limit,
sort=[('date', -1)],
batch_size=10000
)
_data = []
if format in ['dict', 'json']:
_data = [data for data in cursor]
        # restore chronological order (the query above sorts by date descending)
if not(start or end):
_data = _data[::-1]
return _data
for item in cursor:
_data.append(
[
str(item['code']),
item['date'],
str(item['fx_mark']),
item['fx_start'],
item['fx_end'],
float(item['value'])
]
)
if not (start or end):
_data = _data[::-1]
    # multiple output formats
if format in ['n', 'N', 'numpy']:
_data = numpy.asarray(_data)
elif format in ['list', 'l', 'L']:
_data = _data
elif format in ['P', 'p', 'pandas', 'pd']:
_data = DataFrame(
_data,
columns=[
'code',
'date',
'fx_mark',
'fx_start',
'fx_end',
'value'
]
).drop_duplicates()
_data['date'] = | pd.to_datetime(_data['date']) | pandas.to_datetime |
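# Note: an illustrative call of the fetch_future_day() helper defined above; the contract
# code 'RBL8' and the date range are placeholders and assume a populated local MongoDB.
if __name__ == "__main__":
    bars = fetch_future_day("RBL8", start="2020-01-01", end="2020-06-30")
    print(bars[["open", "high", "low", "close"]].tail())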
# -*- coding: utf-8 -*-
"""
@author: HYPJUDY 2019/4/15
https://github.com/HYPJUDY
Decoupling Localization and Classification in Single Shot Temporal Action Detection
-----------------------------------------------------------------------------------
Operations used by Decouple-SSAD
"""
import pandas as pd
import pandas
import numpy as np
import numpy
import os
import tensorflow as tf
from os.path import join
#################################### TRAIN & TEST #####################################
def abs_smooth(x):
"""Smoothed absolute function. Useful to compute an L1 smooth error.
Define as:
x^2 / 2 if abs(x) < 1
abs(x) - 0.5 if abs(x) > 1
We use here a differentiable definition using min(x) and abs(x). Clearly
not optimal, but good enough for our purpose!
"""
absx = tf.abs(x)
minx = tf.minimum(absx, 1)
r = 0.5 * ((absx - 1) * minx + absx)
return r
def jaccard_with_anchors(anchors_min, anchors_max, len_anchors, box_min, box_max):
"""Compute jaccard score between a box and the anchors.
"""
int_xmin = tf.maximum(anchors_min, box_min)
int_xmax = tf.minimum(anchors_max, box_max)
inter_len = tf.maximum(int_xmax - int_xmin, 0.)
union_len = len_anchors - inter_len + box_max - box_min
jaccard = tf.div(inter_len, union_len)
return jaccard
def loop_condition(idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
b_match_x, b_match_w, b_match_labels, b_match_scores):
r = tf.less(idx, tf.shape(b_glabels))
return r[0]
def loop_body(idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
b_match_x, b_match_w, b_match_labels, b_match_scores):
num_class = b_match_labels.get_shape().as_list()[-1]
label = b_glabels[idx][0:num_class]
box_min = b_gbboxes[idx, 0]
box_max = b_gbboxes[idx, 1]
# ground truth
box_x = (box_max + box_min) / 2
box_w = (box_max - box_min)
# predict
anchors_min = b_anchors_rx - b_anchors_rw / 2
anchors_max = b_anchors_rx + b_anchors_rw / 2
len_anchors = anchors_max - anchors_min
jaccards = jaccard_with_anchors(anchors_min, anchors_max, len_anchors, box_min, box_max)
# jaccards > b_match_scores > -0.5 & jaccards > matching_threshold
mask = tf.greater(jaccards, b_match_scores)
matching_threshold = 0.5
mask = tf.logical_and(mask, tf.greater(jaccards, matching_threshold))
mask = tf.logical_and(mask, b_match_scores > -0.5)
imask = tf.cast(mask, tf.int32)
fmask = tf.cast(mask, tf.float32)
# Update values using mask.
# if overlap enough, update b_match_* with gt, otherwise not update
b_match_x = fmask * box_x + (1 - fmask) * b_match_x
b_match_w = fmask * box_w + (1 - fmask) * b_match_w
ref_label = tf.zeros(tf.shape(b_match_labels), dtype=tf.int32)
ref_label = ref_label + label
b_match_labels = tf.matmul(tf.diag(imask), ref_label) + tf.matmul(tf.diag(1 - imask), b_match_labels)
b_match_scores = tf.maximum(jaccards, b_match_scores)
return [idx + 1, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
b_match_x, b_match_w, b_match_labels, b_match_scores]
def default_box(layer_steps, scale, a_ratios):
width_set = [scale * ratio for ratio in a_ratios]
center_set = [1. / layer_steps * i + 0.5 / layer_steps for i in range(layer_steps)]
width_default = []
center_default = []
for i in range(layer_steps):
for j in range(len(a_ratios)):
width_default.append(width_set[j])
center_default.append(center_set[i])
width_default = np.array(width_default)
center_default = np.array(center_default)
return width_default, center_default
def anchor_box_adjust(anchors, config, layer_name, pre_rx=None, pre_rw=None):
if pre_rx == None:
dboxes_w, dboxes_x = default_box(config.num_anchors[layer_name],
config.scale[layer_name], config.aspect_ratios[layer_name])
else:
dboxes_x = pre_rx
dboxes_w = pre_rw
anchors_conf = anchors[:, :, -3]
# anchors_conf=tf.nn.sigmoid(anchors_conf)
anchors_rx = anchors[:, :, -2]
anchors_rw = anchors[:, :, -1]
anchors_rx = anchors_rx * dboxes_w * 0.1 + dboxes_x
anchors_rw = tf.exp(0.1 * anchors_rw) * dboxes_w
# anchors_class=anchors[:,:,:config.num_classes]
num_class = anchors.get_shape().as_list()[-1] - 3
anchors_class = anchors[:, :, :num_class]
return anchors_class, anchors_conf, anchors_rx, anchors_rw
# This function is mainly used for producing matched ground truth with
# each adjusted anchors after predicting one by one
# the matched ground truth may be positive/negative,
# the matched x,w,labels,scores all corresponding to this anchor
def anchor_bboxes_encode(anchors, glabels, gbboxes, Index, config, layer_name, pre_rx=None, pre_rw=None):
num_anchors = config.num_anchors[layer_name]
num_dbox = config.num_dbox[layer_name]
# num_classes = config.num_classes
num_classes = anchors.get_shape().as_list()[-1] - 3
dtype = tf.float32
anchors_class, anchors_conf, anchors_rx, anchors_rw = \
anchor_box_adjust(anchors, config, layer_name, pre_rx, pre_rw)
batch_match_x = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox])
batch_match_w = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox])
batch_match_scores = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox])
batch_match_labels = tf.reshape(tf.constant([], dtype=tf.int32),
[-1, num_anchors * num_dbox, num_classes])
for i in range(config.batch_size):
shape = (num_anchors * num_dbox)
match_x = tf.zeros(shape, dtype)
match_w = tf.zeros(shape, dtype)
match_scores = tf.zeros(shape, dtype)
match_labels_other = tf.ones((num_anchors * num_dbox, 1), dtype=tf.int32)
match_labels_class = tf.zeros((num_anchors * num_dbox, num_classes - 1), dtype=tf.int32)
match_labels = tf.concat([match_labels_other, match_labels_class], axis=-1)
b_anchors_rx = anchors_rx[i]
b_anchors_rw = anchors_rw[i]
b_glabels = glabels[Index[i]:Index[i + 1]]
b_gbboxes = gbboxes[Index[i]:Index[i + 1]]
idx = 0
[idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
match_x, match_w, match_labels, match_scores] = \
tf.while_loop(loop_condition, loop_body,
[idx, b_anchors_rx, b_anchors_rw,
b_glabels, b_gbboxes,
match_x, match_w, match_labels, match_scores])
match_x = tf.reshape(match_x, [-1, num_anchors * num_dbox])
batch_match_x = tf.concat([batch_match_x, match_x], axis=0)
match_w = tf.reshape(match_w, [-1, num_anchors * num_dbox])
batch_match_w = tf.concat([batch_match_w, match_w], axis=0)
match_scores = tf.reshape(match_scores, [-1, num_anchors * num_dbox])
batch_match_scores = tf.concat([batch_match_scores, match_scores], axis=0)
match_labels = tf.reshape(match_labels, [-1, num_anchors * num_dbox, num_classes])
batch_match_labels = tf.concat([batch_match_labels, match_labels], axis=0)
return [batch_match_x, batch_match_w, batch_match_labels, batch_match_scores,
anchors_class, anchors_conf, anchors_rx, anchors_rw]
def in_conv(layer, initer=tf.contrib.layers.xavier_initializer(seed=5)):
net = tf.layers.conv1d(inputs=layer, filters=1024, kernel_size=3, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
out = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=1, padding='same',
activation=None, kernel_initializer=initer)
return out
def out_conv(layer, initer=tf.contrib.layers.xavier_initializer(seed=5)):
net = tf.nn.relu(layer)
out = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
return out
############################ TRAIN and TEST NETWORK LAYER ###############################
def get_trainable_variables():
trainable_variables_scope = [a.name for a in tf.trainable_variables()]
trainable_variables_list = tf.trainable_variables()
trainable_variables = []
for i in range(len(trainable_variables_scope)):
if ("base_feature_network" in trainable_variables_scope[i]) or \
("anchor_layer" in trainable_variables_scope[i]) or \
("predict_layer" in trainable_variables_scope[i]):
trainable_variables.append(trainable_variables_list[i])
return trainable_variables
def base_feature_network(X, mode=''):
# main network
initer = tf.contrib.layers.xavier_initializer(seed=5)
with tf.variable_scope("base_feature_network" + mode):
# ----------------------- Base layers ----------------------
# [batch_size, 128, 1024]
net = tf.layers.conv1d(inputs=X, filters=512, kernel_size=9, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 128, 512]
net = tf.layers.max_pooling1d(inputs=net, pool_size=4, strides=2, padding='same')
# [batch_size, 64, 512]
net = tf.layers.conv1d(inputs=net, filters=512, kernel_size=9, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 64, 512]
net = tf.layers.max_pooling1d(inputs=net, pool_size=4, strides=2, padding='same')
# [batch_size, 32, 512]
return net
def main_anchor_layer(net, mode=''):
# main network
initer = tf.contrib.layers.xavier_initializer(seed=5)
with tf.variable_scope("main_anchor_layer" + mode):
# ----------------------- Anchor layers ----------------------
MAL1 = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=2, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 16, 1024]
MAL2 = tf.layers.conv1d(inputs=MAL1, filters=1024, kernel_size=3, strides=2, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 8, 1024]
MAL3 = tf.layers.conv1d(inputs=MAL2, filters=1024, kernel_size=3, strides=2, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 4, 1024]
return MAL1, MAL2, MAL3
def branch_anchor_layer(MALs, name=''):
MAL1, MAL2, MAL3 = MALs
with tf.variable_scope("branch_anchor_layer" + name):
BAL3 = out_conv(in_conv(MAL3)) # [batch_size, 4, 1024]
BAL3_expd = tf.expand_dims(BAL3, 1) # [batch_size, 1, 4, 1024]
BAL3_de = tf.layers.conv2d_transpose(BAL3_expd, 1024, kernel_size=(1, 4),
strides=(1, 2), padding='same') # [batch_size, 1, 8, 1024]
BAL3_up = tf.reduce_sum(BAL3_de, [1]) # [batch_size, 8, 1024]
MAL2_in_conv = in_conv(MAL2)
BAL2 = out_conv((MAL2_in_conv * 2 + BAL3_up) / 3) # [batch_size, 8, 1024]
MAL2_expd = tf.expand_dims(BAL2, 1) # [batch_size, 1, 8, 1024]
MAL2_de = tf.layers.conv2d_transpose(MAL2_expd, 1024, kernel_size=(1, 4),
strides=(1, 2), padding='same') # [batch_size, 1, 16, 1024]
MAL2_up = tf.reduce_sum(MAL2_de, [1]) # [batch_size, 16, 1024]
MAL1_in_conv = in_conv(MAL1)
BAL1 = out_conv((MAL1_in_conv * 2 + MAL2_up) / 3) # [batch_size, 16, 1024]
return BAL1, BAL2, BAL3
# action or not + conf + location (center&width)
# Anchor Binary Classification and Regression
def biClsReg_predict_layer(config, layer, layer_name, specific_layer):
num_dbox = config.num_dbox[layer_name]
with tf.variable_scope("biClsReg_predict_layer" + layer_name + specific_layer):
anchor = tf.layers.conv1d(inputs=layer, filters=num_dbox * (1 + 3),
kernel_size=3, padding='same', kernel_initializer=
tf.contrib.layers.xavier_initializer(seed=5))
anchor = tf.reshape(anchor, [config.batch_size, -1, (1 + 3)])
return anchor
# action or not + class score + conf + location (center&width)
# Action Multi-Class Classification and Regression
def mulClsReg_predict_layer(config, layer, layer_name, specific_layer):
num_dbox = config.num_dbox[layer_name]
ncls = config.num_classes
with tf.variable_scope("mulClsReg_predict_layer" + layer_name + specific_layer):
anchor = tf.layers.conv1d(inputs=layer, filters=num_dbox * (ncls + 3),
kernel_size=3, padding='same', kernel_initializer=
tf.contrib.layers.xavier_initializer(seed=5))
anchor = tf.reshape(anchor, [config.batch_size, -1, (ncls + 3)])
return anchor
#################################### TRAIN LOSS #####################################
def loss_function(anchors_class, anchors_conf, anchors_xmin, anchors_xmax,
match_x, match_w, match_labels, match_scores, config):
match_xmin = match_x - match_w / 2
match_xmax = match_x + match_w / 2
pmask = tf.cast(match_scores > 0.5, dtype=tf.float32)
num_positive = tf.reduce_sum(pmask)
num_entries = tf.cast(tf.size(match_scores), dtype=tf.float32)
hmask = match_scores < 0.5
hmask = tf.logical_and(hmask, anchors_conf > 0.5)
hmask = tf.cast(hmask, dtype=tf.float32)
num_hard = tf.reduce_sum(hmask)
# the meaning of r_negative: the ratio of anchors need to choose from easy negative anchors
# If we have `num_positive` positive anchors in training data,
# then we only need `config.negative_ratio*num_positive` negative anchors
# r_negative=(number of easy negative anchors need to choose from all easy negative) / (number of easy negative)
# the meaning of easy negative: all-pos-hard_neg
r_negative = (config.negative_ratio - num_hard / num_positive) * num_positive / (
num_entries - num_positive - num_hard)
r_negative = tf.minimum(r_negative, 1)
nmask = tf.random_uniform(tf.shape(pmask), dtype=tf.float32)
nmask = nmask * (1. - pmask)
nmask = nmask * (1. - hmask)
nmask = tf.cast(nmask > (1. - r_negative), dtype=tf.float32)
# class_loss
weights = pmask + nmask + hmask
class_loss = tf.nn.softmax_cross_entropy_with_logits(logits=anchors_class, labels=match_labels)
class_loss = tf.losses.compute_weighted_loss(class_loss, weights)
# correct_pred = tf.equal(tf.argmax(anchors_class, 2), tf.argmax(match_labels, 2))
# accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))
# loc_loss
weights = pmask
loc_loss = abs_smooth(anchors_xmin - match_xmin) + abs_smooth(anchors_xmax - match_xmax)
loc_loss = tf.losses.compute_weighted_loss(loc_loss, weights)
# conf loss
weights = pmask + nmask + hmask
# match_scores is from jaccard_with_anchors
conf_loss = abs_smooth(match_scores - anchors_conf)
conf_loss = tf.losses.compute_weighted_loss(conf_loss, weights)
return class_loss, loc_loss, conf_loss
#################################### POST PROCESS #####################################
def min_max_norm(X):
# map [0,1] -> [0.5,0.73] (almost linearly) ([-1, 0] -> [0.26, 0.5])
return 1.0 / (1.0 + np.exp(-1.0 * X))
def post_process(df, config):
class_scores_class = [(df['score_' + str(i)]).values[:].tolist() for i in range(21)]
class_scores_seg = [[class_scores_class[j][i] for j in range(21)] for i in range(len(df))]
class_real = [0] + config.class_real # num_classes + 1
# save the top 2 or 3 score element
# append the largest score element
class_type_list = []
class_score_list = []
for i in range(len(df)):
class_score = np.array(class_scores_seg[i][1:]) * min_max_norm(df.conf.values[i])
class_score = class_score.tolist()
class_type = class_real[class_score.index(max(class_score)) + 1]
class_type_list.append(class_type)
class_score_list.append(max(class_score))
resultDf1 = pd.DataFrame()
resultDf1['out_type'] = class_type_list
resultDf1['out_score'] = class_score_list
resultDf1['start'] = df.xmin.values[:]
resultDf1['end'] = df.xmax.values[:]
# append the second largest score element
class_type_list = []
class_score_list = []
for i in range(len(df)):
class_score = np.array(class_scores_seg[i][1:]) * min_max_norm(df.conf.values[i])
class_score = class_score.tolist()
class_score[class_score.index(max(class_score))] = 0
class_type = class_real[class_score.index(max(class_score)) + 1]
class_type_list.append(class_type)
class_score_list.append(max(class_score))
resultDf2 = pd.DataFrame()
resultDf2['out_type'] = class_type_list
resultDf2['out_score'] = class_score_list
resultDf2['start'] = df.xmin.values[:]
resultDf2['end'] = df.xmax.values[:]
resultDf1 = pd.concat([resultDf1, resultDf2])
# # append the third largest score element (improve little and slow)
class_type_list = []
class_score_list = []
for i in range(len(df)):
class_score = np.array(class_scores_seg[i][1:]) * min_max_norm(df.conf.values[i])
class_score = class_score.tolist()
class_score[class_score.index(max(class_score))] = 0
class_score[class_score.index(max(class_score))] = 0
class_type = class_real[class_score.index(max(class_score)) + 1]
class_type_list.append(class_type)
class_score_list.append(max(class_score))
resultDf2 = pd.DataFrame()
resultDf2['out_type'] = class_type_list
resultDf2['out_score'] = class_score_list
resultDf2['start'] = df.xmin.values[:]
resultDf2['end'] = df.xmax.values[:]
resultDf1 = | pd.concat([resultDf1, resultDf2]) | pandas.concat |
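# Note: a NumPy sketch of the abs_smooth() loss defined above, handy for checking the
# piecewise form quoted in its comment (x**2 / 2 for |x| < 1, |x| - 0.5 otherwise).
import numpy as np

def abs_smooth_np(x):
    absx = np.abs(x)
    minx = np.minimum(absx, 1.0)
    return 0.5 * ((absx - 1.0) * minx + absx)

assert np.isclose(abs_smooth_np(0.5), 0.125)  # 0.5 ** 2 / 2
assert np.isclose(abs_smooth_np(3.0), 2.5)    # 3.0 - 0.5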
import os
import subprocess
from glob import glob
import argparse
import sys
from em import molecule
from em.dataset import metrics
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
from concurrent.futures import wait
from scipy.spatial import cKDTree
import numpy as np
import pandas as pd
import traceback
import random
import json
from json import encoder
from skimage.measure import regionprops
from scipy.ndimage import distance_transform_edt, gaussian_filter
from Bio.PDB import PDBParser, PDBIO
def convert(o):
if isinstance(o, np.generic): return o.item()
raise TypeError
# Intersect the simulated per-chain maps with the original map.
# Any overlap must be annotated.
# Produces a map annotated by label, as float.
# Revisits unassigned voxels using a fullness margin in a single pass,
# computes stats,
# and saves the result to disk.
def annotateSample(map_id, indexes, df, fullness,columns, output_dir):
map_path = df.at[indexes[0], columns['map_path']]
annotated_path = os.path.join(output_dir,map_path.replace('.','_gt.'))
contourLvl = float(df.at[indexes[0], columns['contourLevel']])
map_to_annotate = molecule.Molecule(map_path, recommendedContour=contourLvl)
data_map = map_to_annotate.emMap.data()
map_mask = map_to_annotate.getContourMasks()[1]
result = {}
result['map_path'] = map_path
result['contourLevel'] = contourLvl
result['total'] = map_to_annotate.getVolume()[1]
# Set to 0 all voxels outside contour level, otherwise fill with a marker
marker = 10000
data_map[np.logical_not(map_mask)] = 0
data_map[map_mask] = marker
labels = []
chain_label_id_dict = {}
print('Tagging em map {}'.format(os.path.basename(map_path)))
for i in indexes:
segment_path = df.at[i, columns['subunit_path']]
if os.path.exists(segment_path):
segment_label = int(float(df.at[i, columns['chain_label']]))
chain_label_id_dict[df.at[i,columns['chain_label']]] = df.at[i,columns['chain_id']]
segment_map = molecule.Molecule(segment_path, recommendedContour=0.001)
segment_mask = segment_map.getContourMasks()[1]
print("Number of voxels in segment {}".format(np.sum(segment_mask)))
masks_intersec = np.logical_and(map_mask, segment_mask)
print("Number of voxels in intersection {}".format(np.sum(masks_intersec)))
data_map[masks_intersec] = segment_label
labels.append(segment_label)
print("Chain {}, voxels {}".format(segment_label,segment_map.getVolume()[1]))
print(" Matching {} of {} voxels".format(np.sum(masks_intersec), np.sum(segment_mask)))
else:
            raise ValueError('There is a problem getting segments for {}'.format(segment_path))
# Get non assigned voxels
dim1,dim2,dim3 = np.where(data_map == marker)
nonassigned_points = np.array(list(map(list,zip(dim1,dim2,dim3))))
# Get assigned voxels coords
dim1,dim2,dim3 = np.where(np.logical_and((data_map != marker), (data_map != 0)))
# Combine list of indexes into a list of points in 3D space
assigned_points = list(map(list,zip(dim1,dim2,dim3)))
print("Asigned voxels : {}".format(len(assigned_points)))
print("Non asigned voxels : {}".format(len(nonassigned_points)))
print("Total number of voxels: {}".format(map_to_annotate.getVolume()[1]))
# If any voxel remain
if (len(nonassigned_points) > 0) & (len(assigned_points)>0):
# Create KDTree with assigned points
tree = cKDTree(assigned_points)
# Search for nearest point
d,i = tree.query(nonassigned_points)
neighbors_index = tree.data[i].astype(int)
# Use voxels inside fullnes value only
mask = d <= fullness
mask_inv = np.logical_not(mask)
points_to_reassign = nonassigned_points[mask]
points_to_discard = nonassigned_points[mask_inv]
neighbors_index = neighbors_index[mask]
d1_i, d2_i, d3_i = neighbors_index[:,0], neighbors_index[:,1], neighbors_index[:,2]
# Replace values in map with search result
values_to_map = data_map[d1_i,d2_i,d3_i]
for point,value in zip(points_to_reassign,values_to_map):
data_map[point[0],point[1],point[2]] = value
# Set voxels outside fullness value to 0
for point in points_to_discard:
data_map[point[0],point[1],point[2]] = 0
result['voxels_reasigned'] = np.sum(mask)
result['voxels_discarted'] = np.sum(mask_inv)
else:
print(" No more voxels to assign")
result['voxels_reasigned'] = 0
result['voxels_discarted'] = 0
dim1,dim2,dim3 = np.where(data_map == marker)
if len(dim1)>0:
print("there shuldnt be markers in array of labels.. check this {}".format(os.path.basename(map_path)))
# print labels
voxels_dict = {}
for l in labels:
voxels_dict[l]=np.sum(data_map==l)
filename = map_path.replace(str(map_path[-4:]), '_'+chain_label_id_dict[l]+'.npy')
map_masked = np.copy(data_map)
print("Voxels for label {} :{}".format(l, voxels_dict[l]))
map_masked[data_map==l] = 1.0
map_masked[data_map!=l] = 0.0
print("saved volume of {}".format(map_masked.sum()))
np.save(filename, map_masked)
print("saved {}".format(filename))
# Compute euler numbers
euler_dict = {}
for region in regionprops(data_map.astype(np.int32)):
euler_dict[region.label] = region.euler_number
# Save map
result['euler_segments'] = json.dumps(euler_dict, default=convert)
result['voxels_assigned'] = json.dumps(voxels_dict, default=convert)
result['tag_path'] = annotated_path
result['map_id'] = map_id
map_to_annotate.setData(data_map)
map_to_annotate.save(annotated_path)
return result
def annotatePoints(df, i, output_path, number_points=3, gaussian_std=3):
output_df = pd.DataFrame(columns=['id','map_path','contourLevel','subunit', 'tagged_path', 'number_points','tagged_points_path'])
#print("aa{}".format(df.iloc[i]['tagged_path']))
tagged_map = molecule.Molecule(df.iloc[i]['tagged_path'], 0.001).getEmMap().data()
#print("unique",np.unique(tagged_map))
for region in regionprops(tagged_map.astype(np.int32)):
label = int(region.label)
region_gt = np.copy(tagged_map)
region_gt[ region_gt != label ] = 0.0
region_gt[ region_gt == label ] = 1.0
#print("number",np.sum(region_gt==1.0))
#print("in label {}".format(label))
basename = df.iloc[i]['id']+'_'+str(label)+'.npy'
region_path = os.path.join(output_path,basename)
#print("pathh {}".format(region_path))
distance = distance_transform_edt(region_gt)
distance[distance != 1] = 0
index_x, index_y, index_z = np.where(distance == 1)
chosen_indexes = np.random.choice(len(index_x), number_points, replace=False)
#print("indexes:",chosen_indexes)
index_x = index_x[chosen_indexes]
index_y = index_y[chosen_indexes]
index_z = index_z[chosen_indexes]
point_array = np.zeros_like(region_gt)
point_array[index_x,index_y,index_z] = 1.0
point_array = gaussian_filter(point_array, gaussian_std)
np.save(region_path,point_array)
#print("saved {}".format(np.sum(point_array)))
output_df = output_df.append({'id':df.iloc[i]['id'], 'map_path':df.iloc[i]['map_path'], 'contourLevel':df.iloc[i]['contourLevel'], 'subunit':label, 'tagged_path':df.iloc[i]['tagged_path'], 'number_points':number_points, 'tagged_points_path':region_path}, ignore_index=True)
#print("output_df: ", output_df)
return output_df
def compute_adjacency(df, i):
# Get EM map id
map_id = df.iloc[i]['id']
# Get pdb path and chain id
pdb_path = df.iloc[i]['pdb_path']
chain = df.iloc[i]['fitted_entries']
# Create parser and get readed object
parser = PDBParser(PERMISSIVE = True, QUIET = True)
pdb_obj = parser.get_structure(chain, pdb_path)
# Compute dictionary to translate chain id (letter) to chain label (number)
chain_id_list = [chain._id for chain in pdb_obj.get_chains()]
chain_label_list = [i for i in range(1,len(chain_id_list)+1)]
dict_label_id_chain = dict(zip(chain_id_list,chain_label_list))
# Create dictionaries to store coords and kdtree for each chain
dict_chain_kdtree = dict()
# Create dictionary to store final adjency data
adjacency_dict = dict()
# Compute kdtree for each chain and assign it along with their coords to the corresponding chain label in dict
for c in pdb_obj.get_chains():
ca_coord_list = [atom.coord for atom in c.get_atoms() if atom.name=="CA"]
chain_id = c.id
print("get {} atoms for chain {}".format(len(ca_coord_list), chain_id))
if len(ca_coord_list) == 0:
continue
else:
kdtree = cKDTree(ca_coord_list)
dict_chain_kdtree[dict_label_id_chain[chain_id]] = kdtree
# Loop over chains again to compute adjacency (if exists an atom from other chain at a distance of 4 o less Angstroms )
for c in dict_chain_kdtree.keys():
# Get atoms coords for current chain from dict
current_chain_adjacency_dict = dict()
current_kdtree = dict_chain_kdtree[c]
# For every other chain, loop atoms to find adjacency or until atom list is empty.
for c_i in dict_chain_kdtree.keys():
if c == c_i:
continue
else:
print("Comparing {} against {}".format(c,c_i))
# Get kdtree to compare with
chain_kdtree = dict_chain_kdtree[c_i]
# Get adjacent atoms within radius of 4 Angstroms
adjacent_atoms = current_kdtree.query_ball_tree(chain_kdtree, r=5)
number_adjacencies = np.sum([len(adjacent) for adjacent in adjacent_atoms])
if number_adjacencies > 0:
current_chain_adjacency_dict[c_i] = 1
else:
current_chain_adjacency_dict[c_i] = 0
adjacency_dict[c] = current_chain_adjacency_dict
label_id_chain = json.dumps(dict_label_id_chain, default=convert)
adjacency = json.dumps(adjacency_dict, default=convert)
return pd.Series( [map_id, label_id_chain, adjacency], index=['map_id','chain_id_to_label','adjacency'])
def mapMetricsCompute(row,match_dict):
map_id = row['id']
tagged_path = row['tagged_path']
contour = 0.001
compare_path = match_dict[map_id]
sample = molecule.Molecule(tagged_path, contour)
labeled = molecule.Molecule(compare_path, contour)
iou = metrics.intersection_over_union(sample, labeled)
h = metrics.homogenity(sample, labeled)
p = metrics.proportion(sample, labeled)
c = metrics.consistency(sample, labeled)
return pd.Series( [map_id, row['map_path'], tagged_path, row['contourLevel'], compare_path, iou, h, p, c ], index=['id', 'map_path','tagged_path', 'contourLevel', 'reference_path', 'iou', 'homogenity', 'proportion', 'consistency'])
def doParallelTagging(df, fullness, gt_path, columns):
unique_id_list = df[columns['id']].unique().tolist()
# Construct dataframe to store results
output_df = pd.DataFrame(columns=['id','map_path','contourLevel','tagged_path','subunits','matched_subunits','voxels','voxels_matched','voxels_discarted','voxels_reassigned','voxels_assigned','euler_segments'])
print("Spawn procecess...")
comm = MPI.COMM_WORLD
size = comm.Get_size()
with MPICommExecutor(comm, root=0, worker_size=size) as executor:
if executor is not None:
futures = []
# For each map, perform annotation
for i in unique_id_list:
subunit_indexes = df.loc[df[columns['id']]==i].index.tolist()
futures.append(executor.submit(annotateSample,i, subunit_indexes, df, fullness, columns, gt_path))
wait(futures)
for f in futures:
try:
res = f.result()
map_id = res['map_id']
voxels_assigned = json.loads(res['voxels_assigned'])
euler_segments = json.loads(res['euler_segments'])
voxels_reassigned = res['voxels_reasigned']
voxels_discarted = res['voxels_discarted']
tagged_path = res['tag_path']
map_path = res['map_path']
contour = res['contourLevel']
voxels_num = res['total']
print("Received {}".format(res))
# Get number of segments matched
segments_matched = 0
voxels_matched = 0
for key in voxels_assigned.keys():
matched_num = voxels_assigned[key]
if matched_num > 0:
segments_matched+=1
voxels_matched += matched_num
#'tagged_path', 'subunits','matched_subunits', 'voxels', 'voxels_matched', 'matched_per_segment'
output_df = output_df.append({'id':map_id, 'map_path':map_path, 'contourLevel':contour, 'tagged_path':tagged_path, 'subunits':len(voxels_assigned.keys()), 'matched_subunits':segments_matched, 'voxels':voxels_num, 'voxels_matched':voxels_matched, 'voxels_discarted':voxels_discarted, 'voxels_reassigned':voxels_reassigned, 'voxels_assigned':voxels_assigned, 'euler_segments':euler_segments}, ignore_index=True)
except ValueError as error:
print("Error asignating segments for {}".format(map_id))
return output_df
def doParallelAdjacency(df):
id_list = df.index.tolist()
print("Spawn procecess...")
comm = MPI.COMM_WORLD
size = comm.Get_size()
output_df = pd.DataFrame(columns=['map_id','chain_id_to_label', 'adjacency'])
'''
with MPICommExecutor(comm, root=0, worker_size=size) as executor:
if executor is not None:
futures = []
# For each map, perform annotation
for i in id_list:
futures.append(executor.submit(compute_adjacency,df,i))
wait(futures)
for f in futures:
try:
res = f.result()
print("Received {}".format(res))
output_df = output_df.append(res, ignore_index=True)
except Exception as error:
print(traceback.format_exc())
'''
for i in id_list:
res = compute_adjacency(df,i)
output_df = output_df.append(res, ignore_index=True)
return output_df
def doParallelExtremePointAnnotation(df, output_path):
indexes = df.index.tolist()
output_df = | pd.DataFrame(columns=['id','map_path','contourLevel','subunit', 'tagged_path', 'number_points','tagged_points_path']) | pandas.DataFrame |
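# Note: a toy sketch of the cKDTree reassignment used in annotateSample() above: each
# unassigned voxel takes the label of its nearest assigned voxel when the distance is
# within the fullness margin, otherwise it stays background (0).
import numpy as np
from scipy.spatial import cKDTree

assigned = np.array([[0, 0, 0], [10, 10, 10]])
assigned_labels = np.array([1, 2])
unassigned = np.array([[1, 0, 0], [9, 10, 10], [50, 50, 50]])
dist, idx = cKDTree(assigned).query(unassigned)
fullness = 3
new_labels = np.where(dist <= fullness, assigned_labels[idx], 0)  # -> [1, 2, 0]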
"""
Evaluates the model.
"""
import argparse
import matplotlib as mpl
# do not use Qt/X that require $DISPLAY, must be called before importing pyplot
mpl.use('Agg')
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from prepare_training_data import load_indexes, load_transformers
import plots
def evaluate_model(data_dir, model_dir):
evaluation_dir = model_dir + '/evaluation'
ix = load_indexes(data_dir)
predictions = pd.read_csv(model_dir + '/output-data/predictions.csv')
instr_family_le, scaler, _ = load_transformers(model_dir)
training_history = pd.read_csv(evaluation_dir + '/learning_curves.csv')
final_metrics = | pd.read_csv(evaluation_dir + '/final_metrics.csv', index_col=0) | pandas.read_csv |
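# Note: a hedged sketch of plotting the learning curves loaded above; the 'loss' and
# 'val_loss' column names are assumptions about learning_curves.csv, not confirmed here.
import matplotlib.pyplot as plt

def plot_learning_curves(history_df, output_png):
    fig, ax = plt.subplots()
    for column in ("loss", "val_loss"):
        if column in history_df.columns:
            ax.plot(history_df[column], label=column)
    ax.set_xlabel("epoch")
    ax.legend()
    fig.savefig(output_png)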
"""Тесты для таблицы с торгуемыми ценными бумагами."""
from datetime import date
import pandas as pd
import pytest
from poptimizer.data import ports
from poptimizer.data.domain import events
from poptimizer.data.domain.tables import base, securities
from poptimizer.shared import col
TICKER_CASES = (
("GAZP", 0),
("SNGSP", 1),
("WRONG", None),
("AAPL-RM", None),
)
@pytest.mark.parametrize("ticker, answer", TICKER_CASES)
def test_ticker_type(ticker, answer):
    """Check whether the ticker corresponds to an ordinary share."""
if answer is None:
with pytest.raises(securities.WrongTickerTypeError, match=ticker):
securities._ticker_type(ticker)
else:
assert securities._ticker_type(ticker) is answer
@pytest.fixture(scope="function", name="table")
def create_table():
    """Creates an empty table for the tests."""
id_ = base.create_id(ports.SECURITIES)
return securities.Securities(id_)
def test_update_cond(table):
    """An update always happens when an event arrives."""
assert table._update_cond(object())
@pytest.mark.asyncio
async def test_load_and_format_df(table, mocker):
    """Data is loaded and a column with the market name is added."""
fake_gateway = mocker.AsyncMock()
fake_gateway.return_value = pd.DataFrame([1, 2])
table._gateway = fake_gateway
df = await table._load_and_format_df(
"m1",
"b1",
lambda index: 1 + index * 2,
)
pd.testing.assert_frame_equal(
df,
pd.DataFrame(
[[1, "m1", 1], [2, "m1", 3]],
columns=[0, col.MARKET, col.TICKER_TYPE],
),
)
fake_gateway.assert_called_once_with(market="m1", board="b1")
@pytest.mark.asyncio
async def test_prepare_df(table, mocker):
    """Data is loaded, concatenated and sorted."""
dfs = [
| pd.DataFrame([1, 4], index=["AKRN", "RTKMP"]) | pandas.DataFrame |
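# Note: a small standalone sketch of the concat-and-sort behaviour that test_prepare_df
# above is exercising; the per-board frames here are illustrative stand-ins.
import pandas as pd

df_shares = pd.DataFrame([1, 4], index=["AKRN", "RTKMP"])
df_etf = pd.DataFrame([2], index=["FXUS"])
combined = pd.concat([df_shares, df_etf]).sort_index()
# combined.index -> ["AKRN", "FXUS", "RTKMP"]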
# Copyright (c) 2019, MD2K Center of Excellence
# - <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import pandas as pd
from geopy.distance import great_circle
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.group import GroupedData
from pyspark.sql.types import StructField, StructType, DoubleType, IntegerType
from scipy.spatial import ConvexHull
from shapely.geometry.multipoint import MultiPoint
from sklearn.cluster import DBSCAN
from cerebralcortex.algorithms.utils.mprov_helper import CC_MProvAgg
from cerebralcortex.algorithms.utils.util import update_metadata
from cerebralcortex.core.datatypes import DataStream
from cerebralcortex.core.metadata_manager.stream.metadata import Metadata
def impute_gps_data(ds, accuracy_threshold:int=100):
"""
    Impute GPS data
    Args:
        ds (DataStream): Windowed/grouped DataStream object
        accuracy_threshold (int): accuracy value above which a GPS sample is treated as missing
Returns:
DataStream object
"""
schema = ds._data.schema
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def gps_imputer(data):
data = data.sort_values('localtime').reset_index(drop=True)
        data.loc[data.accuracy > accuracy_threshold, 'latitude'] = np.nan
        data.loc[data.accuracy > accuracy_threshold, 'longitude'] = np.nan
data = data.fillna(method='ffill').dropna()
return data
# check if datastream object contains grouped type of DataFrame
if not isinstance(ds._data, GroupedData):
raise Exception(
"DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm")
data = ds._data.apply(gps_imputer)
results = DataStream(data=data, metadata=Metadata())
metadta = update_metadata(stream_metadata=results.metadata,
stream_name="gps--org.md2k.imputed",
stream_desc="impute GPS data",
module_name="cerebralcortex.algorithms.gps.clustering.impute_gps_data",
module_version="1.0.0",
authors=[{"Azim": "<EMAIL>"}])
results.metadata = metadta
return results
def cluster_gps(ds: DataStream, epsilon_constant:int = 1000,
km_per_radian:int = 6371.0088,
geo_fence_distance:int = 30,
minimum_points_in_cluster:int = 1,
latitude_column_name:str = 'latitude',
longitude_column_name:str = 'longitude'):
"""
Cluster GPS data - Algorithm used to cluster GPS data is based on DBScan
Args:
ds (DataStream): Windowed/grouped DataStream object
epsilon_constant (int):
km_per_radian (int):
geo_fence_distance (int):
minimum_points_in_cluster (int):
latitude_column_name (str):
longitude_column_name (str):
Returns:
DataStream object
"""
centroid_id_name = 'centroid_id'
features_list = [StructField('centroid_longitude', DoubleType()),
StructField('centroid_latitude', DoubleType()),
StructField('centroid_id', IntegerType()),
StructField('centroid_area', DoubleType())]
schema = StructType(ds._data._df.schema.fields + features_list)
column_names = [a.name for a in schema.fields]
def reproject(latitude, longitude):
from math import pi, cos, radians
earth_radius = 6371009 # in meters
lat_dist = pi * earth_radius / 180.0
y = [lat * lat_dist for lat in latitude]
x = [long * lat_dist * cos(radians(lat))
for lat, long in zip(latitude, longitude)]
return np.column_stack((x, y))
def get_centermost_point(cluster: np.ndarray) -> object:
"""
Get center most point of a cluster
Args:
cluster (np.ndarray):
Returns:
"""
try:
if cluster.shape[0]>=3:
points_project = reproject(cluster[:,0],cluster[:,1])
hull = ConvexHull(points_project)
area = hull.area
else:
area = 1
except:
area = 1
centroid = (
MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
centermost_point = min(cluster, key=lambda point: great_circle(point,
centroid).m)
return list(centermost_point) + [area]
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
@CC_MProvAgg('gps--org.md2k.phonesensor--phone', 'gps_clustering', 'gps--org.md2k.clusters', ['user', 'timestamp'], ['user', 'timestamp'])
def gps_clustering(data):
if data.shape[0] < minimum_points_in_cluster:
            return pd.DataFrame([], columns=column_names)
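# Hedged sketch (an assumption; the remainder of gps_clustering is not shown here): the
# usual DBSCAN-on-GPS recipe behind cluster_gps converts the geo-fence distance into a
# haversine epsilon in radians and clusters raw latitude/longitude pairs.
def _sketch_dbscan_gps(coords: np.ndarray,
                       geo_fence_distance: int = 30,
                       epsilon_constant: int = 1000,
                       km_per_radian: float = 6371.0088,
                       minimum_points_in_cluster: int = 1) -> np.ndarray:
    # coords has shape (n, 2) holding [latitude, longitude] in degrees
    eps = geo_fence_distance / (epsilon_constant * km_per_radian)
    db = DBSCAN(eps=eps, min_samples=minimum_points_in_cluster,
                algorithm='ball_tree', metric='haversine')
    return db.fit(np.radians(coords)).labels_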
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import arrow
import pandas as pd
import requests
import json
from functools import reduce
# RU-1: European and Uralian Market Zone (Price Zone 1)
# RU-2: Siberian Market Zone (Price Zone 2)
# RU-AS: Russia East Power System (2nd synchronous zone)
# Handling of hours: data at t on API side corresponds to
# production / consumption from t to t+1
BASE_EXCHANGE_URL = 'http://br.so-ups.ru/webapi/api/flowDiagramm/GetData?'
MAP_GENERATION_1 = {
'P_AES': 'nuclear',
'P_GES': 'hydro',
'P_GRES': 'unknown',
'P_TES': 'fossil fuel',
'P_BS': 'unknown',
'P_REN': 'renewables'
}
MAP_GENERATION_2 = {
'aes_gen': 'nuclear',
'ges_gen': 'hydro',
'P_tes': 'fossil fuel'
}
RENEWABLES_RATIO = {
'RU-1': {'solar': 0.5, 'wind': 0.5},
'RU-2': {'solar': 1.0, 'wind': 0.0}
}
FOSSIL_FUEL_RATIO = {
'RU-1': {'coal': 0.060, 'gas': 0.892, 'oil': 0.004, 'unknown': 0.044},
'RU-2': {'coal': 0.864, 'gas': 0.080, 'oil': 0.004, 'unknown': 0.052},
'RU-AS': {'coal': 0.611, 'gas': 0.384, 'oil': 0.005, 'unknown': 0.00}
}
exchange_ids = {'RU-AS->CN': 764,
'RU->MN': 276,
'RU-2->MN': 276,
'RU->KZ': 785,
'RU-1->KZ': 2394,
'RU-2->KZ': 344,
'RU-2->RU-1': 139,
'RU->GE': 752,
'RU-1->GE': 752,
'AZ->RU': 598,
'AZ->RU-1': 598,
'BY->RU': 321,
'BY->RU-1': 321,
'RU->FI': 187,
'RU-1->FI': 187,
'RU-KGD->LT': 212,
'RU-1->UA-CR': 5688,
'UA->RU-1': 880}
# Each exchange is contained in a div tag with a "data-id" attribute that is unique.
tz = 'Europe/Moscow'
def fetch_production(zone_key='RU', session=None, target_datetime=None, logger=None) -> list:
"""Requests the last known production mix (in MW) of a given country."""
if zone_key == 'RU':
# Get data for all zones
dfs = {}
for subzone_key in ['RU-1', 'RU-2', 'RU-AS']:
data = fetch_production(subzone_key, session, target_datetime, logger)
            df = pd.DataFrame(data)
import pandas as pd
from primus.category import OneHotEncoder
def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes():
train = pd.DataFrame({'city': ['Chicago', 'Seattle']})
test = pd.DataFrame({'city': ['Chicago', 'Detroit']})
expected_result = pd.DataFrame({'city_1': [1, 0],
'city_2': [0, 0]},
columns=['city_1', 'city_2'])
print("\ntrain\n", train)
print("test\n", test)
print("expected \n", expected_result)
enc = OneHotEncoder(handle_unknown='value')
result = enc.fit(train).transform(test)
print("result\n", result)
    pd.testing.assert_frame_equal(expected_result, result)
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
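# Illustrative check (not part of the original script): stripID keeps everything after the
# first occurrence of the marker string, e.g. a query-string product id.
assert stripID("https://shop.example/product?id=123", "id=") == "123"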
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).get_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Azoom():
shop_id = 20
name = 'azoom'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aroom1988.com/categories/view-all?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 24):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.strip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 24):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div/div" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 24):
p += 1
continue
i += 1
if(i == 24):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Roxy():
shop_id = 21
name = 'roxy'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \
str(p)
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 65):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "default=")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).get_attribute("data-src")
except:
i += 1
if(i == 65):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text
ori_price = ori_price.replace('TWD', "")
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = ""
except:
i += 1
if(i == 65):
p += 1
continue
i += 1
if(i == 65):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Shaxi():
shop_id = 22
name = 'shaxi'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.shaxi.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cici():
shop_id = 23
name = 'cici'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cici2.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Amesoeur():
shop_id = 25
name = 'amesour'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \
str(p)
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('product-id')
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Singular():
shop_id = 27
name = 'singular'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.singular-official.com/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc"
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(i < 51):
try:
                title = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
                pic_link = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
                ori_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
ori_price = ori_price.split()
ori_price = ori_price[0]
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Folie():
shop_id = 28
name = 'folie'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.folief.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Corban():
shop_id = 29
name = 'corban'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.corban.com.tw/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc&tags=ALL%20ITEMS"
try:
chrome.get(url)
except:
break
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
                pic_link = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]//img" % (i,)).get_attribute('src')
                sale_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
                ori_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Gmorning():
shop_id = 30
name = 'gmorning'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.gmorning.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def July():
shop_id = 31
name = 'july'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped rows
"""
dataset = AbstractDataset()
"""
from collections import OrderedDict, defaultdict
import json
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
import random
def make_perfect_forecast(prices, horizon):
prices = np.array(prices).reshape(-1, 1)
forecast = np.hstack([np.roll(prices, -i) for i in range(0, horizon)])
return forecast[:-(horizon-1), :]
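# Illustrative check (an assumption, not in the original module): with a 4-step price series
# and a horizon of 2, each row pairs the price at t with the price at t+1, and the final
# horizon-1 rows are dropped so no forecast runs past the end of the series.
def _demo_perfect_forecast():
    forecast = make_perfect_forecast([10, 20, 30, 40], horizon=2)
    assert np.array_equal(forecast, np.array([[10, 20], [20, 30], [30, 40]]))
    return forecast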
def load_episodes(path):
# pass in list of filepaths
if isinstance(path, list):
if isinstance(path[0], pd.DataFrame):
# list of dataframes?
return path
else:
# list of paths
episodes = [Path(p) for p in path]
print(f'loading {len(episodes)} from list')
csvs = [pd.read_csv(p, index_col=0) for p in tqdm(episodes) if p.suffix == '.csv']
parquets = [pd.read_parquet(p) for p in tqdm(episodes) if p.suffix == '.parquet']
eps = csvs + parquets
print(f'loaded {len(episodes)} from list')
return eps
# pass in directory
elif Path(path).is_dir() or isinstance(path, str):
path = Path(path)
episodes = [p for p in path.iterdir() if p.suffix == '.csv']
else:
path = Path(path)
assert path.is_file() and path.suffix == '.csv'
episodes = [path, ]
print(f'loading {len(episodes)} from {path.name}')
eps = [pd.read_csv(p, index_col=0) for p in tqdm(episodes)]
print(f'loaded {len(episodes)} from {path.name}')
return eps
def round_nearest(x, divisor):
return x - (x % divisor)
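# Illustrative check (not in the original module): round_nearest floors x to the nearest
# lower multiple of divisor, which is how the test episodes are trimmed to an even
# multiple of n_batteries further down.
def _demo_round_nearest():
    assert round_nearest(10, 4) == 8
    assert round_nearest(12, 4) == 12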
from abc import ABC, abstractmethod
class AbstractDataset(ABC):
def get_data(self, cursor):
# relies on self.dataset
return OrderedDict({k: d[cursor] for k, d in self.dataset.items()})
def reset(self, mode=None):
# can dispatch based on mode, or just reset
# should return first obs using get_data
return self.get_data(0)
def setup_test(self):
# called by energypy.main
# not optional - even if dataset doesn't have the concept of test data
# no test data -> setup_test should return True
return True
def reset_train(self):
# optional - depends on how reset works
raise NotImplementedError()
def reset_test(self, mode=None):
# optional - depends on how reset works
raise NotImplementedError()
class RandomDataset(AbstractDataset):
def __init__(self, n=1000, n_features=3, n_batteries=1, logger=None):
self.dataset = self.make_random_dataset(n, n_features, n_batteries)
self.test_done = True # no notion of test data for random data
self.reset()
def make_random_dataset(self, n, n_features, n_batteries):
np.random.seed(42)
# (timestep, batteries, features)
prices = np.random.uniform(0, 100, n*n_batteries).reshape(n, n_batteries, 1)
features = np.random.uniform(0, 100, n*n_features*n_batteries).reshape(n, n_batteries, n_features)
return {'prices': prices, 'features': features}
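# Illustrative usage (an assumption, not part of the original file): RandomDataset serves a
# (battery, feature) slice per timestep through get_data, keyed by 'prices' and 'features'.
def _demo_random_dataset():
    ds = RandomDataset(n=16, n_features=3, n_batteries=2)
    first = ds.reset()
    assert first['prices'].shape == (2, 1)
    assert first['features'].shape == (2, 3)
    return first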
class NEMDataset(AbstractDataset):
def __init__(
self,
n_batteries,
train_episodes=None,
test_episodes=None,
price_col='price [$/MWh]',
logger=None
):
self.n_batteries = n_batteries
self.price_col = price_col
train_episodes = load_episodes(train_episodes)
self.episodes = {
'train': train_episodes,
# our random sampling done on train episodes
'random': train_episodes,
'test': load_episodes(test_episodes),
}
# want test episodes to be a multiple of the number of batteries
episodes_before = len(self.episodes['test'])
lim = round_nearest(len(self.episodes['test'][:]), self.n_batteries)
self.episodes['test'] = self.episodes['test'][:lim]
assert len(self.episodes['test']) % self.n_batteries == 0
episodes_after = len(self.episodes['test'])
print(f'lost {episodes_before - episodes_after} test episodes due to even multiple')
# test_done is a flag used to control which dataset we sample from
# it's a bit hacky
self.test_done = True
self.reset()
def reset(self, mode='train'):
if mode == 'test':
return self.reset_test()
else:
return self.reset_train()
def setup_test(self):
# called by energypy.main
self.test_done = False
self.test_episodes_idx = list(range(0, len(self.episodes['test'])))
return self.test_done
def reset_train(self):
episodes = random.sample(self.episodes['train'], self.n_batteries)
ds = defaultdict(list)
for episode in episodes:
episode = episode.copy()
prices = episode.pop(self.price_col)
ds['prices'].append(prices.reset_index(drop=True).values.reshape(-1, 1, 1))
ds['features'].append(episode.reset_index(drop=True).values.reshape(prices.shape[0], 1, -1))
# TODO could call this episode
self.dataset = {
'prices': np.concatenate(ds['prices'], axis=1),
'features': np.concatenate(ds['features'], axis=1),
}
return self.get_data(0)
def reset_test(self):
episodes = self.test_episodes_idx[:self.n_batteries]
self.test_episodes_idx = self.test_episodes_idx[self.n_batteries:]
ds = defaultdict(list)
for episode in episodes:
episode = self.episodes['test'][episode].copy()
prices = episode.pop(self.price_col)
ds['prices'].append(prices.reset_index(drop=True))
ds['features'].append(episode.reset_index(drop=True))
# TODO could call this episode
self.dataset = {
'prices': pd.concat(ds['prices'], axis=1).values,
            'features': pd.concat(ds['features'], axis=1).values,
import os
import tempfile
from StringIO import StringIO
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
import statsmodels.formula.api as smf
import yaml
from pandas.util import testing as pdt
from statsmodels.regression.linear_model import RegressionResultsWrapper
from .. import regression
from ...exceptions import ModelEvaluationError
from ...utils import testing
@pytest.fixture
def test_df():
return pd.DataFrame(
{'col1': range(5),
'col2': range(5, 10)},
index=['a', 'b', 'c', 'd', 'e'])
@pytest.fixture
def groupby_df(test_df):
test_df['group'] = ['x', 'y', 'x', 'x', 'y']
return test_df
def test_fit_model(test_df):
filters = []
model_exp = 'col1 ~ col2'
fit = regression.fit_model(test_df, filters, model_exp)
assert isinstance(fit, RegressionResultsWrapper)
def test_predict(test_df):
filters = ['col1 in [0, 2, 4]']
model_exp = 'col1 ~ col2'
fit = regression.fit_model(test_df, filters, model_exp)
predicted = regression.predict(
test_df.query('col1 in [1, 3]'), None, fit)
expected = pd.Series([1., 3.], index=['b', 'd'])
pdt.assert_series_equal(predicted, expected)
def test_predict_ytransform(test_df):
def yt(x):
return x / 2.
filters = ['col1 in [0, 2, 4]']
model_exp = 'col1 ~ col2'
fit = regression.fit_model(test_df, filters, model_exp)
predicted = regression.predict(
test_df.query('col1 in [1, 3]'), None, fit, ytransform=yt)
expected = pd.Series([0.5, 1.5], index=['b', 'd'])
pdt.assert_series_equal(predicted, expected)
def test_predict_with_nans():
df = pd.DataFrame(
{'col1': range(5),
         'col2': [5, 6, np.nan, 8, 9]},
index=['a', 'b', 'c', 'd', 'e'])
with pytest.raises(ModelEvaluationError):
regression.fit_model(df, None, 'col1 ~ col2')
fit = regression.fit_model(df.loc[['a', 'b', 'e']], None, 'col1 ~ col2')
with pytest.raises(ModelEvaluationError):
regression.predict(
df.loc[['c', 'd']], None, fit)
def test_rhs():
assert regression._rhs('col1 + col2') == 'col1 + col2'
assert regression._rhs('col3 ~ col1 + col2') == 'col1 + col2'
def test_FakeRegressionResults(test_df):
model_exp = 'col1 ~ col2'
model = smf.ols(formula=model_exp, data=test_df)
fit = model.fit()
fit_parameters = regression._model_fit_to_table(fit)
wrapper = regression._FakeRegressionResults(
model_exp, fit_parameters, fit.rsquared, fit.rsquared_adj)
test_predict = pd.DataFrame({'col2': [0.5, 10, 25.6]})
npt.assert_array_equal(
wrapper.predict(test_predict), fit.predict(test_predict))
pdt.assert_series_equal(wrapper.params, fit.params, check_names=False)
    pdt.assert_series_equal(wrapper.bse, fit.bse, check_names=False)
import matplotlib.pyplot as plt
import os
import seaborn as sns
import numpy as np
from matplotlib.colors import ListedColormap
import pandas as pd
from sklearn.manifold import TSNE
from src.Utils.Fitness import Fitness
class Graphs:
def __init__(self,objectiveNames,data,save=True,display=False,path='./Figures/'):
self.objectiveNames = objectiveNames
self.data = data
self.save = save
self.path = path
self.display = display
self.CheckIfPathExist()
def CheckIfPathExist(self):
p = self.path.split('/')
p = p[:-1]
p = '/'.join(p)
pathExist = os.path.exists(p)
if not pathExist :
os.mkdir(p)
def dataTSNE(self):
self.data = self.ChangeAlgoNames(self.data)
fig = sns.relplot(data=self.data,x=self.data['x'],y=self.data['y'],col='algorithm',kind='scatter',col_wrap=4,height=8.27, aspect=17/8.27)
if self.display:
plt.show()
if self.save:
fig.savefig(self.path + ".png")
def findGlobalParetoFront(self,dataSet,pop):
print('find global pareto front')
fitness = Fitness('horizontal_binary', ['support','confidence','cosine'], len(pop) ,dataSet.shape[1])
fitness.ComputeScorePopulation(pop,dataSet)
scores = fitness.scores
print(scores)
paretoFront = []
isParetoFrontColumn = []
for p in range(len(scores)):
dominate = True
for q in range(len(scores)):
if fitness.Domination(scores[p], scores[q]) == 1:
dominate = False
isParetoFrontColumn.append(False)
break
if dominate:
paretoFront.append(p)
isParetoFrontColumn.append(True)
paretoFront = np.array(paretoFront)
return paretoFront
def getRulesFromFiles(self,dataSet,data):
rules = []
pop = []
files = os.listdir('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/Rules/0/')
for file in files:
f = open('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/Rules/0/'+file,'r')
lines = f.readlines()
f.close()
for i in range(len(lines)):
if(i%2==0):
ind = np.zeros(dataSet.shape[1]*2)
line = lines[i]
line = line[1:len(line)-2]
line = line.split("' '")
line = [l.replace("'", "") for l in line]
for li in range(len(line)):
obj = line[li]
obj = obj[1:len(obj)-1]
obj = obj.split(' ')
obj= [ x for x in obj if x!='']
if(li==0):
for item in obj:
ind[int(item)] = 1
if(li==2):
for item in obj:
ind[int(item)+dataSet.shape[1]] = 1
pop.append(ind)
pop = np.array(pop)
paretoFront = self.findGlobalParetoFront(dataSet,pop)
pop = pop[paretoFront]
pop = [list(x) for x in pop]
isInParetoFront = []
for i in range(len(data)):
line = list(np.array(data.loc[i])[1:])
isInPareto = False
for ind in pop:
if(ind == line):
isInPareto = True
if isInPareto:
isInParetoFront.append(True)
else:
isInParetoFront.append(False)
return isInParetoFront
def dataTSNEFromFile(self,dataSet):
self.data = pd.read_csv('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/0/TestedIndividuals/49.csv',index_col=0)
isParetoFrontColumn = self.getRulesFromFiles(dataSet,self.data)
self.data = self.ChangeAlgoNames(self.data)
print(self.data)
algorithms = self.data['algorithm']
self.data = self.data.drop('algorithm',axis=1)
self.data['isInParetoFront'] = isParetoFrontColumn
self.data = TSNE(n_components=2, learning_rate='auto',
init='random').fit_transform(np.asarray(self.data,dtype='float64'))
transformed = pd.DataFrame(list(zip(list(algorithms),self.data[:,0],self.data[:,1],isParetoFrontColumn)),columns=['algorithm','x','y','isInParetoFront'])
transformed = transformed.drop_duplicates()
self.data = transformed
print(self.data)
fig = sns.relplot(data=self.data,x=self.data['x'],y=self.data['y'],col='algorithm',kind='scatter',col_wrap=4,height=8.27, aspect=17/8.27,hue='isInParetoFront')
self.path = 'D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/0/TestedIndividuals/graph'
if True:
plt.show()
if True:
fig.savefig(self.path + ".png")
def GraphNbRules(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='nbRules', data=self.data)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphDistances(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='distances', data=self.data)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphCoverages(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='coverages', data=self.data)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageCoverages(self,p,algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
df = pd.read_csv(p + str(i) + '/Coverages.csv', index_col=0)
for nameIndex in range(len(algName)):
# data.append([algName[nameIndex],float(df.loc[(df['algorithm'] == algName[nameIndex]) & (df['i'] == nbIter-1)]['coverages'])])
data.append([algName[nameIndex], float(
df.loc[df['algorithm'] == algName[nameIndex]].head(1)['coverages'])])
df = pd.DataFrame(data,columns=['algorithm','coverages'])
df = df.sort_values(by=['coverages'],ascending=False)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
print(df)
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='coverages', data=df)
plt.xticks(rotation=70)
plt.tight_layout()
        if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageNBRules(self,p,algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
df = pd.read_csv(p + str(i) + '/NbRules/'+str(nbIter-1)+'.csv', index_col=0)
for nameIndex in range(len(algName)):
data.append([algName[nameIndex],float(df.loc[df['algorithm'] == algName[nameIndex]]['nbRules'])])
df = pd.DataFrame(data,columns=['algorithm','nbRules'])
df = df.sort_values(by=['nbRules'],ascending=False)
df = self.ChangeAlgoNames(df)
print(df)
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='nbRules', data=df)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageExecutionTime(self,p,algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
df = pd.read_csv(p + str(i) + '/ExecutionTime.csv', index_col=0)
for nameIndex in range(len(algName)):
for j in range(nbIter):
data.append([algName[nameIndex], float(df.loc[(df['algorithm'] == algName[nameIndex]) & (df['i'] == j)]['execution Time'])])
df = pd.DataFrame(data, columns=['algorithm', 'execution Time'])
df = df.sort_values(by=['execution Time'], ascending=False)
df = self.ChangeAlgoNames(df)
print(df)
fig = plt.figure(figsize=(15, 15))
sns.barplot(x='algorithm', y='execution Time', data=df)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageDistances(self, p, algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
df = pd.read_csv(p + str(i) + '/Distances.csv', index_col=0)
for nameIndex in range(len(algName)):
# data.append([algName[nameIndex], float(df.loc[(df['algorithm'] == algName[nameIndex]) & (df['i'] == nbIter-1) ]['distances'])])
data.append([algName[nameIndex], float(
df.loc[df['algorithm'] == algName[nameIndex]].head(1)['distances'])])
df = pd.DataFrame(data, columns=['algorithm', 'distances'])
df = df.sort_values(by=['distances'], ascending=False)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
fig = plt.figure(figsize=(15, 15))
sns.barplot(x='algorithm', y='distances', data=df)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphExecutionTime(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
self.data = self.ChangeAlgoNames(self.data)
sns.lineplot(x='i',y='execution Time',hue='algorithm',style='algorithm',data=self.data)
fig.legend(loc='center left', bbox_to_anchor=(1, 0.5))
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path+".png")
def GraphScores(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(0, 1)
ax.set_ylim3d(0, 1)
        # change this if an interestingness measure (IM) has a different definition interval
ax.set_zlim3d(0, 1)
ax.set_xlabel(self.objectiveNames[0])
ax.set_ylabel(self.objectiveNames[1])
ax.set_zlabel(self.objectiveNames[2])
for alg in self.data.algorithm.unique():
ax.scatter(self.data[self.data.algorithm==alg][self.objectiveNames[0]],
self.data[self.data.algorithm==alg][self.objectiveNames[1]],
self.data[self.data.algorithm==alg][self.objectiveNames[2]],
label=alg)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path+".png")
def ChangeAlgoNames(self,df):
df = df.replace('custom','Cambrian Explosion')
df = df.replace('mohsbotsarm', 'Bee Swarm')
df = df.replace('moaloarm', 'Antlion')
df = df.replace('modearm', 'Differential Evolution')
df = df.replace('mossoarm', 'Social Spider')
df = df.replace('modaarm', 'Dragonfly')
df = df.replace('mowoaarm', 'Whale')
df = df.replace('mogsaarm', 'Gravity Search')
df = df.replace('hmofaarm', 'Firefly')
df = df.replace('mofpaarm', 'Flower Polination')
df = df.replace('mososarm', 'Symbiotic')
df = df.replace('mowsaarm', 'Wolf')
df = df.replace('mocatsoarm', 'Cat')
df = df.replace('mogeaarm', 'Gradient')
df = df.replace('nshsdearm', 'NSHSDE')
df = df.replace('mosaarm', 'Simulated Annealing')
df = df.replace('motlboarm', 'Teaching Learning')
df = df.replace('mopso', 'Particle Swarm')
df = df.replace('mocssarm', 'Charged System')
df = df.replace('nsgaii', 'NSGAII')
df = df.replace('mocsoarm', 'Cockroach')
return df
def getAverage(self):
nbRepeat = 50
dataset = 'RISK'
mesureFolder = 'LeaderBoard'
dfArray = []
avgArray = []
for i in range(nbRepeat):
p = 'D:/ULaval/Maitrise/Recherche/Code/Experiments/' + dataset + '/'
p = p +str(i)+'/'+ mesureFolder+'/49.csv'
df = pd.read_csv(p,index_col=1)
if(i>0):
fdf = fdf + df
else:
fdf = df
fdf = fdf/nbRepeat
fdf = fdf.sort_values(by=['support'],ascending=False)
print(fdf)
def Graph3D(self):
plt.cla()
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = self.data[:, 0]
y = self.data[:, 1]
z = self.data[:, 2]
ax.set_xlabel(self.objectiveNames[0])
ax.set_ylabel(self.objectiveNames[1])
ax.set_zlabel(self.objectiveNames[2])
ax.scatter(x, y, z)
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path+".png")
plt.close()
def GraphNBRulesVsCoverages(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
dfNbRules = pd.read_csv(p + str(i) + '/NbRules/' + str(nbIter - 1) + '.csv', index_col=0)
dfCoverages = pd.read_csv(p + str(i) + '/Coverages.csv', index_col=0)
# dfCoverages = dfCoverages[dfCoverages['i']==float(nbRepeat-1)]
for nameIndex in range(len(algName)):
data.append([algName[nameIndex], float(dfNbRules.loc[dfNbRules['algorithm'] == algName[nameIndex]]['nbRules']),float(
dfCoverages.loc[dfCoverages['algorithm'] == algName[nameIndex]].head(1)['coverages'])])
df = pd.DataFrame(data, columns=['algorithm', 'nbRules','coverages'])
df = df.sort_values(by=['nbRules'], ascending=False)
coverages = df.groupby(['algorithm'])
coverages = coverages['coverages'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
coverages = coverages.rename(columns={'mean':'covMean','std':'covStd'})
nbRules = df.groupby(['algorithm'])
nbRules = nbRules['nbRules'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
nbRules = nbRules.rename(columns={'mean': 'nbRulesMean', 'std': 'nbRulesStd'})
df = pd.concat([coverages,nbRules],axis=1)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
fig = plt.figure(figsize=(15, 15))
ax = sns.scatterplot(x='nbRulesMean', y='covMean', hue='algorithm', style='algorithm',data=df)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
if self.save:
fig.savefig(self.path+'GraphNBRulesVsCoverages' + ".png")
def GraphSCCVsCoverage(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
dfCoverages = pd.read_csv(p + str(i) + '/Coverages.csv', index_col=0)
# dfCoverages = dfCoverages[dfCoverages['i'] == float(nbRepeat - 1)]
dfScores = pd.read_csv(p + str(i) + '/LeaderBoard/'+ str(nbIter - 1)+'.csv', index_col=0)
for nameIndex in range(len(algName)):
data.append([algName[nameIndex], float(dfCoverages.loc[dfCoverages['algorithm'] == algName[nameIndex]].head(1)['coverages']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['support']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['confidence']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['cosine'])])
df = pd.DataFrame(data, columns=['algorithm', 'coverages','support','confidence','cosine'])
df = df.sort_values(by=['coverages'], ascending=False)
support = df.groupby(['algorithm'])
support = support['support'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
support = support.rename(columns={'mean':'supportMean','std':'supportStd'})
confidence = df.groupby(['algorithm'])
confidence = confidence['confidence'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
confidence = confidence.rename(columns={'mean': 'confidenceMean', 'std': 'confidenceStd'})
cosine = df.groupby(['algorithm'])
cosine = cosine['cosine'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
cosine = cosine.rename(columns={'mean': 'cosineMean', 'std': 'cosineStd'})
coverages = df.groupby(['algorithm'])
coverages = coverages['coverages'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
coverages = coverages.rename(columns={'mean': 'coveragesMean', 'std': 'coveragesStd'})
df = pd.concat([support,confidence,cosine,coverages],axis=1)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
fig, axes = plt.subplots(1, 3, figsize=(17, 5), sharey=True)
ax = sns.scatterplot(ax=axes[0],x='coveragesMean', y='supportMean', hue='algorithm', style='algorithm',data=df)
ax.get_legend().remove()
ax =sns.scatterplot(ax=axes[1], x='coveragesMean', y='confidenceMean', hue='algorithm', style='algorithm', data=df)
ax.get_legend().remove()
ax =sns.scatterplot(ax=axes[2], x='coveragesMean', y='cosineMean', hue='algorithm', style='algorithm', data=df)
ax.get_legend().remove()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
if self.save:
fig.savefig(self.path+'GraphCoveragesVsSCC' + ".png")
def GraphSCCVsNBRules(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
dfNbRules = pd.read_csv(p + str(i) + '/NbRules/' + str(nbIter - 1) + '.csv', index_col=0)
dfScores = pd.read_csv(p + str(i) + '/LeaderBoard/'+ str(nbIter - 1)+'.csv', index_col=0)
for nameIndex in range(len(algName)):
data.append([algName[nameIndex], float(dfNbRules.loc[dfNbRules['algorithm'] == algName[nameIndex]]['nbRules']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['support']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['confidence']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['cosine'])])
df = | pd.DataFrame(data, columns=['algorithm', 'nbRules','support','confidence','cosine']) | pandas.DataFrame |
#!/usr/bin/env python
# Copyright 2020 ARC Centre of Excellence for Climate Extremes
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import xarray as xr
import numpy as np
import pandas as pd
import datetime
TESTS_HOME = os.path.abspath(os.path.dirname(__file__))
TESTS_DATA = os.path.join(TESTS_HOME, "testdata")
# oisst data from 2003 to 2004 included for small region
oisst = os.path.join(TESTS_DATA, "oisst_2003_2004.nc")
# oisst data from 2003 to 2004 included for all land region
land = os.path.join(TESTS_DATA, "land.nc")
# threshold and seasonal avg calculated using Eric Olivier MHW code on two points of OISST region subset for same period 2003-2004
# point1 lat=-42.625, lon=148.125
# point2 lat=-41.625, lon=148.375
oisst_clim = os.path.join(TESTS_DATA,"test_clim_oisst.nc")
oisst_clim_nosmooth = os.path.join(TESTS_DATA,"test_clim_oisst_nosmooth.nc")
relthreshnorm = os.path.join(TESTS_DATA, "relthreshnorm.nc")
@pytest.fixture(scope="module")
def oisst_ts():
ds = xr.open_dataset(oisst)
return ds.sst
@pytest.fixture(scope="module")
def landgrid():
ds = xr.open_dataset(land)
return ds.sst
@pytest.fixture(scope="module")
def clim_oisst():
ds = xr.open_dataset(oisst_clim)
return ds
@pytest.fixture(scope="module")
def clim_oisst_nosmooth():
ds = xr.open_dataset(oisst_clim_nosmooth)
return ds
@pytest.fixture(scope="module")
def dsnorm():
ds = xr.open_dataset(relthreshnorm)
return ds.stack(cell=['lat','lon'])
@pytest.fixture
def oisst_doy():
a = np.arange(1,367)
b = np.delete(a,[59])
return np.concatenate((b,a))
@pytest.fixture
def tstack():
return np.array([ 16.99, 17.39, 16.99, 17.39, 17.3 , 17.39, 17.3 ])
@pytest.fixture
def filter_data():
a = [0,1,1,1,1,1,0,0,1,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,0,0]
time = pd.date_range('2001-01-01', periods=len(a))
array = pd.Series(a, index=time)
idxarr = pd.Series(data=np.arange(len(a)), index=time)
bthresh = array==1
st = pd.Series(index=time, dtype='float64').rename('start')
end = pd.Series(index=time, dtype='float64').rename('end')
events = pd.Series(index=time, dtype='float64').rename('events')
st[5] = 1
st[16] = 11
st[24] = 20
end[5] = 5
end[16] = 16
end[24] = 24
events[1:6] = 1
events[11:17] = 11
events[20:25] =20
st2 = st.copy()
end2 = end.copy()
events2 = events.copy()
st2[24] = np.nan
end2[16] = np.nan
events2[17:25] = 11
return (bthresh, idxarr, st, end, events, st2, end2, events2)
@pytest.fixture
def join_data():
evs = pd.Series(np.arange(20)).rename('events')
evs2 = evs.copy()
evs2[1:8] = 1
evs2[12:19] = 12
joined = set([(1,7),(12,18)])
return (evs, evs2, joined)
@pytest.fixture
def rates_data():
d = { 'index_start': [3.], 'index_end': [10.], 'index_peak': [8.],
'relS_first': [2.3], 'relS_last': [1.8], 'intensity_max': [3.1],
'anom_first': [0.3], 'anom_last': [0.2]}
df = | pd.DataFrame(d) | pandas.DataFrame |
import re
from collections import defaultdict
import pandas as pd
def empty_data(mapping_rows: list, row: pd.DataFrame):
    return all(mapping_row not in row or row.get(mapping_row) == '' for mapping_row in mapping_rows)
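# Note (illustrative, not part of the original module): empty_data is True only when
# every mapped column is either missing from the row or holds an empty string.
# The column names below are hypothetical, chosen only to show the behaviour:
#   empty_data(["Sample ID", "Depth"], pd.Series({"Sample ID": "", "Notes": "x"}))  # -> True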
def merge_spreadsheets(workbook: str, merge_field: str) -> pd.DataFrame:
# parse dataset
df = | pd.read_excel(workbook, sheet_name=None) | pandas.read_excel |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/3/17 13:06
Desc: 期货-中国-交易所-会员持仓数据接口
大连商品交易所、上海期货交易所、郑州商品交易所、中国金融期货交易所
采集前 20 会员持仓数据;
建议下午 16:30 以后采集当天数据, 避免交易所数据更新不稳定;
郑州商品交易所格式分为三类
大连商品交易所有具体合约的持仓排名, 通过 futures_dce_position_rank 获取
20171228
http://www.czce.com.cn/cn/DFSStaticFiles/Future/2020/20200727/FutureDataHolding.txt
20100825
http://www.czce.com.cn/cn/exchange/2014/datatradeholding/20140515.txt
"""
import datetime
import json
import re
import warnings
import zipfile
from io import BytesIO
from io import StringIO
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.futures import cons
from akshare.futures.requests_fun import (
requests_link
)
from akshare.futures.symbol_var import chinese_to_english, find_chinese
from akshare.futures.symbol_var import (
symbol_varieties
)
calendar = cons.get_calendar()
rank_columns = ['vol_party_name', 'vol', 'vol_chg', 'long_party_name', 'long_open_interest',
'long_open_interest_chg', 'short_party_name', 'short_open_interest', 'short_open_interest_chg']
intColumns = ['vol', 'vol_chg', 'long_open_interest', 'long_open_interest_chg', 'short_open_interest',
'short_open_interest_chg']
def get_rank_sum_daily(start_day="20200721", end_day="20200723", vars_list=cons.contract_symbols):
"""
采集四个期货交易所前5、前10、前15、前20会员持仓排名数据
注1:由于上期所和中金所只公布每个品种内部的标的排名,没有公布品种的总排名;
所以函数输出的品种排名是由品种中的每个标的加总获得,并不是真实的品种排名列表
注2:大商所只公布了品种排名,未公布标的排名
:param start_day: 开始日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
:param end_day: 结束数据 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
:param vars_list: 合约品种如RB、AL等列表 为空时为所有商品
:return: pd.DataFrame
展期收益率数据(DataFrame):
symbol 标的合约 string
var 商品品种 string
vol_top5 成交量前5会员成交量总和 int
vol_chg_top5 成交量前5会员成交量变化总和 int
long_open_interest_top5 持多单前5会员持多单总和 int
long_open_interest_chg_top5 持多单前5会员持多单变化总和 int
short_open_interest_top5 持空单前5会员持空单总和 int
short_open_interest_chg_top5 持空单前5会员持空单变化总和 int
vol_top10 成交量前10会员成交量总和 int
...
date 日期 string YYYYMMDD
"""
start_day = cons.convert_date(start_day) if start_day is not None else datetime.date.today()
end_day = cons.convert_date(end_day) if end_day is not None else cons.convert_date(
cons.get_latest_data_date(datetime.datetime.now()))
records = pd.DataFrame()
while start_day <= end_day:
print(start_day)
if start_day.strftime('%Y%m%d') in calendar:
data = get_rank_sum(start_day, vars_list)
if data is False:
print(f"{start_day.strftime('%Y-%m-%d')}日交易所数据连接失败,已超过20次,您的地址被网站墙了,请保存好返回数据,稍后从该日期起重试")
return records.reset_index(drop=True)
records = records.append(data)
else:
warnings.warn(f"{start_day.strftime('%Y%m%d')}非交易日")
start_day += datetime.timedelta(days=1)
return records.reset_index(drop=True)
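# Illustrative usage sketch (not part of the original module): pull the summed
# top-N member positions for a short window and two varieties. Live access to the
# exchange websites is assumed; the variety codes follow cons.contract_symbols.
# if __name__ == "__main__":
#     df_rank = get_rank_sum_daily(start_day="20200721", end_day="20200723", vars_list=["RB", "AL"])
#     print(df_rank.head())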
def get_rank_sum(date="20200727", vars_list=cons.contract_symbols):
"""
抓取四个期货交易所前5、前10、前15、前20会员持仓排名数据
注1:由于上期所和中金所只公布每个品种内部的标的排名, 没有公布品种的总排名;
所以函数输出的品种排名是由品种中的每个标的加总获得, 并不是真实的品种排名列表
注2:大商所只公布了品种排名, 未公布标的排名
:param date: 日期 format: YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
:param vars_list: 合约品种如 RB, AL等列表 为空时为所有商品
:return: pd.DataFrame:
展期收益率数据
symbol 标的合约 string
var 商品品种 string
vol_top5 成交量前5会员成交量总和 int
vol_chg_top5 成交量前5会员成交量变化总和 int
long_open_interest_top5 持多单前5会员持多单总和 int
long_open_interest_chg_top5 持多单前5会员持多单变化总和 int
short_open_interest_top5 持空单前5会员持空单总和 int
short_open_interest_chg_top5 持空单前5会员持空单变化总和 int
vol_top10 成交量前10会员成交量总和 int
...
date 日期 string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return None
dce_var = [i for i in vars_list if i in cons.market_exchange_symbols['dce']]
shfe_var = [i for i in vars_list if i in cons.market_exchange_symbols['shfe']]
czce_var = [i for i in vars_list if i in cons.market_exchange_symbols['czce']]
cffex_var = [i for i in vars_list if i in cons.market_exchange_symbols['cffex']]
big_dict = {}
if len(dce_var) > 0:
data = get_dce_rank_table(date, dce_var)
if data is False:
return False
big_dict.update(data)
if len(shfe_var) > 0:
data = get_shfe_rank_table(date, shfe_var)
if data is False:
return False
big_dict.update(data)
if len(czce_var) > 0:
data = get_czce_rank_table(date, czce_var)
if data is False:
return False
big_dict.update(data)
if len(cffex_var) > 0:
data = get_cffex_rank_table(date, cffex_var)
if data is False:
return False
big_dict.update(data)
records = pd.DataFrame()
for symbol, table in big_dict.items():
table = table.applymap(lambda x: 0 if x == '' else x)
for symbol_inner in set(table['symbol']):
var = symbol_varieties(symbol_inner)
if var in vars_list:
table_cut = table[table['symbol'] == symbol_inner]
table_cut['rank'] = table_cut['rank'].astype('float')
table_cut_top5 = table_cut[table_cut['rank'] <= 5]
table_cut_top10 = table_cut[table_cut['rank'] <= 10]
table_cut_top15 = table_cut[table_cut['rank'] <= 15]
table_cut_top20 = table_cut[table_cut['rank'] <= 20]
                row_dict = {'symbol': symbol_inner, 'variety': var,
'vol_top5': table_cut_top5['vol'].sum(), 'vol_chg_top5': table_cut_top5['vol_chg'].sum(),
'long_open_interest_top5': table_cut_top5['long_open_interest'].sum(),
'long_open_interest_chg_top5': table_cut_top5['long_open_interest_chg'].sum(),
'short_open_interest_top5': table_cut_top5['short_open_interest'].sum(),
'short_open_interest_chg_top5': table_cut_top5['short_open_interest_chg'].sum(),
'vol_top10': table_cut_top10['vol'].sum(),
'vol_chg_top10': table_cut_top10['vol_chg'].sum(),
'long_open_interest_top10': table_cut_top10['long_open_interest'].sum(),
'long_open_interest_chg_top10': table_cut_top10['long_open_interest_chg'].sum(),
'short_open_interest_top10': table_cut_top10['short_open_interest'].sum(),
'short_open_interest_chg_top10': table_cut_top10['short_open_interest_chg'].sum(),
'vol_top15': table_cut_top15['vol'].sum(),
'vol_chg_top15': table_cut_top15['vol_chg'].sum(),
'long_open_interest_top15': table_cut_top15['long_open_interest'].sum(),
'long_open_interest_chg_top15': table_cut_top15['long_open_interest_chg'].sum(),
'short_open_interest_top15': table_cut_top15['short_open_interest'].sum(),
'short_open_interest_chg_top15': table_cut_top15['short_open_interest_chg'].sum(),
'vol_top20': table_cut_top20['vol'].sum(),
'vol_chg_top20': table_cut_top20['vol_chg'].sum(),
'long_open_interest_top20': table_cut_top20['long_open_interest'].sum(),
'long_open_interest_chg_top20': table_cut_top20['long_open_interest_chg'].sum(),
'short_open_interest_top20': table_cut_top20['short_open_interest'].sum(),
'short_open_interest_chg_top20': table_cut_top20['short_open_interest_chg'].sum(),
'date': date.strftime('%Y%m%d')
}
                records = records.append(pd.DataFrame(row_dict, index=[0]))
if len(big_dict.items()) > 0:
add_vars = [i for i in cons.market_exchange_symbols['shfe'] + cons.market_exchange_symbols['cffex'] if
i in records['variety'].tolist()]
for var in add_vars:
records_cut = records[records['variety'] == var]
var_record = pd.DataFrame(records_cut.sum()).T
var_record['date'] = date.strftime('%Y%m%d')
var_record.loc[:, ['variety', 'symbol']] = var
records = records.append(var_record)
return records.reset_index(drop=True)
def get_shfe_rank_table(date=None, vars_list=cons.contract_symbols):
"""
上海期货交易所前 20 会员持仓排名数据明细
注:该交易所只公布每个品种内部的标的排名,没有公布品种的总排名
数据从20020107开始,每交易日16:30左右更新数据
:param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
:param vars_list: 合约品种如RB、AL等列表 为空时为所有商品
:return: pd.DataFrame
rank 排名 int
vol_party_name 成交量排序的当前名次会员 string(中文)
vol 该会员成交量 int
vol_chg 该会员成交量变化量 int
long_party_name 持多单排序的当前名次会员 string(中文)
long_open_interest 该会员持多单 int
long_open_interest_chg 该会员持多单变化量 int
short_party_name 持空单排序的当前名次会员 string(中文)
short_open_interest 该会员持空单 int
short_open_interest_chg 该会员持空单变化量 int
symbol 标的合约 string
var 品种 string
date 日期 string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2002, 1, 7):
print("shfe数据源开始日期为20020107,跳过")
return {}
if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
url = cons.SHFE_VOL_RANK_URL % (date.strftime('%Y%m%d'))
r = requests_link(url, 'utf-8')
try:
context = json.loads(r.text)
except:
return {}
df = pd.DataFrame(context['o_cursor'])
df = df.rename(
columns={'CJ1': 'vol', 'CJ1_CHG': 'vol_chg', 'CJ2': 'long_open_interest', 'CJ2_CHG': 'long_open_interest_chg',
'CJ3': 'short_open_interest',
'CJ3_CHG': 'short_open_interest_chg', 'PARTICIPANTABBR1': 'vol_party_name',
'PARTICIPANTABBR2': 'long_party_name',
'PARTICIPANTABBR3': 'short_party_name', 'PRODUCTNAME': 'product1', 'RANK': 'rank',
'INSTRUMENTID': 'symbol', 'PRODUCTSORTNO': 'product2'})
if len(df.columns) < 3:
return {}
df = df.applymap(lambda x: x.strip() if isinstance(x, str) else x)
df = df.applymap(lambda x: None if x == '' else x)
df['variety'] = df['symbol'].apply(lambda x: symbol_varieties(x))
df = df[df['rank'] > 0]
for col in ['PARTICIPANTID1', 'PARTICIPANTID2', 'PARTICIPANTID3', 'product1', 'product2']:
try:
del df[col]
except:
pass
get_vars = [var for var in vars_list if var in df['variety'].tolist()]
big_dict = {}
for var in get_vars:
df_var = df[df['variety'] == var]
for symbol in set(df_var['symbol']):
df_symbol = df_var[df_var['symbol'] == symbol]
big_dict[symbol] = df_symbol.reset_index(drop=True)
return big_dict
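# Illustrative usage sketch: the SHFE result is a dict of DataFrames keyed by
# contract symbol (e.g. "cu2009" - an example only; the keys depend on the date).
# shfe_dict = get_shfe_rank_table(date="20200727", vars_list=["CU"])
# for symbol, table in shfe_dict.items():
#     print(symbol, table.shape)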
def _czce_df_read(url, skip_rows, encoding='utf-8', header=0):
"""
郑州商品交易所的网页数据
:param header:
:type header:
:param url: 网站 string
:param skip_rows: 去掉前几行 int
:param encoding: utf-8 or gbk or gb2312
:return: pd.DataFrame
"""
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
"Host": "www.czce.com.cn",
"Cookie": "XquW6dFMPxV380S=CAaD3sMkdXv3fUoaJlICIEv0MVegGq5EoMyBcxkOjCgSjmpuovYFuTLtYFcxTZGw; XquW6dFMPxV380T=5QTTjUlA6f6WiDO7fMGmqNxHBWz.hKIc8lb_tc1o4nHrJM4nsXCAI9VHaKyV_jkHh4cIVvD25kGQAh.MvLL1SHRA20HCG9mVVHPhAzktNdPK3evjm0NYbTg2Gu_XGGtPhecxLvdFQ0.JlAxy_z0C15_KdO8kOI18i4K0rFERNPxjXq5qG1Gs.QiOm976wODY.pe8XCQtAsuLYJ.N4DpTgNfHJp04jhMl0SntHhr.jhh3dFjMXBx.JEHngXBzY6gQAhER7uSKAeSktruxFeuKlebse.vrPghHqWvJm4WPTEvDQ8q",
}
r = requests_link(url, encoding, headers=headers)
data = pd.read_html(r.text, match='.+', flavor=None, header=header, index_col=0, skiprows=skip_rows, attrs=None,
parse_dates=False, thousands=', ', encoding="gbk", decimal='.',
converters=None, na_values=None, keep_default_na=True)
return data
def get_czce_rank_table(date="20200727", vars_list=cons.contract_symbols):
"""
郑州商品交易所前 20 会员持仓排名数据明细
注:该交易所既公布了品种排名, 也公布了标的排名
:param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
:param vars_list: 合约品种如RB、AL等列表 为空时为所有商品, 数据从20050509开始,每交易日16:30左右更新数据
:return: pd.DataFrame
rank 排名 int
vol_party_name 成交量排序的当前名次会员 string(中文)
vol 该会员成交量 int
vol_chg 该会员成交量变化量 int
long_party_name 持多单排序的当前名次会员 string(中文)
long_open_interest 该会员持多单 int
long_open_interest_chg 该会员持多单变化量 int
short_party_name 持空单排序的当前名次会员 string(中文)
short_open_interest 该会员持空单 int
short_open_interest_chg 该会员持空单变化量 int
symbol 标的合约 string
var 品种 string
date 日期 string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2015, 10, 8):
print("CZCE可获取的数据源开始日期为 20151008, 请输入合适的日期参数")
return {}
if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
if date >= datetime.date(2015, 10, 8):
url = f"http://www.czce.com.cn/cn/DFSStaticFiles/Future/{date.year}/{date.isoformat().replace('-', '')}/FutureDataHolding.xls"
r = requests.get(url)
temp_df = pd.read_excel(BytesIO(r.content))
temp_pinzhong_index = [item + 1 for item in temp_df[temp_df.iloc[:, 0].str.contains("合计")].index.to_list()]
temp_pinzhong_index.insert(0, 0)
temp_pinzhong_index.pop()
temp_symbol_index = temp_df.iloc[temp_pinzhong_index, 0].str.split(" ", expand=True).iloc[:, 0]
symbol_list = [re.compile(r"[0-9a-zA-Z_]+").findall(item)[0] for item in temp_symbol_index.values]
temp_symbol_index_list = temp_symbol_index.index.to_list()
big_dict = {}
for i in range(len(temp_symbol_index_list)-1):
inner_temp_df = temp_df[temp_symbol_index_list[i]+2: temp_symbol_index_list[i+1]-1]
inner_temp_df.columns = ["rank",
"vol_party_name",
"vol",
"vol_chg",
"long_party_name",
"long_open_interest",
"long_open_interest_chg",
"short_party_name",
"short_open_interest",
"short_open_interest_chg",
]
inner_temp_df.reset_index(inplace=True, drop=True)
big_dict[symbol_list[i]] = inner_temp_df
inner_temp_df = temp_df[temp_symbol_index_list[i+1]+2:-1]
inner_temp_df.columns = ["rank",
"vol_party_name",
"vol",
"vol_chg",
"long_party_name",
"long_open_interest",
"long_open_interest_chg",
"short_party_name",
"short_open_interest",
"short_open_interest_chg",
]
inner_temp_df.reset_index(inplace=True, drop=True)
big_dict[symbol_list[-1]] = inner_temp_df
new_big_dict = {}
for key, value in big_dict.items():
value["symbol"] = key
value["variety"] = re.compile(r"[a-zA-Z_]+").findall(key)[0]
new_big_dict[key] = value
return new_big_dict
def get_dce_rank_table(date="20200727", vars_list=cons.contract_symbols):
"""
大连商品交易所前 20 会员持仓排名数据明细
注: 该交易所既公布品种排名, 也公布标的合约排名
:param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date 对象, 为空时为当天
:param vars_list: 合约品种如 RB、AL等列表为空时为所有商品, 数据从 20060104 开始,每交易日 16:30 左右更新数据
:return: pandas.DataFrame
rank 排名 int
vol_party_name 成交量排序的当前名次会员 string(中文)
vol 该会员成交量 int
vol_chg 该会员成交量变化量 int
long_party_name 持多单排序的当前名次会员 string(中文)
long_open_interest 该会员持多单 int
long_open_interest_chg 该会员持多单变化量 int
short_party_name 持空单排序的当前名次会员 string(中文)
short_open_interest 该会员持空单 int
short_open_interest_chg 该会员持空单变化量 int
symbol 标的合约 string
var 品种 string
date 日期 string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2006, 1, 4):
print(Exception("大连商品交易所数据源开始日期为20060104,跳过"))
return {}
if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
vars_list = [i for i in vars_list if i in cons.market_exchange_symbols['dce']]
big_dict = {}
for var in vars_list:
url = cons.DCE_VOL_RANK_URL % (var.lower(), var.lower(), date.year, date.month - 1, date.day)
list_60_name = []
list_60 = []
list_60_chg = []
rank = []
texts = requests_link(url).content.splitlines()
if not texts:
return False
if len(texts) > 30:
for text in texts:
line = text.decode("utf-8")
string_list = line.split()
try:
if int(string_list[0]) <= 20:
list_60_name.append(string_list[1])
list_60.append(string_list[2])
list_60_chg.append(string_list[3])
rank.append(string_list[0])
except:
pass
table_cut = pd.DataFrame({'rank': rank[0:20],
'vol_party_name': list_60_name[0:20],
'vol': list_60[0:20],
'vol_chg': list_60_chg[0:20],
'long_party_name': list_60_name[20:40],
'long_open_interest': list_60[20:40],
'long_open_interest_chg': list_60_chg[20:40],
'short_party_name': list_60_name[40:60],
'short_open_interest': list_60[40:60],
'short_open_interest_chg': list_60_chg[40:60]
})
table_cut = table_cut.applymap(lambda x: x.replace(',', ''))
table_cut = _table_cut_cal(table_cut, var)
big_dict[var] = table_cut.reset_index(drop=True)
return big_dict
def get_cffex_rank_table(date="20200427", vars_list=cons.contract_symbols):
"""
中国金融期货交易所前 20 会员持仓排名数据明细
注:该交易所既公布品种排名,也公布标的排名
:param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
:param vars_list: 合约品种如RB、AL等列表 为空时为所有商品, 数据从20100416开始,每交易日16:30左右更新数据
:return: pd.DataFrame
rank 排名 int
vol_party_name 成交量排序的当前名次会员 string(中文)
vol 该会员成交量 int
vol_chg 该会员成交量变化量 int
long_party_name 持多单排序的当前名次会员 string(中文)
long_open_interest 该会员持多单 int
long_open_interest_chg 该会员持多单变化量 int
short_party_name 持空单排序的当前名次会员 string(中文)
short_open_interest 该会员持空单 int
short_open_interest_chg 该会员持空单变化量 int
symbol 标的合约 string
var 品种 string
date 日期 string YYYYMMDD
"""
vars_list = [i for i in vars_list if i in cons.market_exchange_symbols['cffex']]
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2010, 4, 16):
print(Exception("cffex数据源开始日期为20100416,跳过"))
return {}
if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
big_dict = {}
for var in vars_list:
# print(var)
# var = "IF"
url = cons.CFFEX_VOL_RANK_URL % (date.strftime('%Y%m'), date.strftime('%d'), var)
r = requests_link(url, encoding='gbk')
if not r:
return False
if '网页错误' not in r.text:
try:
temp_chche = StringIO(r.text.split('\n交易日,')[1])
except:
                temp_chche = StringIO(r.text.split('\n交易日,')[0][4:])  # the file layout changed from 20200316 on; normalize the format
table = pd.read_csv(temp_chche)
table = table.dropna(how='any')
table = table.applymap(lambda x: x.strip() if isinstance(x, str) else x)
for symbol in set(table['合约']):
table_cut = table[table['合约'] == symbol]
table_cut.columns = ['symbol', 'rank'] + rank_columns
table_cut = _table_cut_cal(pd.DataFrame(table_cut), symbol)
big_dict[symbol] = table_cut.reset_index(drop=True)
return big_dict
def _table_cut_cal(table_cut, symbol):
"""
表格切分
:param table_cut: 需要切分的表格
:type table_cut: pandas.DataFrame
:param symbol: 具体合约的代码
:type symbol: str
:return:
:rtype: pandas.DataFrame
"""
var = symbol_varieties(symbol)
table_cut[intColumns + ['rank']] = table_cut[intColumns + ['rank']].astype(int)
table_cut_sum = table_cut.sum()
table_cut_sum['rank'] = 999
for col in ['vol_party_name', 'long_party_name', 'short_party_name']:
table_cut_sum[col] = None
table_cut = table_cut.append(pd.DataFrame(table_cut_sum).T, sort=True)
table_cut['symbol'] = symbol
table_cut['variety'] = var
table_cut[intColumns + ['rank']] = table_cut[intColumns + ['rank']].astype(int)
return table_cut
def futures_dce_position_rank(date: str = "20160104") -> dict:
    """
    Dalian Commodity Exchange daily position ranking for specific contracts.
    http://www.dce.com.cn/dalianshangpin/xqsj/tjsj26/rtj/rcjccpm/index.html
    :param date: trading day to query; e.g., "20200511"
    :type date: str
    :return: position ranking tables for the given date, keyed by contract
    :rtype: dict of pandas.DataFrame
    """
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
url = "http://www.dce.com.cn/publicweb/quotesdata/exportMemberDealPosiQuotesBatchData.html"
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Length": "160",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "www.dce.com.cn",
"Origin": "http://www.dce.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
payload = {
"memberDealPosiQuotes.variety": "a",
"memberDealPosiQuotes.trade_type": "0",
"contract.contract_id": "a2009",
"contract.variety_id": "a",
"year": date.year,
"month": date.month - 1,
"day": date.day,
"batchExportFlag": "batch",
}
r = requests.post(url, payload, headers=headers)
big_dict = dict()
with zipfile.ZipFile(BytesIO(r.content), "r") as z:
for i in z.namelist():
file_name = i.encode('cp437').decode('GBK')
try:
data = pd.read_table(z.open(i), header=None, sep="\t").iloc[:-6]
                if len(data) < 12:  # handle varieties with no active contracts
big_dict[file_name.split("_")[1]] = pd.DataFrame()
continue
start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
data = data.iloc[start_list[0]:, data.columns[data.iloc[start_list[0], :].notnull()]]
data.reset_index(inplace=True, drop=True)
start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
end_list = data[data.iloc[:, 0].str.find("总计") == 0].index.tolist()
part_one = data[start_list[0]: end_list[0]].iloc[1:, :]
part_two = data[start_list[1]: end_list[1]].iloc[1:, :]
part_three = data[start_list[2]: end_list[2]].iloc[1:, :]
temp_df = pd.concat([part_one.reset_index(drop=True), part_two.reset_index(drop=True),
part_three.reset_index(drop=True)], axis=1, ignore_index=True)
temp_df.columns = ["名次", "会员简称", "成交量", "增减", "名次", "会员简称", "持买单量", "增减", "名次", "会员简称", "持卖单量", "增减"]
temp_df["rank"] = range(1, len(temp_df) + 1)
del temp_df["名次"]
temp_df.columns = ["vol_party_name", "vol", "vol_chg", "long_party_name", "long_open_interest",
"long_open_interest_chg", "short_party_name", "short_open_interest",
"short_open_interest_chg", "rank"]
temp_df["symbol"] = file_name.split("_")[1]
temp_df["variety"] = file_name.split("_")[1][:-4].upper()
temp_df = temp_df[["long_open_interest", "long_open_interest_chg", "long_party_name", "rank",
"short_open_interest", "short_open_interest_chg", "short_party_name",
"vol", "vol_chg", "vol_party_name", "symbol", "variety"]]
big_dict[file_name.split("_")[1]] = temp_df
except UnicodeDecodeError as e:
try:
data = pd.read_table(z.open(i), header=None, sep="\\s+", encoding="gb2312", skiprows=3)
except:
data = pd.read_table(z.open(i), header=None, sep="\\s+", encoding="gb2312", skiprows=4)
start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
end_list = data[data.iloc[:, 0].str.find("总计") == 0].index.tolist()
part_one = data[start_list[0]: end_list[0]].iloc[1:, :]
part_two = data[start_list[1]: end_list[1]].iloc[1:, :]
part_three = data[start_list[2]: end_list[2]].iloc[1:, :]
temp_df = pd.concat([part_one.reset_index(drop=True), part_two.reset_index(drop=True),
part_three.reset_index(drop=True)], axis=1, ignore_index=True)
temp_df.columns = ["名次", "会员简称", "成交量", "增减", "名次", "会员简称", "持买单量", "增减", "名次", "会员简称", "持卖单量", "增减"]
temp_df["rank"] = range(1, len(temp_df) + 1)
del temp_df["名次"]
temp_df.columns = ["vol_party_name", "vol", "vol_chg", "long_party_name", "long_open_interest",
"long_open_interest_chg", "short_party_name", "short_open_interest",
"short_open_interest_chg", "rank"]
temp_df["symbol"] = file_name.split("_")[1]
temp_df["variety"] = file_name.split("_")[1][:-4].upper()
temp_df = temp_df[["long_open_interest", "long_open_interest_chg", "long_party_name", "rank",
"short_open_interest", "short_open_interest_chg", "short_party_name",
"vol", "vol_chg", "vol_party_name", "symbol", "variety"]]
big_dict[file_name.split("_")[1]] = temp_df
return big_dict
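# Illustrative usage sketch: the batch export returns a dict keyed by contract
# name (e.g. "a2009" - an example only); each value is a tidy per-contract table.
# dce_dict = futures_dce_position_rank(date="20200511")
# for contract, table in dce_dict.items():
#     print(contract, len(table))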
def futures_dce_position_rank_other(date="20160104"):
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
return {}
url = "http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html"
payload = {
"memberDealPosiQuotes.variety": "c",
"memberDealPosiQuotes.trade_type": "0",
"year": date.year,
"month": date.month-1,
"day": date.day,
"contract.contract_id": "all",
"contract.variety_id": "c",
"contract": "",
}
r = requests.post(url, data=payload)
soup = BeautifulSoup(r.text, "lxml")
symbol_list = [item["onclick"].strip("javascript:setVariety(").strip("');") for item in soup.find_all(attrs={"class": "selBox"})[-3].find_all("input")]
big_df = dict()
for symbol in symbol_list:
payload = {
"memberDealPosiQuotes.variety": symbol,
"memberDealPosiQuotes.trade_type": "0",
"year": date.year,
"month": date.month-1,
"day": date.day,
"contract.contract_id": "all",
"contract.variety_id": symbol,
"contract": "",
}
r = requests.post(url, data=payload)
soup = BeautifulSoup(r.text, "lxml")
contract_list = [item["onclick"].strip("javascript:setContract_id('").strip("');") for item in soup.find_all(attrs={"name": "contract"})]
if contract_list:
if len(contract_list[0]) == 4:
contract_list = [symbol + item for item in contract_list]
for contract in contract_list:
payload = {
"memberDealPosiQuotes.variety": symbol,
"memberDealPosiQuotes.trade_type": "0",
"year": date.year,
"month": date.month - 1,
"day": date.day,
"contract.contract_id": contract,
"contract.variety_id": symbol,
"contract": "",
}
r = requests.post(url, data=payload)
temp_df = | pd.read_html(r.text) | pandas.read_html |
# %%
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import sys
from allensdk.api.queries.image_download_api import ImageDownloadApi
from allensdk.config.manifest import Manifest
import logging
import os
from tqdm import tqdm
# %%
def get_gene_by_id(results_df, ExperimentID):
gene_name = results_df["Gene Symbol"][
results_df["ExperimentID"] == ExperimentID
].iloc[0]
print(
"You are requesting for downloading brain lices of "
+ gene_name
+ " ("
+ ExperimentID
+ ")",
file=sys.stderr,
flush=True,
)
print(
        'The downloaded brain slices will be placed in the dir "' + gene_name + '".',
file=sys.stderr,
flush=True,
)
return gene_name
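# Illustrative usage sketch: `results_df` is expected to carry "Gene Symbol" and
# "ExperimentID" columns, e.g. a table produced by search_by_keywords below.
# The experiment id shown here is a placeholder, not a real Allen Atlas id.
# gene = get_gene_by_id(results_df, "12345678")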
# %%
def search_by_keywords(keywords, outfile):
# create a browser
driver = webdriver.Chrome()
# create a result DataFrame to store results
result = pd.DataFrame()
# the index of necessary columns in the table
column_index = [1, 2, 3, 6]
for ii, keyword in enumerate(keywords):
url = "https://mouse.brain-map.org/search/show?search_term=" + keyword
driver.get(url)
        # make sure the page is correctly loaded using an explicit wait
try:
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, "slick-column-name"))
)
except:
print(
"An exception occurred: an element could not be found.\nThe Internet speed may be too slow."
)
driver.quit()
exit()
# get header at the first loop
# if ii == 0:
# use selenium to find the header
elements = driver.find_elements_by_class_name("slick-column-name")
header = []
for element in elements:
header.append(element.text)
if len(header) == 8:
header = [header[i] for i in column_index]
else:
raise Exception("Something went wrong when accessing the header.")
# user selenium to find the search results in the cells of the table
elements = driver.find_elements_by_tag_name("div[row]")
rows = []
for element in elements:
if element.text:
rows.append([element.text.split("\n")[i - 1] for i in column_index])
# If the search result is present, make it a dataframe
if rows:
table = pd.DataFrame(rows, columns=header)
table.insert(0, "Keyword", keyword)
# If no search result, make an empty dataframe
else:
table = pd.DataFrame([keyword], columns=["Keyword"])
# concatenate the search results of each keyword
result = pd.concat([result, table], ignore_index=True)
# print the search results
print(result)
driver.quit()
result.to_csv(outfile)
return result
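# Illustrative usage sketch: search the Allen Mouse Brain Atlas for two gene
# symbols and cache the hit table. The gene names and output file are example
# values; a local chromedriver matching the installed Chrome is assumed.
# results = search_by_keywords(["Gabra1", "Gad1"], "search_results.csv")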
# %%
def download_brain_slice(df):
# create an image download API
image_api = ImageDownloadApi()
format_str = ".jpg"
# You have probably noticed that the AllenSDK has a logger which notifies you of file downloads.
# Since we are downloading ~300 images, we don't want to see messages for each one.
# The following line will temporarily disable the download logger.
logging.getLogger("allensdk.api.api.retrieve_file_over_http").disabled = True
# get parameters
path, downsample, indices = ask_parameters_for_downloading(df)
print(
"Downloads initiated", end="...", file=sys.stderr, flush=True,
)
for index in indices:
# from indices, get experiment id and gene symbol from df
exp_id = df["Experiment"][index]
# set the dirname as the gene symbol
dirname = df["Gene Symbol"][index]
plane = df["Plane"][index]
section_data_set_id = exp_id
section_image_directory = os.path.join(path, dirname)
# get the image ids for all of the images in this data set
section_images = image_api.section_image_query(
section_data_set_id
        ) # Should be a dictionary of the features of section images
section_image_ids = [
si["id"] for si in section_images
] # Take value of 'id' from the dictionary
# Create a progress bar
pbar_image = tqdm(total=len(section_image_ids), desc=dirname + " " + plane)
for section_image_id in section_image_ids:
file_name = str(section_image_id) + format_str
file_path = os.path.join(section_image_directory, file_name)
Manifest.safe_make_parent_dirs(file_path)
# Check if the file is already downloaded, which happens if the downloads have been interrupted.
saved_file_names = os.listdir(section_image_directory)
if file_name in saved_file_names:
pass
else:
image_api.download_section_image(
section_image_id, file_path=file_path, downsample=downsample
)
pbar_image.update()
pbar_image.close()
# re-enable the logger
logging.getLogger("allensdk.api.api.retrieve_file_over_http").disabled = False
print(
"Downloads completed.", file=sys.stderr, flush=True,
)
# %%
def read_previous_results(infile):
result = | pd.read_csv(infile, index_col=0) | pandas.read_csv |
#%%
import numpy as np
import pandas as pd
from orderedset import OrderedSet as oset
#%%
wals = pd.read_csv('ISO_completos.csv').rename(columns={'Status':'Status_X_L'})
wals_2 = pd.read_csv('ISO_completos_features.csv').rename(columns={'Status':'Status_X_L'})
wiki_merged = pd.read_csv('Wikidata_Wals_IDWALS.csv')
wiki = pd.read_csv('wikidata_v3.csv')
#%%
#region IMPLODE
# group by ISO and collect all the values of each group into a list
country_imploded = wiki.groupby(wiki['ISO']).countryLabel.agg(list)
#%%
# define a helper function because this will be done many times
def implode(df,index_column,data_column):
""" index_column = valor en común para agrupar (en este caso es el ISO), string
data_column = datos que queremos agrupar en una sola columna, string """
return df.groupby(df[index_column])[data_column].agg(list)
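# Illustrative usage sketch: the same ISO-wise aggregation as above, expressed
# through the helper; column names follow the wikidata_v3.csv file loaded earlier.
# country_imploded_2 = implode(wiki, 'ISO', 'countryLabel')
# country_imploded_2.equals(country_imploded)  # expected to be True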
#%%
# apply it to every column and keep the resulting series in a list
agrupadas = []
for column in wiki.columns.values:
if column != 'ISO':
agrupadas.append(implode(wiki,'ISO',column))
#%%
# now build a DataFrame from the already grouped series
df_imploded = pd.concat(agrupadas, axis=1).rename(
columns={'languageLabel':'wiki_name',
'countryLabel':'wiki_country',
'country_ISO':'wiki_countryISO',
'Ethnologe_stastusLabel':'wiki_Status',
'number_of_speaker':'num_speakers',
'coordinates':'wiki_lang_coord',
'population':'country_population'})
#endregion
#%%
#region COLLAPSE
# Convert each list in the DataFrame to an ordered set to keep only the unique values.
# Each entry is then replaced by that set; when a single value remains it is stored
# as a plain string rather than a list.
df_test = df_imploded.copy()
column = df_test['wiki_name']
new_column = []
for index, item in column.items():
values = list(oset(item))
if len(values) == 1:
new_column.append(values[0])
else:
new_column.append(values)
#%%
def notna(values):
    return [x for x in values if str(x) != 'nan']
# define a helper function to do this for every column
def group_idem_oset(df,column_name):
"""Para sacar valores unicos dentro de las listas que quedaron """
new_column = []
for index, item in df[column_name].items():
        values = notna(list(oset(item)))  # unique values of this row, in order of appearance
        if len(values) == 1:
            new_column.append(values[0])  # a single value replaces the list directly
        elif not values:
            new_column.append(np.nan)  # an empty list becomes NaN
        else:
            new_column.append(values)  # several distinct values are kept as a list
return new_column
#%%
# and do the same for every column of the new DataFrame
collapsed = []
for column_name in df_test.columns.values:
new_column = pd.Series(group_idem_oset(df_test,column_name),name=column_name, index=df_test.index)
collapsed.append(new_column)
df_collapsed = | pd.concat(collapsed, axis=1) | pandas.concat |
"""Project Palette
functions for palette project
"""
from tkinter import filedialog
from tkinter.constants import END
import tkinter.messagebox as msgbox
import os
import webbrowser
from bs4 import BeautifulSoup
import pandas as pd
from PIL import Image
import requests
if __name__ == "__main__":
from data import *
else:
from palette.data import *
def get_path(path):
return os.path.join(os.getcwd(), path)
def open_url(url):
try:
webbrowser.open(url)
except webbrowser.Error as exp:
msgbox.showerror("Error", f"Cannot open the browser!\n{exp}")
except Exception:
return
def save_new_csv(entry_1, entry_2, entry_3):
path_now = os.getcwd()
filename = filedialog.asksaveasfilename(
initialdir=path_now,
title="Save",
filetypes=(("Data files", "*.csv"), ("all files", "*.*")),
defaultextension=".csv",
)
if filename == "":
return
else:
data = {
"FEATURE_1": entry_1.get(),
"FEATURE_2": entry_2.get(),
"FEATURE_3": entry_3.get(),
}
df = pd.DataFrame(data, index=[0])
df.to_csv(filename, index=False, encoding="utf-8")
def open_csv(entry_1, entry_2, entry_3):
filename = ""
path_now = os.getcwd()
filename = filedialog.askopenfilename(
title="Find your data",
filetypes=(("Data files", "*.csv"), ("all files", "*.*")),
initialdir=path_now,
)
if filename == "":
return
else:
if os.path.isfile(filename):
entry_1.delete(0, END)
entry_2.delete(0, END)
entry_3.delete(0, END)
try:
df = | pd.read_csv(filename, nrows=1) | pandas.read_csv |
import os
import sys
import argparse
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import torch.nn.functional as TF
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
sys.path.append('../')
# from torchlib.transforms import functional as F
from torchlib.datasets.factory import FactoryDataset
from torchlib.datasets.datasets import Dataset
from torchlib.datasets.fersynthetic import SyntheticFaceDataset
from torchlib.attentionnet import AttentionNeuralNet, AttentionGMMNeuralNet
from torchlib.classnet import ClassNeuralNet
from aug import get_transforms_aug, get_transforms_det
# METRICS
import sklearn.metrics as metrics
from argparse import ArgumentParser
def arg_parser():
"""Arg parser"""
parser = ArgumentParser()
parser.add_argument('--project', metavar='DIR', help='path to projects')
parser.add_argument('--projectname', metavar='DIR', help='name projects')
parser.add_argument('--pathdataset', metavar='DIR', help='path to dataset')
parser.add_argument('--namedataset', metavar='S', help='name to dataset')
parser.add_argument('--pathnameout', metavar='DIR', help='path to out dataset')
parser.add_argument('--filename', metavar='S', help='name of the file output')
parser.add_argument('--model', metavar='S', help='filename model')
parser.add_argument('--breal', type=str, default='real', help='dataset is real or synthetic')
parser.add_argument('--name-method', type=str, default='attnet', help='which neural network')
parser.add_argument("--iteration", type=int, default='2000', help="iteration for synthetic images")
return parser
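# Illustrative invocation sketch (script name, paths and project names are
# placeholders, not taken from the original project):
# python evaluate.py --project ./out --projectname ferattn_exp --pathdataset ~/.datasets \
#     --namedataset ck --model model_best.pth.tar --breal real --name-method attnet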
def main(params=None):
    # This model has a lot of variability, so it needs a lot of parameters.
# We use an arg parser to get all the arguments we need.
# See above for the default values, definitions and information on the datatypes.
parser = arg_parser()
if params:
args = parser.parse_args(params)
else:
args = parser.parse_args()
# Configuration
project = args.project
projectname = args.projectname
pathnamedataset = args.pathdataset
pathnamemodel = args.model
pathproject = os.path.join( project, projectname )
namedataset = args.namedataset
breal = args.breal
name_method = args.name_method
iteration = args.iteration
fname = args.name_method
fnet = {
'attnet': AttentionNeuralNet,
'attgmmnet': AttentionGMMNeuralNet,
'classnet': ClassNeuralNet,
}
no_cuda=False
parallel=False
gpu=0
seed=1
brepresentation=True
bclassification_test=True
brecover_test=False
imagesize=64
kfold = 5
nactores = 10
idenselect = np.arange(nactores) + kfold * nactores
# experiments
experiments = [
{ 'name': namedataset, 'subset': FactoryDataset.training, 'status': breal },
{ 'name': namedataset, 'subset': FactoryDataset.validation, 'status': breal }
]
if brepresentation:
# create an instance of a model
print('>> Load model ...')
network = fnet[fname](
patchproject=project,
nameproject=projectname,
no_cuda=no_cuda,
parallel=parallel,
seed=seed,
gpu=gpu,
)
cudnn.benchmark = True
# load trained model
if network.load( pathnamemodel ) is not True:
print('>>Error!!! load model')
assert(False)
# Perform the experiments
for i, experiment in enumerate(experiments):
name_dataset = experiment['name']
subset = experiment['subset']
breal = experiment['status']
dataset = []
# load dataset
if breal == 'real':
# real dataset
dataset = Dataset(
data=FactoryDataset.factory(
pathname=pathnamedataset,
name=namedataset,
subset=subset,
idenselect=idenselect,
download=True
),
num_channels=3,
transform=get_transforms_det( imagesize ),
)
else:
# synthetic dataset
dataset = SyntheticFaceDataset(
data=FactoryDataset.factory(
pathname=pathnamedataset,
name=namedataset,
subset=subset,
idenselect=idenselect,
download=True
),
pathnameback='~/.datasets/coco',
ext='jpg',
count=iteration,
num_channels=3,
iluminate=True, angle=45, translation=0.3, warp=0.2, factor=0.2,
transform_data=get_transforms_aug( imagesize ),
transform_image=get_transforms_det( imagesize ),
)
dataloader = DataLoader(dataset, batch_size=64, shuffle=False, num_workers=10 )
print("\ndataset:", breal)
print("Subset:", subset)
print("Classes", dataloader.dataset.data.classes)
print("size of data:", len(dataset))
print("num of batches", len(dataloader))
# if method is attgmmnet, then the output has representation vector Zs
# otherwise, the output only has the predicted emotions, and ground truth
if name_method == 'attgmmnet':
# representation
Y_labs, Y_lab_hats, Zs = network.representation(dataloader, breal)
print(Y_lab_hats.shape, Zs.shape, Y_labs.shape)
reppathname = os.path.join(pathproject, 'rep_{}_{}_{}.pth'.format(namedataset, subset,
breal))
torch.save({'Yh': Y_lab_hats, 'Z': Zs, 'Y': Y_labs}, reppathname)
print('save representation ...', reppathname)
else:
Y_labs, Y_lab_hats= network.representation( dataloader, breal )
print("Y_lab_hats shape: {}, y_labs shape: {}".format(Y_lab_hats.shape, Y_labs.shape))
reppathname = os.path.join( pathproject, 'rep_{}_{}_{}.pth'.format(namedataset, subset, breal ) )
torch.save( { 'Yh':Y_lab_hats, 'Y':Y_labs }, reppathname )
print( 'save representation ...', reppathname )
# if calculate the classification result, accuracy, precision, recall and f1
if bclassification_test:
tuplas=[]
print('|Num\t|Acc\t|Prec\t|Rec\t|F1\t|Set\t|Type\t|Accuracy_type\t')
for i, experiment in enumerate(experiments):
name_dataset = experiment['name']
subset = experiment['subset']
breal = experiment['status']
real = breal
rep_pathname = os.path.join( pathproject, 'rep_{}_{}_{}.pth'.format(
namedataset, subset, breal) )
data_emb = torch.load(rep_pathname)
Yto = data_emb['Y']
Yho = data_emb['Yh']
yhat = np.argmax( Yho, axis=1 )
y = Yto
acc = metrics.accuracy_score(y, yhat)
precision = metrics.precision_score(y, yhat, average='macro')
recall = metrics.recall_score(y, yhat, average='macro')
f1_score = 2*precision*recall/(precision+recall)
print( '|{}\t|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|{}\t|{}\t|{}\t'.format(
i,
acc, precision, recall, f1_score,
subset, real, 'topk'
))
cm = metrics.confusion_matrix(y, yhat)
# label = ['Neutral', 'Happiness', 'Surprise', 'Sadness', 'Anger', 'Disgust', 'Fear', 'Contempt']
# cm_display = metrics.ConfusionMatrixDisplay(cm, display_labels=label).plot()
print(cm)
print(f'save y and yhat to {real}_{subset}_y.npz')
np.savez(os.path.join(pathproject, f'{real}_{subset}_y.npz'), name1=yhat, name2=y)
#|Name|Dataset|Cls|Acc| ...
tupla = {
'Name':projectname,
'Dataset': '{}({})_{}'.format( name_dataset, subset, real ),
'Accuracy': acc,
'Precision': precision,
'Recall': recall,
'F1 score': f1_score,
}
tuplas.append(tupla)
# save
df = | pd.DataFrame(tuplas) | pandas.DataFrame |
import json
import pandas as pd
import argparse
#Test how many points the new_cut_dataset has
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', default="new_dataset.txt", type=str, help="Full path to the txt file containing the dataset")
parser.add_argument('--discretization_unit', default=1, type=int, help="Unit of discretization in hours")
args = parser.parse_args()
filename = args.dataset_path
discretization_unit = args.discretization_unit
with open(filename, "r") as f:
data = json.load(f)
print(len(data['embeddings']))
print( | pd.to_datetime(data['start_date']) | pandas.to_datetime |
import os
import sys
import joblib
# sys.path.append('../')
main_path = os.path.split(os.getcwd())[0] + '/covid19_forecast_ml'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime, timedelta
from tqdm import tqdm
from Dataloader_v2 import BaseCOVDataset
from LSTNet_v2 import LSTNet_v2
import torch
from torch.utils.data import Dataset, DataLoader
import argparse
parser = argparse.ArgumentParser(description = 'Training model')
parser.add_argument('--GT_trends', default=None, type=str,
help='Define which Google Trends terms to use: all, related_average, or primary (default)')
parser.add_argument('--batch_size', default=3, type=int,
                    help='Specify the batch size for the data loader')
parser.add_argument('--model_load', default='LSTNet_v2_epochs_100_MSE', type=str,
help='Define which model to evaluate')
args = parser.parse_args()
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Test functions ----------------------------------------
def predict(model, dataloader, min_cases, max_cases):
model.eval()
predictions = None
for i, batch in tqdm(enumerate(dataloader, start=1),leave=False, total=len(dataloader)):
X, Y = batch
Y_pred = model(X).detach().numpy()
if i == 1:
predictions = Y_pred
else:
predictions = np.concatenate((predictions, Y_pred), axis=0)
predictions = predictions*(max_cases-min_cases)+min_cases
columns = ['forecast_cases']
df_predictions = pd.DataFrame(predictions, columns=columns)
return df_predictions
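# Illustrative usage sketch: `min_cases`/`max_cases` must be the same bounds used to
# scale the training targets, so the min-max normalisation can be inverted above.
# model = LSTNet_v2(...)   # constructor arguments omitted (not shown in this script)
# loader = DataLoader(BaseCOVDataset(...), batch_size=args.batch_size, shuffle=False)
# forecasts = predict(model, loader, min_cases, max_cases)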
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Data paths ---------------------------------------------
data_cases_path = os.path.join('data','cases_localidades.csv')
data_movement_change_path = os.path.join('data','Movement','movement_range_colombian_cities.csv')
data_GT_path = os.path.join('data','Google_Trends','trends_BOG.csv')
data_GT_id_terms_path = os.path.join('data','Google_Trends','terms_id_ES.csv')
data_GT_search_terms_path = os.path.join('data','Google_Trends','search_terms_ES.csv')
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Load data ----------------------------------------------
### Load confirmed cases for Bogota
data_cases = pd.read_csv(data_cases_path, usecols=['date_time','location','num_cases','num_diseased'])
data_cases['date_time'] = | pd.to_datetime(data_cases['date_time'], format='%Y-%m-%d') | pandas.to_datetime |
# -*- coding: utf-8 -*-
""" This module is designed for the use with the coastdat2 weather data set
of the Helmholtz-Zentrum Geesthacht.
A description of the coastdat2 data set can be found here:
https://www.earth-syst-sci-data.net/6/147/2014/
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
import os
import pandas as pd
import pvlib
from nose.tools import eq_
from windpowerlib.wind_turbine import WindTurbine
from reegis import coastdat, feedin, config as cfg
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
def feedin_wind_sets_tests():
fn = os.path.join(
os.path.dirname(__file__),
os.pardir,
"tests",
"data",
"test_coastdat_weather.csv",
)
wind_sets = feedin.create_windpowerlib_sets()
weather = pd.read_csv(fn, header=[0, 1])["1126088"]
data_height = cfg.get_dict("coastdat_data_height")
wind_weather = coastdat.adapt_coastdat_weather_to_windpowerlib(
weather, data_height
)
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import os
def _1996(data_dir):
from . import sgf_table_sums
file = "96data35.txt"
ids = pd.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
map_id = dict(zip(ids["ID Code"], ids["State"]))
map_id["00000000000000"] = "United States"
map_id["09000000000000"] = "District of Columbia"
t = pd.read_table(os.path.join(data_dir, file), header=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].map(int)
t["Government Name"] = t["Government Code"].map(map_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = pd.DataFrame(columns=cols)
for n, row in enumerate(sgf_table_sums.sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].isin(sgf_table_sums.sums_new_methodology[row])
== True
)
]["Amount"].sum()
table = pd.melt(table, id_vars="Category", var_name="State")
table["year"] = "1996"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].map(str)
table["State"] = table["State"].map(str)
table["value"] = table["value"].map(int)
table["year"] = table["year"].map(str)
table["units"] = table["units"].map(str)
return table
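# The yearly loaders in this module (_1996 through _2002) differ only in the input
# file name, the year label, and - for 1999 - the fixed-width layout. The helper
# below is an illustrative sketch of the shared parsing pattern; it is not called
# by any of the functions in this file.
def _parse_sgf_fixed_width(data_dir, file, year, category_sums):
    """Sketch: parse a post-1995 SGF fixed-width file into a tidy table."""
    ids = pd.read_excel(
        os.path.join(data_dir, "government-ids.xls"),
        dtype={"ID Code": str, "State": str},
    )
    ids["State"] = ids["State"].str.strip()
    map_id = dict(zip(ids["ID Code"], ids["State"]))
    map_id["00000000000000"] = "United States"
    map_id["09000000000000"] = "District of Columbia"
    t = pd.read_table(os.path.join(data_dir, file), header=None, index_col=None)
    # fixed-width slices: government code, item code, amount
    t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
    t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
    t["Amount"] = [int(t.loc[i, 0][17:29]) for i in t.index]
    t["Government Name"] = t["Government Code"].map(map_id)
    regions = sorted(set(t["Government Name"]))
    table = pd.DataFrame(columns=["Category"] + regions)
    for n, category in enumerate(category_sums.keys()):
        table.loc[n, "Category"] = category
        for region in regions:
            table.loc[n, region] = t[
                (t["Government Name"] == region)
                & (t["Item Code"].isin(category_sums[category]))
            ]["Amount"].sum()
    table = pd.melt(table, id_vars="Category", var_name="State")
    table["year"] = str(year)
    table["units"] = "thousands of us dollars (USD)"
    return table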
def _1997(data_dir):
from . import sgf_table_sums
file = "97data35.txt"
ids = pd.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
map_id = dict(zip(ids["ID Code"], ids["State"]))
map_id["00000000000000"] = "United States"
map_id["09000000000000"] = "District of Columbia"
t = pd.read_table(os.path.join(data_dir, file), header=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].map(int)
t["Government Name"] = t["Government Code"].map(map_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = pd.DataFrame(columns=cols)
for n, row in enumerate(sgf_table_sums.sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].isin(sgf_table_sums.sums_new_methodology[row])
== True
)
]["Amount"].sum()
table = pd.melt(table, id_vars="Category", var_name="State")
table["year"] = "1997"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].map(str)
table["State"] = table["State"].map(str)
table["value"] = table["value"].map(int)
table["year"] = table["year"].map(str)
table["units"] = table["units"].map(str)
return table
def _1998(data_dir):
from . import sgf_table_sums
file = "98data35.txt"
ids = pd.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
map_id = dict(zip(ids["ID Code"], ids["State"]))
map_id["00000000000000"] = "United States"
map_id["09000000000000"] = "District of Columbia"
t = pd.read_table(os.path.join(data_dir, file), header=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].map(int)
t["Government Name"] = t["Government Code"].map(map_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = pd.DataFrame(columns=cols)
for n, row in enumerate(sgf_table_sums.sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].isin(sgf_table_sums.sums_new_methodology[row])
== True
)
]["Amount"].sum()
table = pd.melt(table, id_vars="Category", var_name="State")
table["year"] = "1998"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].map(str)
table["State"] = table["State"].map(str)
table["value"] = table["value"].map(int)
table["year"] = table["year"].map(str)
table["units"] = table["units"].map(str)
return table
def _1999(data_dir):
from . import sgf_table_sums
file = "99state35.txt"
ids = pd.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
map_id = dict(zip(ids["ID Code"], ids["State"]))
map_id["00000000000000"] = "United States"
map_id["09000000000000"] = "District of Columbia"
t = pd.read_table(os.path.join(data_dir, file), header=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Origin"] = [t.loc[i, 0][17:19] for i in t.index]
t["Item Code"] = [t.loc[i, 0][21:24] for i in t.index]
t["Amount"] = [t.loc[i, 0][24:35] for i in t.index]
t["Survery Year"] = 99
t["Year of Data"] = 99
t["Amount"] = t["Amount"].map(int)
t["Government Name"] = t["Government Code"].map(map_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = pd.DataFrame(columns=cols)
for n, row in enumerate(sgf_table_sums.sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].isin(sgf_table_sums.sums_new_methodology[row])
== True
)
]["Amount"].sum()
table = pd.melt(table, id_vars="Category", var_name="State")
table["year"] = "1999"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].map(str)
table["State"] = table["State"].map(str)
table["value"] = table["value"].map(int)
table["year"] = table["year"].map(str)
table["units"] = table["units"].map(str)
return table
def _2000(data_dir):
from . import sgf_table_sums
file = "00state35.txt"
ids = pd.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
map_id = dict(zip(ids["ID Code"], ids["State"]))
map_id["00000000000000"] = "United States"
map_id["09000000000000"] = "District of Columbia"
t = pd.read_table(os.path.join(data_dir, file), header=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].map(int)
t["Government Name"] = t["Government Code"].map(map_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = pd.DataFrame(columns=cols)
for n, row in enumerate(sgf_table_sums.sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].isin(sgf_table_sums.sums_new_methodology[row])
== True
)
]["Amount"].sum()
table = pd.melt(table, id_vars="Category", var_name="State")
table["year"] = "2000"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].map(str)
table["State"] = table["State"].map(str)
table["value"] = table["value"].map(int)
table["year"] = table["year"].map(str)
table["units"] = table["units"].map(str)
return table
def _2001(data_dir):
from . import sgf_table_sums
file = "01state35.txt"
ids = pd.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
map_id = dict(zip(ids["ID Code"], ids["State"]))
map_id["00000000000000"] = "United States"
map_id["09000000000000"] = "District of Columbia"
t = pd.read_table(os.path.join(data_dir, file), header=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].map(int)
t["Government Name"] = t["Government Code"].map(map_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = pd.DataFrame(columns=cols)
for n, row in enumerate(sgf_table_sums.sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].isin(sgf_table_sums.sums_new_methodology[row])
== True
)
]["Amount"].sum()
table = pd.melt(table, id_vars="Category", var_name="State")
table["year"] = "2001"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].map(str)
table["State"] = table["State"].map(str)
table["value"] = table["value"].map(int)
table["year"] = table["year"].map(str)
table["units"] = table["units"].map(str)
return table
def _2002(data_dir):
from . import sgf_table_sums
file = "02state35.txt"
ids = pd.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
map_id = dict(zip(ids["ID Code"], ids["State"]))
map_id["00000000000000"] = "United States"
map_id["09000000000000"] = "District of Columbia"
t = pd.read_table(os.path.join(data_dir, file), header=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].map(int)
t["Government Name"] = t["Government Code"].map(map_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = | pd.DataFrame(columns=cols) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os.path
import math
from IPython.display import display,clear_output
import random
import scipy.stats as st
from sklearn.preprocessing import LabelEncoder
import sklearn.preprocessing as sk
import sklearn.model_selection as skm
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.metrics import accuracy_score,roc_auc_score,f1_score,precision_score,recall_score,cohen_kappa_score,log_loss
from scalarpy.pre_process import preprocess
import ipywidgets as widgets
from yellowbrick.classifier import ROCAUC
from yellowbrick.classifier import PrecisionRecallCurve
from yellowbrick.classifier import ClassificationReport
from yellowbrick.classifier import ClassPredictionError
from yellowbrick.classifier import ConfusionMatrix
from yellowbrick.classifier import DiscriminationThreshold
from yellowbrick.model_selection import LearningCurve
from yellowbrick.model_selection import CVScores
from yellowbrick.model_selection import FeatureImportances
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RandomizedSearchCV
import pickle
import warnings
warnings.filterwarnings('ignore')
def highlight_max(s):
'''
    highlight the maximum in a Series with a yellow background.
'''
is_max = s == s.max()
return ['background-color: yellow' if v else '' for v in is_max]
def highlight_min(s):
'''
    highlight the minimum in a Series with a yellow background.
'''
is_min = s == s.min()
return ['background-color: yellow' if v else '' for v in is_min]
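# Example (illustrative): these two helpers are meant for a pandas Styler, e.g. to
# flag the best value of each metric column in a results table:
#   score_grid.style.apply(highlight_max, subset=["Accuracy", "AUC_ROC"])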
class classifier:
'''
build_classifier(dataset,target=None,preprocess_data=True,classifiers="all",ignore_columns=None,train_size=0.8,random_state=42,impute_missing=True,handle_outliers=True,encode_data=True,normalize=True,
numerical_imputation="mean",categorical_imputation="mode",cat_thresh=10,
outlier_method="iqr",outlier_threshold=2,outlier_strategy="replace_lb_ub",outlier_columns="all",
encoding_strategy="one_hot_encode",high_cardinality_encoding="frequency",encode_drop_first=True,ordinal_map=None,encoding_categorical_features="auto",encode_map=None,
normalization_strategy="min_max",
hyperparameter_tunning="best",param_grid="auto",cv=10,n_iter=10, hyperparameter_scoring="accuracy",n_jobs=1,
verbose=1)
'''
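    # Example (illustrative), assuming `df` is a pandas DataFrame with a binary
    # target column named "target":
    #   clf = classifier(df, target="target", classifiers=["lr", "rf", "xgb"])
    #   clf.auto_classify()
    #   clf.score_grid   # per-model metrics (Accuracy, AUC_ROC, F1 Score, ...)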
def __init__(self,dataset,target=None,preprocess_data=True,classifiers="all",ignore_columns=None,train_size=0.8,random_state=42,impute_missing=True,handle_outliers=True,encode_data=True,normalize=True,sort="accuracy",
numerical_imputation="mean",categorical_imputation="mode",cat_thresh=10,
outlier_method="iqr",outlier_threshold=2,outlier_strategy="replace_lb_ub",outlier_columns="all",
encoding_strategy="one_hot_encode",high_cardinality_encoding="frequency",encode_drop_first=True,ordinal_map=None,encoding_categorical_features="auto",encode_map=None,
handle_imbalance=False,resampling_method="smote",
normalization_strategy="min_max",
hyperparameter_tunning="best",param_grid="auto",cv=10,n_iter=10, hyperparameter_scoring="accuracy",n_jobs=1,
verbose=1):
self.target=target
self.train_size=train_size
self.random_state=random_state
self.classifiers=classifiers
self.pd=preprocess_data
self.sort=sort
self.handle_imbalance=handle_imbalance
self.resampling_method=resampling_method
self.hyperparameter_tunning=hyperparameter_tunning
self.param_grid=param_grid
self.cv=cv
self.n_iter=n_iter
self.n_jobs=n_jobs
self.hyperparameter_scoring=hyperparameter_scoring
if(preprocess_data):
self.pp=preprocess(dataset,target,ignore_columns=ignore_columns)
self.pp.preprocess_data(impute_missing,handle_outliers,encode_data,normalize,
numerical_imputation,categorical_imputation,cat_thresh,
outlier_method,outlier_threshold,outlier_strategy,outlier_columns,
encoding_strategy,high_cardinality_encoding,encode_drop_first,ordinal_map,encoding_categorical_features,encode_map,
normalization_strategy,verbose)
def auto_classify(self,verbose=1):
data=self.pp.data
if(data[self.target].nunique()>2):
self.c_type="multi_class"
else:
self.c_type="binary"
X=data.drop(self.target,axis=1)
y=data[self.target]
self.X_train, self.X_test, self.y_train, self.y_test=skm.train_test_split(X,y,train_size=self.train_size,random_state=self.random_state)
if(self.handle_imbalance):
self.X_train,self.y_train=self.pp.handle_imbalance(self.X_train,self.y_train,self.resampling_method,verbose)
#Logistic Regression
self.models={}
if(verbose):
print("Part-2 Building the models...")
classifiers=self.classifiers
if(classifiers=="all" or ("lr" in classifiers)):
self.lr=LogisticRegression()
self.lr.fit(self.X_train,self.y_train)
self.models["Logistic Regression"]=self.lr
        #Ridge Classifier
if(classifiers=="all" or ("rc" in classifiers)):
self.rc=RidgeClassifier()
self.rc.fit(self.X_train,self.y_train)
self.models["Ridge Classifier"]=self.rc
#KNN
if(classifiers=="all" or ("knn" in classifiers)):
self.knn=KNeighborsClassifier()
self.knn.fit(self.X_train,self.y_train)
self.models["K Neighbors Classifier"]=self.knn
#Decision Tree
if(classifiers=="all" or ("dt" in classifiers)):
self.dt=DecisionTreeClassifier()
self.dt.fit(self.X_train,self.y_train)
self.models["Decision Tree Classifier"]=self.dt
#SVM
if(classifiers=="all" or ("svm" in classifiers)):
self.svm=SVC(kernel="linear")
self.svm.fit(self.X_train,self.y_train)
self.models["Linear SVM"]=self.svm
        #Naive Bayes
if(classifiers=="all" or ("nb" in classifiers)):
self.nb=GaussianNB()
self.nb.fit(self.X_train,self.y_train)
self.models["Navie Bayes"]=self.nb
#Random Forest
if(classifiers=="all" or ("rf" in classifiers)):
self.rf=RandomForestClassifier()
self.rf.fit(self.X_train,self.y_train)
self.models["Random Forest Classifier"]=self.rf
#ADA Boost
if(classifiers=="all" or ("adb" in classifiers)):
self.adb=AdaBoostClassifier()
self.adb.fit(self.X_train,self.y_train)
self.models["AdaBoost Classifier"]=self.adb
#GBM
if(classifiers=="all" or ("gbm" in classifiers)):
self.gbm=GradientBoostingClassifier()
self.gbm.fit(self.X_train,self.y_train)
self.models["Gradient Boosting Classifier"]=self.gbm
#XGBOOST
if(classifiers=="all" or ("xgb" in classifiers)):
self.xgb=XGBClassifier()
self.xgb.fit(self.X_train,self.y_train)
self.models["Extreme Boosting Classifier"]=self.xgb
#lGBM
if(classifiers=="all" or ("lgbm" in classifiers)):
self.lgb=LGBMClassifier()
self.lgb.fit(self.X_train,self.y_train)
self.models["Light Gradient Boosting Classifier"]=self.lgb
if(verbose):
print(30*"=")
print("Part-3 Evaluating Model Performance")
#Evaluate Models
score_grid=pd.DataFrame()
for key,model in self.models.items():
y_pred=model.predict(self.X_test)
accuracy=accuracy_score(self.y_test,y_pred)
auc=roc_auc_score(self.y_test,y_pred)
precision=precision_score(self.y_test,y_pred)
recall=recall_score(self.y_test,y_pred)
f1=f1_score(self.y_test,y_pred)
kappa=cohen_kappa_score(self.y_test,y_pred)
logloss=log_loss(self.y_test,y_pred)
score_dict={"Model":key,"Accuracy":accuracy,"AUC_ROC":auc,"Precision":precision,
"Recall":recall,"F1 Score":f1,"Kappa":kappa,"Log Loss":logloss}
score_grid=score_grid.append( score_dict,ignore_index=True,sort=False)
self.score_grid=score_grid.set_index('Model')
if(self.hyperparameter_tunning=="best"):
if(verbose):
print(30*"=")
print("Part-4 Tunning Hyperparameters")
best=self.score_grid.sort_values(by="Accuracy",ascending=False).iloc[0,:].name
tg=self.tune_model(m_model=best,param_grid=self.param_grid,cv=self.cv,n_iter=self.n_iter,scoring=self.hyperparameter_scoring,n_jobs=self.n_jobs)
tune_grid= | pd.DataFrame() | pandas.DataFrame |
import argparse
import numpy as np
import pandas
import utils
parser = argparse.ArgumentParser()
parser.add_argument("data_path", type=str, help="path to csv file")
utils.add_arguments(parser, ["output"])
args = parser.parse_args()
data_path = args.data_path
out_path = args.output
df = pandas.read_csv(data_path)
aggregate_dict = {
"data_dir": df["data_dir"].iloc[0],
"hyperparameter_keys": [],
"hyperparameter_values": [],
"n_trials": [],
"did_all_trial_complete": [],
"mean_loss": [],
"min_loss": [],
}
hyperparameter_value_combinations = df["hyperparameter_values"].unique()
for value_combination in hyperparameter_value_combinations:
dfv = df[df["hyperparameter_values"] == value_combination]
n_trials = len(dfv)
did_all_trial_complete = np.all(dfv["did_trial_complete"])
losses = dfv["validation_loss"]
loss_mean = losses.mean()
loss_min = losses.min()
aggregate_dict["hyperparameter_keys"].append(dfv["hyperparameter_keys"].iloc[0])
aggregate_dict["hyperparameter_values"].append(value_combination)
aggregate_dict["n_trials"].append(n_trials)
aggregate_dict["did_all_trial_complete"].append(did_all_trial_complete)
aggregate_dict["mean_loss"].append(loss_mean)
aggregate_dict["min_loss"].append(loss_min)
aggregate_df = | pandas.DataFrame(aggregate_dict) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
----------
Some simple classes to be used in sklearn pipelines for pandas input
Informations
----------
Author: <NAME>
Maintainer:
Email: <EMAIL>
Copyright:
Credits:
License:
Version:
Status: in development
"""
import numpy, math, scipy, pandas
import numpy as np
import pandas as pd
from scipy.stats import zscore
from sklearn.base import BaseEstimator, TransformerMixin
# from IPython.display import clear_output
from sklearn import preprocessing
from sklearn.preprocessing import (
# MinMaxScaler,
RobustScaler,
KBinsDiscretizer,
KernelCenterer,
QuantileTransformer,
)
from sklearn.pipeline import Pipeline
from scipy import stats
from .metrics import eval_information_value
class ReplaceValue(BaseEstimator, TransformerMixin):
"""
Description
----------
Replace all values of a column by a specific value.
Arguments
----------
feature_name: str
name of the column to replace
value:
Value to be replaced
replace_by:
        Value to use as the replacement
active: boolean
        This parameter controls whether the transformation is applied. This is useful in hyperparameter searches to test its contribution
        to the final score
Examples
----------
>>> replace = ReplaceValue('first_col','val','new_val')
>>> replace.fit_transform(X,y)
"""
def __init__(self, feature_name, value, replace_by, active=True):
self.active = active
self.feature_name = feature_name
self.value = value
self.replace_by = replace_by
def fit(self, X, y):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformation(X)
def __transformation(self, X_in):
X = X_in.copy()
X[self.feature_name] = X[self.feature_name].replace(self.value, self.replace_by)
return X
class OneFeatureApply(BaseEstimator, TransformerMixin):
"""
Description
----------
    Apply a passed function to all elements of a column
Arguments
----------
feature_name: str
        name of the column to apply the function to
apply: str
String containing the lambda function to be applied
active: boolean
        This parameter controls whether the transformation is applied. This is useful in hyperparameter searches to test its contribution
        to the final score
Examples
----------
>>> apply = OneFeatureApply(feature_name = 'first_col',apply = 'np.log1p(x/2)')
>>> apply.fit_transform(X_trn,y_trn)
"""
def __init__(self, feature_name, apply="x", active=True, variable="x"):
self.feature_name = feature_name
self.apply = eval("lambda ?: ".replace("?", variable) + apply)
self.active = active
def fit(self, X, y):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformation(X)
def __transformation(self, X_in):
X = X_in.copy()
X[self.feature_name] = self.apply(X[self.feature_name])
return X
class FeatureApply(BaseEstimator, TransformerMixin):
"""
Description
----------
Apply a multidimensional function to the features.
Arguments
----------
apply: str
        String containing a multidimensional lambda function to be applied. The names of the columns must appear in the string inside the tag <>. Ex. `apply = "np.log(<column_1> + <column_2>)" `
destination: str
Name of the column to receive the result
drop: bool
        Whether the old feature columns must be dropped from the result.
active: boolean
        This parameter controls whether the transformation is applied. This is useful in hyperparameter searches to test its contribution
        to the final score
Examples
----------
>>> apply = FeatureApply( destination = 'result_column', apply = 'np.log1p(<col_1> + <col_2>)')
>>> apply.fit_transform(X_trn,y_trn)
"""
def __init__(self, apply="x", active=True, destination=None, drop=False):
self.apply = apply
self.active = active
self.destination = destination
self.drop = drop
def fit(self, X, y):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformation(X)
def __transformation(self, X_in):
X = X_in.copy()
cols = list(X.columns)
variables = self.__get_variables(self.apply, cols)
len_variables = len(variables)
new_column = self.__new_column(self.apply, X)
if self.drop:
X = X.drop(columns=variables)
if self.destination:
if self.destination == "first":
X[variables[0]] = new_column
elif self.destination == "last":
X[variables[-1]] = new_column
else:
if type(self.destination) == str:
X[self.destination] = new_column
else:
print(
'[Warning]: <destination> is not a string. Result is on "new_column"'
)
X["new_column"] = new_column
else:
if len_variables == 1:
X[variables[0]] = new_column
else:
X["new_column"] = new_column
return X
def __findall(self, string, pattern):
return [i for i in range(len(string)) if string.startswith(pattern, i)]
def __remove_duplicates(self, x):
return list(dict.fromkeys(x))
def __get_variables(self, string, checklist, verbose=1):
start_pos = self.__findall(string, "<")
end_pos = self.__findall(string, ">")
prop_variables = self.__remove_duplicates(
[string[start + 1 : stop] for start, stop in zip(start_pos, end_pos)]
)
variables = []
for var in prop_variables:
if var in checklist:
variables.append(var)
else:
if verbose > 0:
print("[Error]: Feature " + var + " not found.")
return variables
def __new_column(self, string, dataframe):
cols = list(dataframe.columns)
variables = self.__get_variables(string, cols, verbose=0)
function = eval(
"lambda "
+ ",".join(variables)
+ ": "
+ string.replace("<", "").replace(">", "")
)
new_list = []
for ind, row in dataframe.iterrows():
if len(variables) == 1:
var = eval("[row['" + variables[0] + "']]")
else:
var = eval(
",".join(list(map(lambda st: "row['" + st + "']", variables)))
)
new_list.append(function(*var))
return new_list
class Encoder(BaseEstimator, TransformerMixin):
"""
Description
----------
Encodes categorical features
Arguments
----------
    drop_first: bool
Whether to get k-1 dummies out of k categorical levels by removing the first level.
active: boolean
        This parameter controls whether the transformation is applied. This is useful in hyperparameter searches to test its contribution
        to the final score
"""
def __init__(self, active=True, drop_first=True):
self.active = active
self.drop_first = drop_first
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformation(X)
def __transformation(self, X_in):
return pd.get_dummies(X_in, drop_first=self.drop_first)
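# Example (illustrative): Encoder is a thin wrapper around pd.get_dummies, so
#   Encoder(drop_first=True).fit_transform(X, y)
# returns X with each categorical column expanded into k-1 dummy columns.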
class OneHotMissingEncoder(BaseEstimator, TransformerMixin):
""" """
def __init__(self, columns, suffix="nan", sep="_", dummy_na=True, drop_last=False):
""" """
self.columns = columns
self.suffix = suffix
self.sep = sep
self.any_missing = None
self.column_values = None
self.last_value = None
self.dummy_na = dummy_na
self.drop_last = drop_last
def transform(self, X, **transform_params):
""" """
X_copy = X.copy()
final_columns = []
for col in X_copy.columns:
if col not in self.columns:
final_columns.append(col)
else:
for value in self.column_values[col]:
col_name = col + self.sep + str(value)
if (
self.drop_last
and value == self.last_value[col]
and (not self.any_missing[col])
):
pass # dropping
else:
final_columns.append(col_name)
X_copy[col_name] = (X_copy[col] == value).astype(int)
if self.any_missing[col]:
if self.dummy_na and not self.drop_last:
col_name = col + self.sep + "nan"
final_columns.append(col_name)
X_copy[col_name] = pd.isnull(X_copy[col]).astype(int)
return X_copy[final_columns]
def fit(self, X, y=None, **fit_params):
""" """
        self.any_missing = {col: (pd.isnull(X[col]).sum() > 0) for col in self.columns}  # columns that actually contain missing values
self.column_values = {
col: sorted([x for x in list(X[col].unique()) if pd.notnull(x)])
for col in self.columns
}
self.last_value = {col: self.column_values[col][-1] for col in self.columns}
return self
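# Example (illustrative; "colour" is a placeholder column name):
#   ohe = OneHotMissingEncoder(columns=["colour"], dummy_na=True)
#   X_encoded = ohe.fit(X).transform(X)   # adds colour_<value> columns, plus
#                                         # colour_nan when the column has NaNs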
class MeanModeImputer(BaseEstimator, TransformerMixin):
"""
Description
----------
    Impute missing values: numerical features are filled with the column mean,
    categorical features with the column mode.
    Arguments
    ----------
    features: list or "all"
        Names of the columns to impute; "all" (default) imputes every column.
    active: boolean
        This parameter controls whether the imputation is applied. This is useful in hyperparameter searches to test its contribution
        to the final score
"""
def __init__(self, features="all", active=True):
self.features = features
self.active = active
def fit(self, X, y=None):
if self.features == "all":
self.features = list(X.columns)
# receive X and collect its columns
self.columns = list(X.columns)
        # defining the numerical columns of X
self.numerical_features = list(X._get_numeric_data().columns)
        # defining the categorical columns of X
self.categorical_features = list(
set(list(X.columns)) - set(list(X._get_numeric_data().columns))
)
self.mean_dict = {}
for feature_name in self.features:
if feature_name in self.numerical_features:
self.mean_dict[feature_name] = X[feature_name].mean()
elif feature_name in self.categorical_features:
self.mean_dict[feature_name] = X[feature_name].mode()[0]
return self
def transform(self, X, y=None):
if not self.active:
return X
else:
return self.__transformation(X, y)
def __transformation(self, X_in, y_in=None):
X = X_in.copy()
for feature_name in self.features:
new_list = []
if X[feature_name].isna().sum() > 0:
for ind, row in X[[feature_name]].iterrows():
if pd.isnull(row[feature_name]):
new_list.append(self.mean_dict[feature_name])
else:
new_list.append(row[feature_name])
X[feature_name] = new_list
return X
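# Example (illustrative): numeric NaNs are filled with the column mean,
# categorical NaNs with the column mode:
#   imputer = MeanModeImputer()              # features="all" by default
#   X_filled = imputer.fit_transform(X, y)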
class ScalerDF(BaseEstimator, TransformerMixin):
""""""
def __init__(self, max_missing=0.0, active=True):
self.active = active
self.max_missing = max_missing
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformation(X)
def __transformation(self, X_in):
X = X_in.copy()
scaler = preprocessing.MinMaxScaler(copy=True, feature_range=(0, 1))
try:
ind = np.array(list(X.index)).reshape(-1, 1)
ind_name = X.index.name
df = pd.concat(
[
pd.DataFrame(scaler.fit_transform(X), columns=list(X.columns)),
pd.DataFrame(ind, columns=[ind_name]),
],
1,
)
            X = df.set_index(ind_name)
except:
X = pd.DataFrame(scaler.fit_transform(X), columns=list(X.columns))
return X
def _dataframe_transform(transformer, data):
if isinstance(data, (pd.DataFrame)):
return pd.DataFrame(
transformer.transform(data), columns=data.columns, index=data.index
)
else:
return transformer.transform(data)
class MinMaxScaler(preprocessing.MinMaxScaler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def transform(self, X):
return _dataframe_transform(super(), X)
class StandardScaler(preprocessing.StandardScaler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def transform(self, X):
return _dataframe_transform(super(), X)
class RobustScaler(preprocessing.RobustScaler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def transform(self, X):
return _dataframe_transform(super(), X)
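# Example (illustrative): unlike the plain sklearn scalers, these thin subclasses
# preserve pandas metadata, so scaling a DataFrame returns a DataFrame with the
# original columns and index instead of a bare numpy array:
#   X_scaled = MinMaxScaler().fit(X_train).transform(X_train)   # still a DataFrame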
class DataFrameImputer(TransformerMixin):
def __init__(self):
"""
https://stackoverflow.com/a/25562948/14204691
Impute missing values.
Columns of dtype object are imputed with the most frequent value
in column.
Columns of other types are imputed with mean of column.
"""
def fit(self, X, y=None):
self.fill = pd.Series(
[
X[c].value_counts().index[0]
if X[c].dtype == np.dtype("O")
else X[c].mean()
for c in X
],
index=X.columns,
)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
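# Example (illustrative): object columns are filled with their most frequent
# value, numeric columns with their mean:
#   df = pd.DataFrame({"city": ["a", "a", None], "x": [1.0, None, 3.0]})
#   DataFrameImputer().fit_transform(df)   # "city" -> "a", "x" -> 2.0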
class EncoderDataframe(TransformerMixin):
""""""
def __init__(self, separator="_", drop_first=True):
self.numerical_features = None
self.categorical_features = None
self.separator = separator
self.drop_first = drop_first
#
def fit(self, X, y=None):
# receive X and collect its columns
self.columns = list(X.columns)
        # defining the numerical columns of X
self.numerical_features = list(X._get_numeric_data().columns)
        # defining the categorical columns of X
self.categorical_features = list(
set(list(X.columns)) - set(list(X._get_numeric_data().columns))
)
# make the loop through the columns
new_columns = {}
for col in self.columns:
            # if the column is numeric, append it to new_columns unchanged
if col in self.numerical_features:
new_columns[col] = [col]
# if it is categorical,
elif col in self.categorical_features:
# get all possible categories
unique_elements = X[col].unique().tolist()
                # drop the last category if the user asks for it
if self.drop_first:
unique_elements.pop(-1)
# make a loop through the categories
new_list = []
for elem in unique_elements:
new_list.append(elem)
new_columns[col] = new_list
self.new_columns = new_columns
return self
def transform(self, X, y=None):
X_ = X.reset_index(drop=True).copy()
# columns to be transformed
columns = X_.columns
# columns fitted
if list(columns) != self.columns:
print(
"[Error]: The features in fitted dataset are not equal to the dataset in transform."
)
list_df = []
for col in X_.columns:
if col in self.numerical_features:
list_df.append(X_[col])
elif col in self.categorical_features:
for elem in self.new_columns[col]:
serie = pd.Series(
list(map(lambda x: int(x), list(X_[col] == elem))),
name=str(col) + self.separator + str(elem),
)
list_df.append(serie)
return | pd.concat(list_df, 1) | pandas.concat |
import pandas as pd
import numpy as np
from sklearn.datasets import load_breast_cancer as lbc
from tkinter import *
from tkinter import messagebox
data = lbc()
clm = np.array(data['feature_names'])
df_x = pd.DataFrame(data['data'])
df_y = | pd.DataFrame(data['target']) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
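# Three synthetic records per column for columns 0..2 ('a'..'c'); column 3 ('d')
# deliberately has no records, which exercises the empty-column code paths below.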
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
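        # With apply_per_group=True the function sees each group's records as a single
        # array, so the cumulative sum continues from column 'a' into 'b' within g1;
        # with apply_per_group=False each column is processed separately.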
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
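        # Keep only the first and last raw records: columns 'b' and 'd' become empty,
        # while 'a' and 'c' each retain a single record.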
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
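# In the fixture below -1 acts as a "no value" sentinel: Ranges.from_ts emits one record
# per consecutive run of valid values, so 'a' yields three one-day ranges, 'b' one open
# range, 'c' one closed range and 'd' none.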
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
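        # coverage() is the share of the index covered by ranges: column 'a' covers
        # 3 of 6 days, hence 0.5; with normalize=False the raw covered duration is returned.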
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
| pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns) | pandas.testing.assert_index_equal |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import copy
from datetime import datetime
import time
import pickle
import random
import pandas as pd
import numpy as np
import tensorflow as tf
import pathlib
from sklearn import preprocessing as sk_pre
from base_config import get_configs
_MIN_SEQ_NORM = 10
class Dataset(object):
"""
Builds training, validation and test datasets based on ```tf.data.Dataset``` type
Attributes:
Methods:
"""
def __init__(self, config):
self.config = config
self._data_path = os.path.join(self.config.data_dir, self.config.datafile)
self.is_train = self.config.train
self.seq_len = self.config.max_unrollings
# read and filter data_values based on start and end date
self.data = pd.read_csv(self._data_path, sep=' ', dtype={'gvkey': str})
try:
self.data['date'] = pd.to_datetime(self.data['date'], format="%Y%m%d")
self.start_date = pd.to_datetime(self.config.start_date, format="%Y%m%d")
self.end_date = | pd.to_datetime(self.config.end_date, format="%Y%m%d") | pandas.to_datetime |
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import assetallocation_arp.models.ARP as arp
# Parameters
TIMES_LAG=3
settings=arp.dataimport_settings("Settings")
# Change the universe of markets that is being used
markets="Leverage_MATR" # All "Leverage_all_markets" / Minimalist "Leverage_min"
# Leverage/scaling of individual markets
sleverage ="v" #Equal(e) / Normative(n) / Volatility(v) / Standalone(s)
def signal(index):
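    """Build a per-column momentum signal: three fast/slow EWMA ratios (alpha pairs
    2/15-2/30, 2/30-2/60 and 2/60-2/120) are scaled by their 90-period rolling std,
    averaged, squashed through an S-curve, discretised to weekly values and lagged
    by TIMES_LAG days."""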
sig=pd.DataFrame()
for column in index:
sig1= index[column].ewm(alpha=2/15).mean()/index[column].ewm(alpha=2/30).mean()-1
sig2= index[column].ewm(alpha=2/30).mean()/index[column].ewm(alpha=2/60).mean()-1
sig3= index[column].ewm(alpha=2/60).mean()/index[column].ewm(alpha=2/120).mean()-1
#sig[column]=(sig1/sig1.ewm(alpha=1/30).std()+sig2/sig2.ewm(alpha=1/30).std()+sig3/sig3.ewm(alpha=1/30).std())/3
sig[column]=(sig1/sig1.rolling(window=90).std()+sig2/sig2.rolling(window=90).std()+sig3/sig3.rolling(window=90).std())/3
        # S-curve with a cut-off for large moves; alternative curve without the cut-off: sig[column]=2/(1+math.exp(-2*sig[column]))-1
sig[column]=sig[column]*np.exp(-1*sig[column].pow(2)/6)/(math.sqrt(3)*math.exp(-0.5))
sig=arp.discretise(sig,"weekly")
sig=sig.shift(TIMES_LAG,freq="D")
return sig
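# Illustrative sketch (added here, not part of the original strategy code): the two
# response curves referenced above. The S-curve x*exp(-x**2/6)/(sqrt(3)*exp(-0.5))
# peaks at |x|=sqrt(3) and decays for larger signals, capping exposure on extreme
# momentum readings, while the logistic alternative 2/(1+exp(-2x))-1 saturates at +/-1.
# The helper name _response_curves is hypothetical and is not used by the strategy.
def _response_curves(x):
    s_curve = x * np.exp(-1 * x ** 2 / 6) / (math.sqrt(3) * math.exp(-0.5))
    logistic = 2 / (1 + np.exp(-2 * x)) - 1
    return s_curve, logistic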
# Import data
future=pd.read_pickle("Future data.pkl")
index= | pd.read_pickle("Data.pkl") | pandas.read_pickle |
from __future__ import print_function
import unittest
from unittest import mock
from io import BytesIO, StringIO
import random
import six
import os
import re
import logging
import numpy as np
import pandas as pd
from . import utils as test_utils
import dataprofiler as dp
from dataprofiler.profilers.profile_builder import StructuredColProfiler, \
UnstructuredProfiler, UnstructuredCompiler, StructuredProfiler, Profiler
from dataprofiler.profilers.profiler_options import ProfilerOptions, \
StructuredOptions, UnstructuredOptions
from dataprofiler.profilers.column_profile_compilers import \
ColumnPrimitiveTypeProfileCompiler, ColumnStatsProfileCompiler, \
ColumnDataLabelerCompiler
from dataprofiler import StructuredDataLabeler, UnstructuredDataLabeler
from dataprofiler.profilers.helpers.report_helpers import _prepare_report
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def setup_save_mock_open(mock_open):
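    """Patch ``open`` so that saving writes into an in-memory BytesIO buffer.
    The buffer's ``close`` is a no-op, which lets tests seek back to the start and
    read back exactly what was saved without touching the filesystem.
    """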
mock_file = BytesIO()
mock_file.close = lambda: None
mock_open.side_effect = lambda *args: mock_file
return mock_file
class TestStructuredProfiler(unittest.TestCase):
@classmethod
def setUp(cls):
test_utils.set_seed(seed=0)
@classmethod
def setUpClass(cls):
test_utils.set_seed(seed=0)
cls.input_file_path = os.path.join(
test_root_path, 'data', 'csv/aws_honeypot_marx_geo.csv'
)
cls.aws_dataset = pd.read_csv(cls.input_file_path)
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
with test_utils.mock_timeit():
cls.trained_schema = dp.StructuredProfiler(
cls.aws_dataset, len(cls.aws_dataset), options=profiler_options)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_bad_input_data(self, *mocks):
allowed_data_types = (r"\(<class 'list'>, "
r"<class 'pandas.core.series.Series'>, "
r"<class 'pandas.core.frame.DataFrame'>\)")
bad_data_types = [1, {}, np.inf, 'sdfs']
for data in bad_data_types:
with self.assertRaisesRegex(TypeError,
r"Data must either be imported using "
r"the data_readers or using one of the "
r"following: " + allowed_data_types):
StructuredProfiler(data)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_list_data(self, *mocks):
data = [[1, 1],
[None, None],
[3, 3],
[4, 4],
[5, 5],
[None, None],
[1, 1]]
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data)
# test properties
self.assertEqual("<class 'list'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual([0, 1], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
        # validates that the sampled output maintains the same visual data format
        # as the input.
self.assertListEqual(['5', '1', '1', '3', '4'],
profiler.profile[0].sample)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_pandas_series_data(self, *mocks):
data = pd.Series([1, None, 3, 4, 5, None, 1])
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data)
# test properties
self.assertEqual(
"<class 'pandas.core.series.Series'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual([0], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# test properties when series has name
data.name = 'test'
profiler = dp.StructuredProfiler(data)
self.assertEqual(
"<class 'pandas.core.series.Series'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual(['test'], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._merge_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_chi2')
def test_add_profilers(self, *mocks):
data = pd.DataFrame([1, None, 3, 4, 5, None, 1])
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(data[:2])
profile2 = dp.StructuredProfiler(data[2:])
# test incorrect type
with self.assertRaisesRegex(TypeError,
'`StructuredProfiler` and `int` are '
'not of the same profiler type.'):
profile1 + 3
# test mismatched profiles
profile2._profile.pop(0)
profile2._col_name_to_idx.pop(0)
with self.assertRaisesRegex(ValueError,
"Cannot merge empty profiles."):
profile1 + profile2
# test mismatched profiles due to options
profile2._profile.append(None)
profile2._col_name_to_idx[0] = [0]
with self.assertRaisesRegex(ValueError,
'The two profilers were not setup with the '
'same options, hence they do not calculate '
'the same profiles and cannot be added '
'together.'):
profile1 + profile2
# test success
profile1._profile = [1]
profile1._col_name_to_idx = {"test": [0]}
profile2._profile = [2]
profile2._col_name_to_idx = {"test": [0]}
merged_profile = profile1 + profile2
self.assertEqual(3, merged_profile._profile[
merged_profile._col_name_to_idx["test"][0]])
self.assertIsNone(merged_profile.encoding)
self.assertEqual(
"<class 'pandas.core.frame.DataFrame'>", merged_profile.file_type)
self.assertEqual(2, merged_profile.row_has_null_count)
self.assertEqual(2, merged_profile.row_is_null_count)
self.assertEqual(7, merged_profile.total_samples)
self.assertEqual(5, len(merged_profile.hashed_row_dict))
self.assertDictEqual({'row_stats': 2}, merged_profile.times)
# test success if drawn from multiple files
profile2.encoding = 'test'
profile2.file_type = 'test'
merged_profile = profile1 + profile2
self.assertEqual('multiple files', merged_profile.encoding)
self.assertEqual('multiple files', merged_profile.file_type)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._get_correlation')
def test_stream_profilers(self, *mocks):
mocks[0].return_value = None
data = pd.DataFrame([
['test1', 1.0],
['test2', None],
['test1', 1.0],
[None, None],
[None, 5.0],
[None, 5.0],
[None, None],
['test3', 7.0]])
# check prior to update
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data[:3])
self.assertEqual(1, profiler.row_has_null_count)
self.assertEqual(0, profiler.row_is_null_count)
self.assertEqual(3, profiler.total_samples)
self.assertEqual(2, len(profiler.hashed_row_dict))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# check after update
with test_utils.mock_timeit():
profiler.update_profile(data[3:])
self.assertIsNone(profiler.encoding)
self.assertEqual(
"<class 'pandas.core.frame.DataFrame'>", profiler.file_type)
self.assertEqual(5, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(8, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 2}, profiler.times)
def test_correct_unique_row_ratio_test(self):
self.assertEqual(2999, len(self.trained_schema.hashed_row_dict))
self.assertEqual(2999, self.trained_schema.total_samples)
self.assertEqual(1.0, self.trained_schema._get_unique_row_ratio())
def test_correct_rows_ingested(self):
self.assertEqual(2999, self.trained_schema.total_samples)
def test_correct_null_row_ratio_test(self):
self.assertEqual(2999, self.trained_schema.row_has_null_count)
self.assertEqual(1.0, self.trained_schema._get_row_has_null_ratio())
self.assertEqual(0, self.trained_schema.row_is_null_count)
self.assertEqual(0, self.trained_schema._get_row_is_null_ratio())
self.assertEqual(2999, self.trained_schema.total_samples)
def test_correct_duplicate_row_count_test(self):
self.assertEqual(2999, len(self.trained_schema.hashed_row_dict))
self.assertEqual(2999, self.trained_schema.total_samples)
self.assertEqual(0.0, self.trained_schema._get_duplicate_row_count())
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_correlation(self, *mock):
        # Use the following formula to obtain the pairwise (Pearson) correlation:
        # sum((x - np.mean(x)) * (y - np.mean(y))) /
        #     (np.sqrt(sum((x - np.mean(x)) ** 2)) * np.sqrt(sum((y - np.mean(y)) ** 2)))
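        # Illustrative check (added for clarity, not part of the original test): on a
        # tiny example the formula above agrees with np.corrcoef, which computes the
        # same Pearson coefficient.
        _x, _y = np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 4.0])
        _num = np.sum((_x - _x.mean()) * (_y - _y.mean()))
        _den = np.sqrt(np.sum((_x - _x.mean()) ** 2)) * np.sqrt(np.sum((_y - _y.mean()) ** 2))
        np.testing.assert_almost_equal(_num / _den, np.corrcoef(_x, _y)[0, 1])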
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# data with a sole numeric column
data = pd.DataFrame([1.0, 8.0, 1.0, -2.0, 5.0])
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1.0]])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1, 'correlation': 1}, profiler.times)
        # data with one column with non-numeric values
data = pd.DataFrame([1.0, None, 1.0, None, 5.0])
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1]])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with two columns, but one is numerical
data = pd.DataFrame([
['test1', 1.0],
['test2', None],
['test1', 1.0],
[None, None]])
profiler = dp.StructuredProfiler(data, options=profile_options)
# Even the correlation with itself is NaN because the variance is zero
expected_corr_mat = np.array([
[np.nan, np.nan],
[np.nan, np.nan]
])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns, with nan values
data = pd.DataFrame({'a': [np.nan, np.nan, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, np.nan, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, np.nan, np.nan]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1, -0.28527657, 0.18626508],
[-0.28527657, 1, -0.52996792],
[0.18626508, -0.52996792, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns, with nan values in only one
# column
data = pd.DataFrame({'a': [np.nan, np.nan, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1, 0.03673504, 0.22844891],
[0.03673504, 1, -0.49072329],
[0.22844891, -0.49072329, 1]])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with only one numerical columns without nan values
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1]])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with no numeric columns
data = pd.DataFrame({'a': ['hi', 'hi2', 'hi3'],
'b': ['test1', 'test2', 'test3']})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[np.nan, np.nan],
[np.nan, np.nan]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
        # data with only one numeric column among non-numeric columns
data = pd.DataFrame({'a': ['hi', 'hi2', 'hi3'],
'b': ['test1', 'test2', 'test3'],
'c': [1, 2, 3]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows
data = pd.DataFrame({'a': [None, 2, 1, np.nan, 5, np.nan, 4, 10, 7, np.nan],
'b': [np.nan, 11, 1, 'nan', 2, np.nan, 6, 3, 9, np.nan],
'c': [np.nan, 5, 3, np.nan, 7, np.nan, 6, 8, 1, None]})
profiler = dp.StructuredProfiler(data, options=profile_options)
# correlation between [2, 1, 5, 4, 10, 7],
# [11, 1, 2, 6, 3, 9],
# [5, 3, 7, 6, 8, 1]
expected_corr_mat = np.array([
[1, -0.06987956, 0.32423975],
[-0.06987956, 1, -0.3613099],
[0.32423975, -0.3613099, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows and some imputed values
data = pd.DataFrame({'a': [None, np.nan, 1, 7, 5, 9, 4, 10, np.nan, 2],
'b': [10, 11, 1, 4, 2, 5, np.nan, 3, np.nan, 8],
'c': [1, 5, 3, 5, np.nan, 2, 6, 8, np.nan, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
# correlation between [*38/7*, *38/7*, 1, 7, 5, 9, 4, 10, 2],
# [10, 11, 1, 4, 2, 5, *11/2*, 3, 8],
# [1, 5, 3, 5, *4*, 2, 6, 8, 2]
expected_corr_mat = np.array([
[1, -0.03283837, 0.40038038],
[-0.03283837, 1, -0.30346637],
[0.40038038, -0.30346637, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_merge_correlation(self, *mocks):
        # Use the following formula to obtain the pairwise (Pearson) correlation:
        # sum((x - np.mean(x)) * (y - np.mean(y))) /
        #     (np.sqrt(sum((x - np.mean(x)) ** 2)) * np.sqrt(sum((y - np.mean(y)) ** 2)))
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# merge between two existing correlations
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(data1, options=profile_options)
profile2 = dp.StructuredProfiler(data2, options=profile_options)
merged_profile = profile1 + profile2
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
self.assertDictEqual({'row_stats': 2, 'correlation': 2},
merged_profile.times)
# merge between an existing corr and None correlation (without data)
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(None, options=profile_options)
profile2 = dp.StructuredProfiler(data, options=profile_options)
            # TODO: remove the mock below when merge profile is updated
with mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._add_error_checks'):
merged_profile = profile1 + profile2
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.4907239, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
self.assertDictEqual({'row_stats': 1, 'correlation': 1},
merged_profile.times)
# Merge between existing data and empty data that still has samples
data = pd.DataFrame({'a': [1, 2, 4, np.nan, None, np.nan],
'b': [5, 7, 1, np.nan, np.nan, 'nan']})
data1 = data[:3]
data2 = data[3:]
profile1 = dp.StructuredProfiler(data1, options=profile_options)
expected_corr_mat = np.array([
[1, -0.78571429],
[-0.78571429, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profile1.correlation_matrix)
profile2 = dp.StructuredProfiler(data2, options=profile_options)
merged_profile = profile1 + profile2
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
def test_correlation_update(self):
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# Test with all numeric columns
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.4907239],
[0.26594894270403086, -0.4907239, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 2, 'correlation': 2}, profiler.times)
# Test when there's a non-numeric column
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, np.nan],
[-0.26559388521279237, 1.0, np.nan],
[np.nan, np.nan, np.nan]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with multiple numerical and non-numeric columns, with nan values in only one column
# NaNs imputed to (9+4+10)/3
data = pd.DataFrame({'a': [7, 2, 1, 7, 5, 9, 4, 10, np.nan, np.nan],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
'd': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
        expected_corr_mat = np.array([
            [1, 0.04721482, np.nan, -0.09383408],
            [0.04721482, 1, np.nan, -0.49072329],
            [np.nan, np.nan, np.nan, np.nan],
            [-0.09383408, -0.49072329, np.nan, 1]
        ])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows, all null rows are dropped
data = pd.DataFrame({'a': [np.nan, 2, 1, None, 5, np.nan, 4, 10, 7, 'NaN'],
'b': [np.nan, 11, 1, np.nan, 2, np.nan, 6, 3, 9, np.nan],
'c': [np.nan, 5, 3, np.nan, 7, None, 6, 8, 1, np.nan]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
# correlation between [2, 1, 5, 4, 10, 7],
# [11, 1, 2, 6, 3, 9],
# [5, 3, 7, 6, 8, 1]
expected_corr_mat = np.array([
[1, -0.06987956, 0.32423975],
[-0.06987956, 1, -0.3613099],
[0.32423975, -0.3613099, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows and some imputed values
data = pd.DataFrame({'a': [None, np.nan, 1, 7, 5, 9, 4, 10, 'nan', 2],
'b': [10, 11, 1, 4, 2, 5, 'NaN', 3, None, 8],
'c': [1, 5, 3, 5, np.nan, 2, 6, 8, None, 2]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
# correlation between [*13/3*, *13/3*, 1, 7, 5]
# [10, 11, 1, 4, 2]
# [1, 5, 3, 5, *7/2*]
# then updated with correlation (9th row dropped) between
# [9, 4, 10, 2],
# [5, *16/3*, 3, 8],
# [2, 6, 8, 2]
expected_corr_mat = np.array([
[1, -0.16079606, 0.43658332],
[-0.16079606, 1, -0.2801748],
[0.43658332, -0.2801748, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_chi2(self, *mocks):
# Empty
data = pd.DataFrame([])
profiler = dp.StructuredProfiler(data)
self.assertIsNone(profiler.chi2_matrix)
# Single column
data = pd.DataFrame({'a': ["y", "y", "n", "n", "y"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([1])
self.assertEqual(expected_mat, profiler.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_merge_chi2(self, *mocks):
# Merge empty data
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
profiler1 = dp.StructuredProfiler(None)
profiler2 = dp.StructuredProfiler(data)
with mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._add_error_checks'):
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_update_chi2(self, *mocks):
# Update with empty data
data1 = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data2 = pd.DataFrame({'a': [],
'b': [],
'c': []})
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
def test_correct_datatime_schema_test(self):
profile_idx = self.trained_schema._col_name_to_idx["datetime"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = \
profile.profiles['data_type_profile']._profiles["datetime"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(2, profile.null_count)
six.assertCountEqual(self, ['nan'], profile.null_types)
self.assertEqual(['%m/%d/%y %H:%M'], col_schema_info['date_formats'])
def test_correct_integer_column_detection_src(self):
profile_idx = self.trained_schema._col_name_to_idx["src"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(3, profile.null_count)
def test_correct_integer_column_detection_int_col(self):
profile_idx = self.trained_schema._col_name_to_idx["int_col"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(0, profile.null_count)
def test_correct_integer_column_detection_port(self):
profile_idx = self.trained_schema._col_name_to_idx["srcport"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(197, profile.null_count)
def test_correct_integer_column_detection_destport(self):
profile_idx = self.trained_schema._col_name_to_idx["destport"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(197, profile.null_count)
def test_report(self):
report = self.trained_schema.report()
self.assertListEqual(list(report.keys()), [
'global_stats', 'data_stats'])
self.assertListEqual(
list(report['global_stats']),
[
"samples_used", "column_count", "row_count",
"row_has_null_ratio", 'row_is_null_ratio',
"unique_row_ratio", "duplicate_row_count", "file_type",
"encoding", "correlation_matrix", "chi2_matrix", "profile_schema", "times"
]
)
flat_report = self.trained_schema.report(
report_options={"output_format": "flat"})
self.assertEqual(test_utils.get_depth(flat_report), 1)
with mock.patch('dataprofiler.profilers.helpers.report_helpers'
'._prepare_report') as pr_mock:
self.trained_schema.report(
report_options={"output_format": 'pretty'})
# Once for global_stats, once for each of 16 columns
self.assertEqual(pr_mock.call_count, 17)
def test_report_schema_and_data_stats_match_order(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report()
schema = report["global_stats"]["profile_schema"]
data_stats = report["data_stats"]
expected_schema = {"a": [0, 2], "b": [1, 3], "c": [4], "d": [5]}
self.assertDictEqual(expected_schema, schema)
# Check that the column order in the report matches the column order
        # in the schema (and in the data)
for name in schema:
for idx in schema[name]:
# Use min of column to validate column order amongst duplicates
col_min = data.iloc[0, idx]
self.assertEqual(name, data_stats[idx]["column_name"])
self.assertEqual(col_min, data_stats[idx]["statistics"]["min"])
def test_pretty_report_doesnt_cast_schema(self):
report = self.trained_schema.report(
report_options={"output_format": "pretty"})
# Want to ensure the values of this dict are of type list[int]
# Since pretty "prettifies" lists into strings with ... to shorten
expected_schema = {"datetime": [0], "host": [1], "src": [2],
"proto": [3], "type": [4], "srcport": [5],
"destport": [6], "srcip": [7], "locale": [8],
"localeabbr": [9], "postalcode": [10],
"latitude": [11], "longitude": [12], "owner": [13],
"comment": [14], "int_col": [15]}
self.assertDictEqual(expected_schema,
report["global_stats"]["profile_schema"])
def test_omit_keys_with_duplicate_cols(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report(report_options={
"omit_keys": ["data_stats.a.statistics.min",
"data_stats.d.statistics.max",
"data_stats.*.statistics.null_types_index"]})
# Correctness of schema asserted in prior test
schema = report["global_stats"]["profile_schema"]
data_stats = report["data_stats"]
for idx in range(len(report["data_stats"])):
# Assert that min is absent from a's data_stats and not the others
if idx in schema["a"]:
self.assertNotIn("min", data_stats[idx]["statistics"])
else:
self.assertIn("min", report["data_stats"][idx]["statistics"])
# Assert that max is absent from d's data_stats and not the others
if idx in schema["d"]:
self.assertNotIn("max", report["data_stats"][idx]["statistics"])
else:
self.assertIn("max", report["data_stats"][idx]["statistics"])
# Assert that null_types_index not present in any
self.assertNotIn("null_types_index",
report["data_stats"][idx]["statistics"])
def test_omit_cols_preserves_schema(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
omit_cols = ["a", "d"]
omit_idxs = [0, 2, 5]
omit_keys = [f"data_stats.{col}" for col in omit_cols]
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report(report_options={"omit_keys": omit_keys})
for idx in range(len(report["data_stats"])):
if idx in omit_idxs:
self.assertIsNone(report["data_stats"][idx])
else:
self.assertIsNotNone(report["data_stats"][idx])
# This will keep the data_stats key but remove all columns
report = profiler.report(report_options={"omit_keys": ["data_stats.*"]})
for col_report in report["data_stats"]:
self.assertIsNone(col_report)
def test_report_quantiles(self):
report_none = self.trained_schema.report(
report_options={"num_quantile_groups": None})
report = self.trained_schema.report()
self.assertEqual(report_none, report)
for col in report["data_stats"]:
if col["column_name"] == "int_col":
report_quantiles = col["statistics"]["quantiles"]
break
self.assertEqual(len(report_quantiles), 3)
report2 = self.trained_schema.report(
report_options={"num_quantile_groups": 1000})
for col in report2["data_stats"]:
if col["column_name"] == "int_col":
report2_1000_quant = col["statistics"]["quantiles"]
break
self.assertEqual(len(report2_1000_quant), 999)
self.assertEqual(report_quantiles, {
0: report2_1000_quant[249],
1: report2_1000_quant[499],
2: report2_1000_quant[749],
})
def test_report_omit_keys(self):
# Omit both report keys manually
no_report_keys = self.trained_schema.report(
report_options={"omit_keys": ['global_stats', 'data_stats']})
self.assertCountEqual({}, no_report_keys)
# Omit just data_stats
no_data_stats = self.trained_schema.report(
report_options={"omit_keys": ['data_stats']})
self.assertCountEqual({"global_stats"}, no_data_stats)
# Omit a global stat
no_samples_used = self.trained_schema.report(
report_options={"omit_keys": ['global_stats.samples_used']})
self.assertNotIn("samples_used", no_samples_used["global_stats"])
# Omit all keys
nothing = self.trained_schema.report(
report_options={"omit_keys": ['*']})
self.assertCountEqual({}, nothing)
# Omit every data_stats column
empty_data_stats_cols = self.trained_schema.report(
report_options={"omit_keys": ['global_stats', 'data_stats.*']})
# data_stats key still present, but all columns are None
self.assertCountEqual({"data_stats"}, empty_data_stats_cols)
self.assertTrue(all([rep is None
for rep in empty_data_stats_cols["data_stats"]]))
# Omit specific data_stats column
no_datetime = self.trained_schema.report(
report_options={"omit_keys": ['data_stats.datetime']})
self.assertNotIn("datetime", no_datetime["data_stats"])
# Omit a statistic from each column
no_sum = self.trained_schema.report(
report_options={"omit_keys": ['data_stats.*.statistics.sum']})
self.assertTrue(all(["sum" not in rep["statistics"]
for rep in no_sum["data_stats"]]))
def test_report_compact(self):
report = self.trained_schema.report(
report_options={ "output_format": "pretty" })
omit_keys = [
"data_stats.*.statistics.times",
"data_stats.*.statistics.avg_predictions",
"data_stats.*.statistics.data_label_representation",
"data_stats.*.statistics.null_types_index",
"data_stats.*.statistics.histogram"
]
report = _prepare_report(report, 'pretty', omit_keys)
report_compact = self.trained_schema.report(
report_options={"output_format": "compact"})
self.assertEqual(report, report_compact)
def test_profile_key_name_without_space(self):
def recursive_test_helper(report, prev_key=None):
for key in report:
# do not test keys in 'data_stats' as they contain column names
# neither for 'ave_predictions' and 'data_label_representation'
# as they contain label names
# same for 'null_types_index'
if prev_key not in ['data_stats', 'avg_predictions',
'data_label_representation',
'null_types_index', 'categorical_count']:
# key names should contain only alphanumeric letters or '_'
self.assertIsNotNone(re.match('^[a-zA-Z0-9_]+$', str(key)))
if isinstance(report[key], dict):
recursive_test_helper(report[key], key)
_report = self.trained_schema.report()
recursive_test_helper(_report)
def test_data_label_assigned(self):
# only use 5 samples
trained_schema = dp.StructuredProfiler(self.aws_dataset, samples_per_update=5)
report = trained_schema.report()
has_non_null_column = False
for i in range(len(report['data_stats'])):
# only test non-null columns
if report['data_stats'][i]['data_type'] is not None:
self.assertIsNotNone(report['data_stats'][i]['data_label'])
has_non_null_column = True
if not has_non_null_column:
self.fail(
"Dataset tested did not have a non-null column and therefore "
"could not validate the test.")
def test_text_data_raises_error(self):
text_file_path = os.path.join(
test_root_path, 'data', 'txt/sentence-10x.txt'
)
with self.assertRaisesRegex(TypeError, 'Cannot provide TextData object'
' to StructuredProfiler'):
profiler = dp.StructuredProfiler(dp.Data(text_file_path))
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_chi2')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_row_statistics')
@mock.patch('dataprofiler.profilers.profile_builder.StructuredColProfiler')
def test_sample_size_warning_in_the_profiler(self, *mocks):
        # structured data profile mock
sdp_mock = mock.Mock()
sdp_mock.clean_data_and_get_base_stats.return_value = (None, None)
mocks[0].return_value = sdp_mock
data = pd.DataFrame([1, None, 3, 4, 5, None])
with self.assertWarnsRegex(UserWarning,
"The data will be profiled with a sample "
"size of 3. All statistics will be based on "
"this subsample and not the whole dataset."):
profile1 = dp.StructuredProfiler(data, samples_per_update=3)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_min_col_samples_used(self, *mocks):
# No cols sampled since no cols to sample
empty_df = pd.DataFrame([])
empty_profile = dp.StructuredProfiler(empty_df)
self.assertEqual(0, empty_profile._min_col_samples_used)
# Every column fully sampled
full_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
full_profile = dp.StructuredProfiler(full_df)
self.assertEqual(3, full_profile._min_col_samples_used)
# First col sampled only twice, so that is min
sparse_df = pd.DataFrame([[1, None, None],
[1, 1, None],
[1, None, 1]])
sparse_profile = dp.StructuredProfiler(sparse_df, min_true_samples=2,
samples_per_update=1)
self.assertEqual(2, sparse_profile._min_col_samples_used)
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_profile_from_chunk')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_min_true_samples(self, *mocks):
empty_df = pd.DataFrame([])
# Test invalid input
msg = "`min_true_samples` must be an integer or `None`."
with self.assertRaisesRegex(ValueError, msg):
profile = dp.StructuredProfiler(empty_df, min_true_samples="Bloop")
# Test invalid input given to update_profile
profile = dp.StructuredProfiler(empty_df)
with self.assertRaisesRegex(ValueError, msg):
profile.update_profile(empty_df, min_true_samples="Bloop")
# Test None input (equivalent to zero)
profile = dp.StructuredProfiler(empty_df, min_true_samples=None)
self.assertEqual(None, profile._min_true_samples)
# Test valid input
profile = dp.StructuredProfiler(empty_df, min_true_samples=10)
self.assertEqual(10, profile._min_true_samples)
def test_save_and_load(self):
datapth = "dataprofiler/tests/data/"
test_files = ["csv/guns.csv", "csv/iris.csv"]
for test_file in test_files:
# Create Data and StructuredProfiler objects
data = dp.Data(os.path.join(datapth, test_file))
options = ProfilerOptions()
options.set({"correlation.is_enabled": True})
save_profile = dp.StructuredProfiler(data)
# store the expected data_labeler
data_labeler = save_profile.options.data_labeler.data_labeler_object
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler', return_value=data_labeler):
load_profile = dp.StructuredProfiler.load("mock.pkl")
# validate loaded profile has same data labeler class
self.assertIsInstance(
load_profile.options.data_labeler.data_labeler_object,
data_labeler.__class__)
# only checks first columns
# get first column
first_column_profile = load_profile.profile[0]
self.assertIsInstance(
first_column_profile.profiles['data_label_profile']
._profiles['data_labeler'].data_labeler,
data_labeler.__class__)
# Check that reports are equivalent
save_report = test_utils.clean_report(save_profile.report())
load_report = test_utils.clean_report(load_profile.report())
np.testing.assert_equal(save_report, load_report)
def test_save_and_load_no_labeler(self):
# Create Data and UnstructuredProfiler objects
data = pd.DataFrame([1, 2, 3], columns=["a"])
profile_options = dp.ProfilerOptions()
profile_options.set({"data_labeler.is_enabled": False})
save_profile = dp.StructuredProfiler(data, options=profile_options)
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler'):
load_profile = dp.StructuredProfiler.load("mock.pkl")
# Check that reports are equivalent
save_report = test_utils.clean_report(save_profile.report())
load_report = test_utils.clean_report(load_profile.report())
self.assertDictEqual(save_report, load_report)
# validate both are still usable after
save_profile.update_profile(pd.DataFrame({"a": [4, 5]}))
load_profile.update_profile(pd.DataFrame({"a": [4, 5]}))
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_string_index_doesnt_cause_error(self, *mocks):
dp.StructuredProfiler(pd.DataFrame([[1, 2, 3]], index=["hello"]))
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_dict_in_data_no_error(self, *mocks):
# validates that _update_row_statistics does not error when trying to
# hash a dict.
profiler = dp.StructuredProfiler(pd.DataFrame([[{'test': 1}], [None]]))
self.assertEqual(1, profiler.row_is_null_count)
self.assertEqual(2, profiler.total_samples)
def test_duplicate_columns(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler = dp.StructuredProfiler(data)
# Ensure columns are correctly allocated to profiles in list
expected_mapping = {"a": [0, 2], "b": [1, 3], "c": [4], "d": [5]}
self.assertDictEqual(expected_mapping, profiler._col_name_to_idx)
for col in profiler._col_name_to_idx:
for idx in profiler._col_name_to_idx[col]:
# Make sure every index that a column name maps to represents
# A profile for that named column
self.assertEqual(col, profiler._profile[idx].name)
# Check a few stats to ensure calculation with data occurred
# Initialization ensures column ids and profile ids are identical
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = data.iloc[1, col_idx]
col_sum = col_min + col_max
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
# Check that update works as expected
new_data = pd.DataFrame([[100, 200, 300, 400, 500, 600]],
columns=["a", "b", "a", "b", "c", "d"])
profiler.update_profile(new_data)
self.assertDictEqual(expected_mapping, profiler._col_name_to_idx)
for col in profiler._col_name_to_idx:
for idx in profiler._col_name_to_idx[col]:
# Make sure every index that a column name maps to represents
# A profile for that named column
self.assertEqual(col, profiler._profile[idx].name)
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = new_data.iloc[0, col_idx]
col_sum = col_min + col_max + data.iloc[1, col_idx]
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
def test_unique_col_permutation(self, *mocks):
data = pd.DataFrame([[1, 2, 3, 4],
[5, 6, 7, 8]],
columns=["a", "b", "c", "d"])
perm_data = pd.DataFrame([[4, 3, 2, 1],
[8, 7, 6, 5]],
columns=["d", "c", "b", "a"])
# Test via add
first_profiler = dp.StructuredProfiler(data)
perm_profiler = dp.StructuredProfiler(perm_data)
profiler = first_profiler + perm_profiler
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = data.iloc[1, col_idx]
# Sum is doubled since it was updated with the same vals
col_sum = 2 * (col_min + col_max)
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
# Test via update
profiler = dp.StructuredProfiler(data)
profiler.update_profile(perm_data)
for col_idx in range(len(profiler._profile)):
col_min = data.iloc[0, col_idx]
col_max = data.iloc[1, col_idx]
# Sum is doubled since it was updated with the same vals
col_sum = 2 * (col_min + col_max)
self.assertEqual(col_min, profiler._profile[col_idx].
profile["statistics"]["min"])
self.assertEqual(col_max, profiler._profile[col_idx].
profile["statistics"]["max"])
self.assertEqual(col_sum, profiler._profile[col_idx].
profile["statistics"]["sum"])
def test_get_and_validate_schema_mapping(self):
unique_schema_1 = {"a": [0], "b": [1], "c": [2]}
unique_schema_2 = {"a": [2], "b": [0], "c": [1]}
unique_schema_3 = {"a": [0], "b": [1], "d": [2]}
msg = "Columns do not match, cannot update or merge profiles."
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
unique_schema_1,unique_schema_3)
expected_schema = {0: 0, 1: 1, 2: 2}
actual_schema = dp.StructuredProfiler. \
_get_and_validate_schema_mapping(unique_schema_1, {})
self.assertDictEqual(actual_schema, expected_schema)
expected_schema = {0: 2, 1: 0, 2: 1}
actual_schema = dp.StructuredProfiler. \
_get_and_validate_schema_mapping(unique_schema_1, unique_schema_2)
self.assertDictEqual(actual_schema, expected_schema)
dupe_schema_1 = {"a": [0], "b": [1, 2], "c": [3, 4, 5]}
dupe_schema_2 = {"a": [0], "b": [1, 3], "c": [2, 4, 5]}
dupe_schema_3 = {"a": [0, 1], "b": [2, 3, 4], "c": [5]}
four_col_schema = {"a": [0], "b": [1, 2], "c": [3, 4, 5], "d": [6]}
msg = ("Different number of columns detected for "
"'a', cannot update or merge profiles.")
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
dupe_schema_1, dupe_schema_3)
msg = ("Different column indices under "
"duplicate name 'b', cannot update "
"or merge unless schema is identical.")
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
dupe_schema_1, dupe_schema_2)
msg = "Attempted to merge profiles with different numbers of columns"
with self.assertRaisesRegex(ValueError, msg):
dp.StructuredProfiler._get_and_validate_schema_mapping(
dupe_schema_1, four_col_schema)
expected_schema = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
actual_schema = dp.StructuredProfiler. \
_get_and_validate_schema_mapping(dupe_schema_1, dupe_schema_1)
self.assertDictEqual(actual_schema, expected_schema)
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnPrimitiveTypeProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnStatsProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnDataLabelerCompiler.diff")
def test_diff(self, *mocks):
# Data labeler compiler diff
mocks[0].return_value = {
'statistics': {
'avg_predictions': {
'a': 'unchanged'
},
'label_representation': {
'a': 'unchanged'
}
},
'data_label': [[], ['a'], []]
}
# stats compiler diff
mocks[1].return_value = {
'order': ['ascending', 'descending'],
'categorical': 'unchanged',
'statistics': {
'all_compiler_stats': 'unchanged'
}
}
# primitive stats compiler diff
mocks[2].return_value = {
'data_type_representation': {
'all_data_types': 'unchanged'
},
'data_type': 'unchanged',
'statistics': {
'numerical_statistics_here': "unchanged"
}
}
data1 = pd.DataFrame([[1, 2], [5, 6]], columns=["a", "b"])
data2 = pd.DataFrame([[4, 3], [8, 7], [None, None], [9, 10]],
columns=["a", "b"])
options = dp.ProfilerOptions()
options.structured_options.correlation.is_enabled = True
profile1 = dp.StructuredProfiler(data1, options=options)
options2 = dp.ProfilerOptions()
options2.structured_options.correlation.is_enabled = True
profile2 = dp.StructuredProfiler(data2, options=options2)
expected_diff = {
'global_stats': {
'samples_used': -2,
'column_count': 'unchanged',
'row_count': -2,
'row_has_null_ratio': -0.25,
'row_is_null_ratio': -0.25,
'unique_row_ratio': 'unchanged',
'duplicate_row_count': -0.25,
'file_type': 'unchanged',
'encoding': 'unchanged',
'correlation_matrix':
np.array([[1.11022302e-16, 3.13803955e-02],
[3.13803955e-02, 0.00000000e+00]],
dtype=np.float),
'chi2_matrix':
np.array([[ 0. , -0.04475479],
[-0.04475479, 0. ]],
dtype=np.float),
'profile_schema':
[{}, {'a': 'unchanged', 'b': 'unchanged'}, {}]},
'data_stats': [
{
'column_name': 'a',
'data_type': 'unchanged',
'data_label': [[], ['a'], []],
'categorical': 'unchanged',
'order': ['ascending', 'descending'],
'statistics': {
'numerical_statistics_here':
'unchanged',
'all_compiler_stats':
'unchanged',
'avg_predictions': {'a': 'unchanged'},
'label_representation': {'a': 'unchanged'},
'sample_size': -2,
'null_count': -1,
'null_types': [[], [], ['nan']],
'null_types_index': [{}, {}, {'nan': {2}}],
'data_type_representation': {
'all_data_types': 'unchanged'
}
}
},
{
'column_name': 'b',
'data_type': 'unchanged',
'data_label': [[], ['a'], []],
'categorical': 'unchanged',
'order': ['ascending', 'descending'],
'statistics': {
'numerical_statistics_here': 'unchanged',
'all_compiler_stats': 'unchanged',
'avg_predictions': {'a': 'unchanged'},
'label_representation': {'a': 'unchanged'},
'sample_size': -2,
'null_count': -1,
'null_types': [[], [], ['nan']],
'null_types_index': [{}, {}, {'nan': {2}}],
'data_type_representation': {
'all_data_types': 'unchanged'
}
}
}
]
}
diff = profile1.diff(profile2)
expected_corr_mat = expected_diff["global_stats"].pop("correlation_matrix")
diff_corr_mat = diff["global_stats"].pop("correlation_matrix")
expected_chi2_mat = expected_diff["global_stats"].pop("chi2_matrix")
diff_chi2_mat = diff["global_stats"].pop("chi2_matrix")
np.testing.assert_array_almost_equal(expected_corr_mat, diff_corr_mat)
np.testing.assert_array_almost_equal(expected_chi2_mat, diff_chi2_mat)
self.assertDictEqual(expected_diff, diff)
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
def test_diff_type_checking(self, *mocks):
data = pd.DataFrame([[1, 2], [5, 6]],
columns=["a", "b"])
profile = dp.StructuredProfiler(data)
with self.assertRaisesRegex(TypeError,
'`StructuredProfiler` and `str` are not of '
'the same profiler type.'):
profile.diff("ERROR")
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
def test_diff_with_different_schema(self, *mocks):
data1 = pd.DataFrame([[1, 2], [5, 6]],
columns=["G", "b"])
data2 = pd.DataFrame([[4, 3, 1], [8, 7, 3], [None, None, 1], [9, 1, 10]],
columns=["a", "b", "c"])
# Test via add
profile1 = dp.StructuredProfiler(data1)
profile2 = dp.StructuredProfiler(data2)
expected_diff = {
'global_stats': {
'file_type': 'unchanged',
'encoding': 'unchanged',
'samples_used': -2,
'column_count': -1,
'row_count': -2,
'row_has_null_ratio': -0.25,
'row_is_null_ratio': 'unchanged',
'unique_row_ratio': 'unchanged',
'duplicate_row_count': 'unchanged',
'correlation_matrix': None,
'chi2_matrix': None,
'profile_schema': [{'G': [0]},
{'b': 'unchanged'},
{'a': [0], 'c': [2]}]},
'data_stats': []
}
self.assertDictEqual(expected_diff, profile1.diff(profile2))
@mock.patch("dataprofiler.profilers.data_labeler_column_profile."
"DataLabelerColumn.update")
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnPrimitiveTypeProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnStatsProfileCompiler.diff")
@mock.patch("dataprofiler.profilers.column_profile_compilers."
"ColumnDataLabelerCompiler.diff")
@mock.patch("sys.stderr", new_callable=StringIO)
def test_logs(self, mock_stderr, *mocks):
options = StructuredOptions()
options.multiprocess.is_enabled = False
# Capture logs of level INFO and above
with self.assertLogs('DataProfiler.profilers.profile_builder',
level='INFO') as logs:
StructuredProfiler(pd.DataFrame([[0, 1], [2, 3]]), options=options)
# Logs to update user on nulls and statistics
self.assertEqual(['INFO:DataProfiler.profilers.profile_builder:'
'Finding the Null values in the columns... ',
'INFO:DataProfiler.profilers.profile_builder:'
'Calculating the statistics... '],
logs.output)
# Ensure tqdm printed progress bar
self.assertIn('#' * 10, mock_stderr.getvalue())
# Clear stderr
mock_stderr.seek(0)
mock_stderr.truncate(0)
# Now tqdm shouldn't be printed
dp.set_verbosity(logging.WARNING)
StructuredProfiler(pd.DataFrame([[0, 1], [2, 3]]))
# Ensure no progress bar printed
self.assertNotIn('#' * 10, mock_stderr.getvalue())
def test_unique_row_ratio_empty_profiler(self):
profiler = StructuredProfiler(pd.DataFrame([]))
self.assertEqual(0, profiler._get_unique_row_ratio())
class TestStructuredColProfilerClass(unittest.TestCase):
def setUp(self):
test_utils.set_seed(seed=0)
@classmethod
def setUpClass(cls):
test_utils.set_seed(seed=0)
cls.input_file_path = os.path.join(
test_root_path, 'data', 'csv/aws_honeypot_marx_geo.csv'
)
cls.aws_dataset = | pd.read_csv(cls.input_file_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = | pd.Series([1, 2, 3, 4]) | pandas.Series |
import pandas as pd
import os,sys
import re
import torch
inp_path = r'/home/tiwarikajal/embeddingdata'
out_path = r'/home/tiwarikajal/data/'
error = []
df = | pd.DataFrame(columns=['year', 'Company', 'embeddings1a', 'embeddings7']) | pandas.DataFrame |
import mysql.connector
import pandas as pd
class MySQLInterface:
def __init__(self, server, username, password, dbname):
self.server = server
self.username = username
self.password = password
self.dbname = dbname
def __connect(self):
try:
self.cnx = mysql.connector.connect(user=self.username, password=self.password, host=self.server, database=self.dbname)
return True
except mysql.connector.Error as err:
print(err)
return False
def select(self, query):
if(not self.__connect()):
return None
try:
output = []
cursor = self.cnx.cursor()
cursor.execute(query)
for row in cursor:
inner_list = []
for val in row:
inner_list.append(str(val).strip())
output.append(inner_list)
cursor.close()
self.cnx.close()
return | pd.DataFrame(output) | pandas.DataFrame |
import pandas as pd
def generate_train(playlists):
# define category range
cates = {'cat1': (10, 50), 'cat2': (10, 78), 'cat3': (10, 100), 'cat4': (40, 100), 'cat5': (40, 100),
'cat6': (40, 100),'cat7': (101, 250), 'cat8': (101, 250), 'cat9': (150, 250), 'cat10': (150, 250)}
cat_pids = {}
for cat, interval in cates.items():
df = playlists[(playlists['num_tracks'] >= interval[0]) & (playlists['num_tracks'] <= interval[1])].sample(
n=1000)
cat_pids[cat] = list(df.pid)
playlists = playlists.drop(df.index)
playlists = playlists.reset_index(drop=True)
return playlists, cat_pids
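# Illustrative sketch (not part of the original script): how generate_train could be exercised
# on a synthetic playlists frame. The column names 'pid' and 'num_tracks' match what the
# function expects; the frame itself and the row count are made up and only need to be large
# enough for the per-category sample(n=1000) calls.
def _demo_generate_train():
    import numpy as np
    rng = np.random.default_rng(0)
    toy_playlists = pd.DataFrame({
        'pid': np.arange(60000),
        'num_tracks': rng.integers(10, 251, size=60000),
    })
    remaining, cat_pids = generate_train(toy_playlists)
    # 1000 pids are held out per category; the rest stay available for training
    print({cat: len(pids) for cat, pids in cat_pids.items()})
    print(len(remaining))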
def generate_test(cat_pids, playlists, interactions, tracks):
def build_df_none(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
def build_df_name(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['name', 'pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
df_test_pl = pd.DataFrame()
df_test_itr = pd.DataFrame()
df_eval_itr = pd.DataFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_samples = 0
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
# all interactions used for evaluation
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
df_eval_itr = pd.concat([df_eval_itr, df_itr])
# clean interactions for training
interactions = interactions.drop(df_itr.index)
print("cat1 done")
if cat == 'cat2':
num_samples = 1
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[df_itr['pos'] == 0]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat2 done")
if cat == 'cat3':
num_samples = 5
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat3 done")
if cat == 'cat4':
num_samples = 5
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat4 done")
if cat == 'cat5':
num_samples = 10
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = | pd.concat([df_test_itr, df_sample]) | pandas.concat |
#!/usr/bin/env python3
import os
import io
import re
import argparse
import itertools
import collections as col
import operator as op
import pickle as pck
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--agp-file',
'-a',
type=str,
dest='agp',
help='AGP assembly layout file generated by Bionano tool set for hybrid assembly.'
)
parser.add_argument(
'--fasta-file',
'-f',
type=str,
dest='fasta',
        help='FASTA file containing scaffold sequences generated by Bionano tool set for hybrid assembly.'
)
parser.add_argument(
'--dummy-fasta',
'-d',
type=str,
dest='dummy',
help='FASTA sequence for dummy contig used to avoid empty output files.'
)
parser.add_argument('--no-fasta-cache', action='store_true', default=False, dest='no_fasta_cache')
parser.add_argument(
'--bed-file',
'-b',
type=str,
dest='bed',
help='Contig-to-reference alignments (unfiltered) of original contigs used as input to hybrid scaffolding.'
)
parser.add_argument(
'--assembly-index',
'-fai',
type=str,
dest='index',
help='FASTA index file of original assembly, used to verify scaffolded contig length.'
)
parser.add_argument(
'--output',
'-o',
type=str,
dest='output',
help='Specify output prefix (directories will be created). Default: $PWD/bng_hybrid',
default=os.path.join(os.getcwd(), 'bng_hybrid')
)
args = parser.parse_args()
return args
def compute_scaffold_layout(agp_layout, fasta_seqs, layout_cache):
if layout_cache is None:
fasta_layout = compute_scaffold_sequence_stats(agp_layout, fasta_seqs)
else:
if os.path.isfile(layout_cache):
with pd.HDFStore(layout_cache, 'r') as hdf:
fasta_layout = hdf['/cache']
else:
fasta_layout = compute_scaffold_sequence_stats(agp_layout, fasta_seqs)
with pd.HDFStore(layout_cache, 'w') as hdf:
hdf.put('cache', fasta_layout, format='fixed')
return fasta_layout
def match_agp_to_fasta(agp_row, sequence_part):
get_nuc_counts = op.itemgetter(*('A', 'C', 'G', 'T', 'a', 'c', 'g', 't', 'N', 'n'))
orient_map = {
'+': 1,
'-': -1
}
seq_stats = col.Counter(sequence_part)
if agp_row['comp_type'] == 'N':
# inserted sequence gap
entity = (
agp_row['object_name'],
'gap',
agp_row['comp_number'],
agp_row['object_start'] - 1,
agp_row['object_end'],
int(agp_row['comp_name_OR_gap_length']),
0, # orientation
'gap',
-1, # component start, end, complete
-1,
-1,
*get_nuc_counts(seq_stats)
)
elif agp_row['comp_type'] == 'W':
# assembled WGS contig
contig_name = agp_row['comp_name_OR_gap_length']
if '_subseq_' in contig_name:
contig_name, subseq = contig_name.split('_subseq_')
start, end = subseq.split(':')
start = int(start) - 1
end = int(end)
else:
start = 0
end = len(sequence_part)
orientation = orient_map[agp_row['comp_orient_OR_linkage_evidence']]
# BUG / FIXME
# this should check if the contig is scaffolded start to end;
# it is trivially always the same length as the respective sequence
# part, so the below condition will always be 1
complete = 1 if (end - start) == len(sequence_part) else 0
comp_length = agp_row['object_end'] - (agp_row['object_start'] - 1)
assert comp_length == len(sequence_part) == (end - start), \
'Length mismatch: {} / {} / {}'.format(comp_length, len(sequence_part), end - start)
entity = (
agp_row['object_name'],
'sequence',
agp_row['comp_number'],
agp_row['object_start'] - 1,
agp_row['object_end'],
(end - start),
orientation,
contig_name,
start,
end,
complete,
*get_nuc_counts(seq_stats)
)
else:
raise ValueError('Unexpected component type: {}'.format(agp_row))
return entity
def compute_scaffold_sequence_stats(agp_layout, fasta_seqs):
get_nuc_counts = op.itemgetter(*('A', 'C', 'G', 'T', 'a', 'c', 'g', 't', 'N', 'n'))
scaffold_idx = agp_layout['object_name'].str.match('Super-Scaffold')
scaffolds = sorted(set(agp_layout.loc[scaffold_idx, 'object_name'].values))
fasta_entities = []
for scf in scaffolds:
scf_seq = fasta_seqs[scf]
seq_stats = col.Counter(scf_seq)
fasta_entities.append(
(
'scaffold',
'self',
0,
0,
len(scf_seq),
len(scf_seq),
0,
scf,
0,
len(scf_seq),
1,
*get_nuc_counts(seq_stats)
)
)
for idx, row in agp_layout.loc[agp_layout['object_name'] == scf, :].iterrows():
row_entity = match_agp_to_fasta(row, scf_seq[row['object_start']-1:row['object_end']])
fasta_entities.append(row_entity)
df = pd.DataFrame.from_records(
fasta_entities,
columns=[
'object',
'component',
'order',
'start',
'end',
'length',
'orientation',
'name',
'component_start',
'component_end',
'component_complete',
'A', 'C', 'G', 'T', 'a', 'c', 'g', 't', 'N', 'n'
]
)
return df
def load_assembly_contig_sizes(file_path):
columns = ['contig_name', 'contig_size']
df = pd.read_csv(
file_path,
sep='\t',
header=None,
names=columns,
usecols=columns,
index_col=None
)
return df
def read_fasta_file(fasta_path):
current_scf = None
current_seq = ''
seq_store = dict()
with open(fasta_path, 'r') as fasta:
for line in fasta:
if line.startswith('>'):
if current_scf is not None:
seq_store[current_scf] = current_seq
current_seq = ''
scaffold = line.strip().strip('>')
current_scf = scaffold
continue
current_seq += line.strip()
if current_seq:
seq_store[current_scf] = current_seq
return seq_store
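# Illustrative sketch (not part of the original script): round-trip a tiny FASTA through
# read_fasta_file. The temporary file and the scaffold names are made up.
def _demo_read_fasta_file():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.fasta', delete=False) as tmp_fasta:
        tmp_fasta.write('>Super-Scaffold_1\nACGT\nNNAC\n>Super-Scaffold_2\nTTGA\n')
        tmp_path = tmp_fasta.name
    # multi-line records are concatenated per header
    print(read_fasta_file(tmp_path))  # {'Super-Scaffold_1': 'ACGTNNAC', 'Super-Scaffold_2': 'TTGA'}
    os.remove(tmp_path)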
def load_fasta_scaffolds(fasta_path, seq_cache):
if seq_cache is None:
seq_store = read_fasta_file(fasta_path)
else:
if os.path.isfile(seq_cache):
with open(seq_cache, 'rb') as cache:
seq_store = pck.load(cache)
else:
seq_store = read_fasta_file(fasta_path)
with open(seq_cache, 'wb') as cache:
pck.dump(seq_store, cache)
return seq_store
def fill_in_gap_coordinates(fasta_layout):
rows = []
starts = []
ends = []
for idx in fasta_layout.loc[fasta_layout['component'] == 'gap', :].index.values:
rows.append(idx)
starts.append(fasta_layout.at[idx-1, 'end'])
ends.append(fasta_layout.at[idx+1, 'start'])
fasta_layout.loc[rows, 'start'] = starts
fasta_layout.loc[rows, 'end'] = ends
return fasta_layout
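# Illustrative sketch (not part of the original script): gap rows start with -1 coordinates
# and are filled from the flanking sequence rows. The frame below is a made-up miniature of
# the layout produced by compute_scaffold_sequence_stats, with only the columns that matter here.
def _demo_fill_in_gap_coordinates():
    toy_layout = pd.DataFrame({
        'component': ['self', 'sequence', 'gap', 'sequence'],
        'start': [0, 0, -1, 125],
        'end': [200, 100, -1, 200],
    })
    toy_layout = fill_in_gap_coordinates(toy_layout)
    # the gap row now spans the end of the previous block (100) to the start of the next (125)
    print(toy_layout.loc[toy_layout['component'] == 'gap', ['start', 'end']])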
def parse_agp_layout(agp_path):
agp_header = [
'object_name',
'object_start',
'object_end',
'comp_number',
'comp_type',
'comp_name_OR_gap_length',
'comp_start_OR_gap_type',
'comp_end_OR_linkage',
'comp_orient_OR_linkage_evidence'
]
df = pd.read_csv(agp_path, sep='\t', comment='#', names=agp_header)
# hopfully, all AGP files are simple in structure
assert len(set(df['comp_type'].values).union(set(['W', 'N']))) == 2, 'Unexpected component type'
assert df['comp_end_OR_linkage'].str.match('([0-9]+|yes)').all(), 'Unexpected linkage type'
assert df['comp_start_OR_gap_type'].str.match('([0-9]+|scaffold)').all(), 'Unexpected gap type'
return df
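# Illustrative sketch (not part of the original script): parse_agp_layout only needs a
# file-like object with tab-separated AGP rows, so a StringIO with two made-up contig lines
# and one gap line is enough to see the resulting frame and pass the sanity asserts.
def _demo_parse_agp_layout():
    agp_text = '\n'.join([
        'Super-Scaffold_1\t1\t100\t1\tW\tcontig_A_subseq_1:100\t1\t100\t+',
        'Super-Scaffold_1\t101\t125\t2\tN\t25\tscaffold\tyes\tmap',
        'Super-Scaffold_1\t126\t200\t3\tW\tcontig_A_subseq_126:200\t1\t75\t+',
    ])
    print(parse_agp_layout(io.StringIO(agp_text)))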
def compute_bng_contig_support(agp_layout):
supported = []
unsupported = []
contig_names = []
contig_to_scaffold = col.defaultdict(list)
scaffold_to_contig = col.defaultdict(list)
unsupported_broken = col.Counter()
for idx, row in agp_layout.iterrows():
if row['comp_type'] == 'N':
continue
else:
contig_name = row['comp_name_OR_gap_length']
if 'subseq' in contig_name:
contig_name = contig_name.split('_subseq_')[0]
if 'Super-Scaffold' not in row['object_name']:
# unscaffolded sequence
if 'subseq' in row['comp_name_OR_gap_length']:
# happens that multiple fragments of a contig
# appear as unsupported / unscaffolded for
# whatever reason
unsupported_broken[contig_name] += 1
supported.append(0)
unsupported.append(int(row['comp_end_OR_linkage']))
else:
contig_to_scaffold[contig_name].append(row['object_name'])
scaffold_to_contig[row['object_name']].append(contig_name)
supported.append(int(row['comp_end_OR_linkage']))
unsupported.append(0)
contig_names.append(contig_name)
df = pd.DataFrame(
[contig_names, supported, unsupported],
index=[
'contig_name',
'BNG_supported',
'BNG_unsupported'
]
)
df = df.transpose()
contig_counts = df['contig_name'].value_counts()
df = df.groupby('contig_name')[['BNG_supported', 'BNG_unsupported']].sum()
df['contig_name'] = df.index.values
df.reset_index(drop=True, inplace=True)
df['contig_breaks'] = df['contig_name'].apply(lambda x: contig_counts[x] - 1)
# no clue why, but some contigs are broken despite being unsupported
# cluster10_contig_270_subseq_1:79636_obj
# cluster10_contig_270_subseq_79637:120374_obj
# ---> cluster10_contig_270 120374
# so fix that here
df.loc[df['BNG_supported'] == 0, 'contig_breaks'] = 0
# now fix cases where a single contig has several BNG unsupported
# fragments, which would otherwise be counted multiple times
for ctg, broken_count in unsupported_broken.most_common():
if broken_count < 2:
break
# count several "unsupported" fragments as one
unsupported_breaks = broken_count - 1
counted_breaks = int(df.loc[df['contig_name'] == ctg, 'contig_breaks'])
if counted_breaks > 0:
# avoids clash/duplicates together with first fix
df.loc[df['contig_name'] == ctg, 'contig_breaks'] -= unsupported_breaks
return df, contig_to_scaffold, scaffold_to_contig
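# Illustrative sketch (not part of the original script): a made-up AGP layout with one contig
# split across a scaffold (BNG-supported, one break) and one unscaffolded contig (unsupported).
# Only the columns the function actually reads are provided.
def _demo_compute_bng_contig_support():
    toy_agp = pd.DataFrame({
        'object_name': ['Super-Scaffold_1', 'Super-Scaffold_1', 'Super-Scaffold_1', 'contig_B_obj'],
        'comp_type': ['W', 'N', 'W', 'W'],
        'comp_name_OR_gap_length': ['contig_A_subseq_1:100', '25', 'contig_A_subseq_126:200', 'contig_B'],
        'comp_end_OR_linkage': ['100', 'yes', '75', '300'],
    })
    support, ctg_to_scf, scf_to_ctg = compute_bng_contig_support(toy_agp)
    # contig_A: 175 bp BNG-supported with one break; contig_B: 300 bp unsupported
    print(support)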
def parse_contig_alignments(bed_path):
bed_columns = [
'chrom',
'start',
'end',
'contig',
'mapq',
'strand'
]
df = | pd.read_csv(bed_path, sep='\t', names=bed_columns, header=None) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # **<NAME> - Tracking Data Assignment**
#
# Sunday 11th October 2020
#
# ---
# In[1]:
import pandas as pd
import numpy as np
import datetime
# imports required by data prep functions
import json
# Laurie's libraries
import scipy.signal as signal
import matplotlib.animation as animation
# removing annoying matplotlib warnings
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
import re
import os
from collections import Counter, defaultdict
# plotting
import matplotlib.pyplot as plt
pd.options.display.max_rows = 500
pd.options.display.max_columns = 500
signalityRepo = r'2019/Tracking Data/'
movieRepo = r'Movies/'
# # **1)** Data Preparation Functions
# In[2]:
def initialise_dic_tracks(df_homePlayers, df_awayPlayers):
"""
Initialises dictionaries for both home and away player locations
"""
dic_home_tracks = {}
dic_away_tracks = {}
for homePlayer in df_homePlayers.playerIndex:
for xy in ['x','y']:
dic_home_tracks[f'Home_{homePlayer}_{xy}'] = []
for awayPlayer in df_awayPlayers.playerIndex:
for xy in ['x','y']:
dic_away_tracks[f'Away_{awayPlayer}_{xy}'] = []
return dic_home_tracks, dic_away_tracks
# In[3]:
def populate_df_tracks(homeAway, homeAway_tracks, playersJerseyMapping, dic_tracks, df_players):
"""
For a given team (home OR away), will transform the JSON track data to produce a dataframe just like Laurie's
"""
lst_playerJerseys = df_players.jersey_number.values
# iterating through frames for home/away team
for n, frame in enumerate(homeAway_tracks):
lst_playerJerseysPerFrame = []
for player in frame:
jersey_number = player.get('jersey_number')
playerIndex = playersJerseyMapping[jersey_number]
x,y = player.get('position', [np.nan, np.nan])
# keeping track of jerseys that have a position for that frame
lst_playerJerseysPerFrame.append(jersey_number)
dic_tracks[f'{homeAway}_{playerIndex}_x'].append(x)
# flipping the y axis to make the data sync with Laurie's plotting methods
dic_tracks[f'{homeAway}_{playerIndex}_y'].append(-1*y)
# list of jerseys that aren't in the frame
lst_playerJerseysNotInFrame = list(set(lst_playerJerseys) - set(lst_playerJerseysPerFrame))
# adding the jerseys that aren't in frame and providing an x,y position of nan, nan
for jersey_number in lst_playerJerseysNotInFrame:
playerIndex = playersJerseyMapping[jersey_number]
x,y = [np.nan, np.nan]
dic_tracks[f'{homeAway}_{playerIndex}_x'].append(x)
dic_tracks[f'{homeAway}_{playerIndex}_y'].append(y)
# transforming tracking dic to a tracking dataframe
df_tracks = pd.DataFrame(dic_tracks)
return df_tracks
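# Illustrative sketch (not part of the original notebook): a single made-up frame for a
# two-player home side shows how the jersey->playerIndex mapping and the NaN padding for
# players missing from a frame work together. All names and positions are invented.
def _demo_populate_df_tracks():
    toy_home_players = pd.DataFrame({'playerIndex': [1, 2], 'jersey_number': [7, 9]})
    toy_away_players = pd.DataFrame({'playerIndex': [1], 'jersey_number': [10]})
    toy_home_dic, _ = initialise_dic_tracks(toy_home_players, toy_away_players)
    jersey_mapping = dict(zip(toy_home_players.jersey_number, toy_home_players.playerIndex))
    toy_frames = [[{'jersey_number': 7, 'position': [12.0, 3.5]}]]  # jersey 9 absent -> NaN
    toy_tracks = populate_df_tracks('Home', toy_frames, jersey_mapping, toy_home_dic, toy_home_players)
    print(toy_tracks)  # Home_1_x=12.0, Home_1_y=-3.5, Home_2_x/y=NaN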
# In[4]:
def to_single_playing_direction(home,away):
"""
Switches x and y co-ords with negative sign in the second half
Requires the co-ords to be symmetric about 0,0 (i.e. going from roughly -60 to +60 in the x direction and -34 to +34 in the y direction)
"""
for team in [home,away]:
second_half_idx = team.Period.idxmax(2)
columns = [c for c in team.columns if c[-1].lower() in ['x','y']]
team.loc[second_half_idx:,columns] *= -1
return home,away
# In[5]:
def shoot_direction(gk_x_position):
"""
Produces either 1 (L2R) or -1 (R2L) based on GK position
"""
if gk_x_position > 0:
# shooting right-to-left
return -1
else:
        # shooting left-to-right
return 1
# In[6]:
def parse_raw_to_df(signalityRepo, rootFileName, interpolate=True):
"""
Takes raw root of a match string e.g. 20190930.Hammarby-Örebrö and transforms it into 4 dataframes:
1) home players
2) away players
3) home tracking
4) away tracking
"""
lst_df_home = []
lst_df_away = []
for half in ['.1','.2']:
# producing filename prefix (just need to add either "-info_live.json" or "-tracks.json")
fileNamePrefix = rootFileName + half
# load info
## looks like the info JSON is duplicated between the two halves
with open(os.path.join(signalityRepo, f'{fileNamePrefix}-info_live.json')) as f:
info = json.load(f)
# load tracks
with open(os.path.join(signalityRepo, f'{fileNamePrefix}-tracks.json')) as f:
tracks = json.load(f)
# unpacking info
## looks like .1 and .2 files are duplicated, so just looking at the .1 (first half file)
if half == '.1':
matchId = info.get('id')
venueId = info.get('venueId')
timeStart = info.get('time_start')
pitchLength, pitchWidth = info.get('calibration').get('pitch_size')
homeTeam = info.get('team_home_name')
awayTeam = info.get('team_away_name')
# unpacking players
homePlayers = info.get('team_home_players')
awayPlayers = info.get('team_away_players')
homeLineup = info.get('team_home_lineup')
awayLineup = info.get('team_away_lineup')
homeLineupSwitch = {homeLineup[i]:i for i in homeLineup}
awayLineupSwitch = {awayLineup[i]:i for i in awayLineup}
# putting player metadata in dataframe
df_homePlayers = | pd.DataFrame(homePlayers) | pandas.DataFrame |
# -*- coding: utf-8 -*-
'''
TopQuant-TQ intelligent quant backtesting and analysis system, 2019 edition
Top极宽量化 (formerly zw量化), a Python quantitative trading toolkit
by the Top极宽 quant open-source team, first released 2019.01.011
Website: www.TopQuant.vip  www.ziwang.com
QQ group: Top极宽量化总群, 124134140
File: toolkit.py
Default import alias: import topquant2019 as tk
Summary: common global parameters and helpers for the TopQuant system
'''
#
import sys, os, re
import arrow, bs4, random
import numexpr as ne
#
# import reduce #py2
from functools import reduce # py3
import itertools
import collections
#
# import cpuinfo as cpu
import psutil as psu
from functools import wraps
import datetime as dt
import pandas as pd
import os
import copy
#
import numpy as np
import pandas as pd
import tushare as ts
# import talib as ta
import matplotlib as mpl
import matplotlib.colors
from matplotlib import cm
from matplotlib import pyplot as plt
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
# import multiprocessing
#
import pyfolio as pf
from pyfolio.utils import (to_utc, to_series)
#
import backtrader as bt
import backtrader.observers as btobv
import backtrader.indicators as btind
import backtrader.analyzers as btanz
import backtrader.feeds as btfeeds
#
from backtrader.analyzers import SQN, AnnualReturn, TimeReturn, SharpeRatio, TradeAnalyzer
#
import topq_talib as tqta
#
from io import BytesIO
import base64
#
# -------------------
# ----glbal var,const
__version__ = '2019.M1'
sgnSP4 = ' '
sgnSP8 = sgnSP4 + sgnSP4
#
corlst = ['#0000ff', '#000000', '#00ff00', '#0000FF', '#8A2BE2', '#A52A2A', '#5F9EA0', '#D2691E', '#FF7F50', '#6495ED', '#DC143C', '#00FFFF', '#00008B',
'#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B',
'#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF',
'#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5',
'#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899',
'#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE',
'#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500',
'#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080',
'#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090',
'#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00',
'#9ACD32']
# @ datasires.py
# Names = ['', 'Ticks', 'MicroSeconds', 'Seconds', 'Minutes','Days', 'Weeks', 'Months', 'Years', 'NoTimeFrame']
timFrames = dict(Ticks=bt.TimeFrame.Ticks, MicroSeconds=bt.TimeFrame.MicroSeconds, Seconds=bt.TimeFrame.Seconds, Minutes=bt.TimeFrame.Minutes
, Days=bt.TimeFrame.Days, Weeks=bt.TimeFrame.Weeks, Months=bt.TimeFrame.Months, Years=bt.TimeFrame.Years, NoTimeFrame=bt.TimeFrame.NoTimeFrame)
#
rdat0 = '/TQDat/'
rdatDay = rdat0 + "day/"
rdatDayInx = rdatDay + "inx/"
rdatDayEtf = rdatDay + "etf/"
#
rdatMin0 = rdat0 + "min/"
rdatTick0 = rdat0 + "tick/"
rdatReal0 = rdat0 + "real/"
#
ohlcLst = ['open', 'high', 'low', 'close']
ohlcVLst = ohlcLst + ['volume']
#
ohlcDLst = ['date'] + ohlcLst
ohlcDVLst = ['date'] + ohlcVLst
#
ohlcDExtLst = ohlcDVLst + ['adj close']
ohlcBTLst = ohlcDVLst + ['openinterest'] # backtrader
#
# ----kline
tq10_corUp, tq10_corDown = ['#7F7F7F', '#17BECF'] # plotly
tq09_corUp, tq09_corDown = ['#B61000', '#0061B3']
tq08_corUp, tq08_corDown = ['#FB3320', '#020AF0']
tq07_corUp, tq07_corDown = ['#B0F76D', '#E1440F']
tq06_corUp, tq06_corDown = ['#FF3333', '#47D8D8']
tq05_corUp, tq05_corDown = ['#FB0200', '#007E00']
tq04_corUp, tq04_corDown = ['#18DEF5', '#E38323']
tq03_corUp, tq03_corDown = ['black', 'blue']
tq02_corUp, tq02_corDown = ['red', 'blue']
tq01_corUp, tq01_corDown = ['red', 'lime']
#
tq_ksty01 = dict(volup=tq01_corUp, voldown=tq01_corDown, barup=tq01_corUp, bardown=tq01_corDown)
tq_ksty02 = dict(volup=tq02_corUp, voldown=tq02_corDown, barup=tq02_corUp, bardown=tq02_corDown)
tq_ksty03 = dict(volup=tq03_corUp, voldown=tq03_corDown, barup=tq03_corUp, bardown=tq03_corDown)
tq_ksty04 = dict(volup=tq04_corUp, voldown=tq04_corDown, barup=tq04_corUp, bardown=tq04_corDown)
tq_ksty05 = dict(volup=tq05_corUp, voldown=tq05_corDown, barup=tq05_corUp, bardown=tq05_corDown)
tq_ksty06 = dict(volup=tq06_corUp, voldown=tq06_corDown, barup=tq06_corUp, bardown=tq06_corDown)
tq_ksty07 = dict(volup=tq07_corUp, voldown=tq07_corDown, barup=tq07_corUp, bardown=tq07_corDown)
tq_ksty08 = dict(volup=tq08_corUp, voldown=tq08_corDown, barup=tq08_corUp, bardown=tq08_corDown)
tq_ksty09 = dict(volup=tq09_corUp, voldown=tq09_corDown, barup=tq09_corUp, bardown=tq09_corDown)
tq_ksty10 = dict(volup=tq10_corUp, voldown=tq10_corDown, barup=tq10_corUp, bardown=tq10_corDown)
# -------------------
# --------------------
class TQ_bar(object):
'''
    Holds the global parameters of a TopQuant project.
    The aim is to keep everything in one place ("all in one").
'''
def __init__(self):
# ----rss.dir
#
        # cb: the core backtrader Cerebro instance used for the backtest
        self.cb = None
        #
        # default backtest parameters
        self.prjNm = ''  # project name
        self.cash0 = 100000  # starting cash, 100k
        self.trd_mod = 1  # trade mode: 1 = fixed-size orders (default); 2 = cash-ratio orders
        self.stake0 = 100  # fixed-size mode: number of shares per trade, default 100
        self.ktrd0 = 30  # ratio mode: percentage of cash per trade, default 30%
        # data directories
        self.rdat0 = ''  # data directory for products (stocks/funds/futures, etc.)
        self.rbas0 = ''  # data directory for the benchmark (index, etc.)
        #
        self.pools = {}  # product pool (stocks/funds/futures, etc.), dict
        self.pools_code = {}  # product code pool (stocks/funds/futures, etc.), dict
        #
        # ------bt.var
        # analysis mode: 0 = basic analysis; 1 = trade-level analysis
        # the pyfolio tear-sheet analysis is invoked separately
        self.anz_mod = 1
        self.bt_results = None  # raw backtest results, consumed by the analysis helpers
        #
        self.tim0, self.tim9 = None, None  # backtest analysis start / end time
        self.tim0str, self.tim9str = '', ''  # backtest analysis start / end time, as strings
#
# ----------------------
# ----------top.quant.2019
def tq_init(prjNam='TQ01', cash0=100000.0, stake0=100):
#
def _xfloat3(x):
return '%.3f' % x
# ----------
#
    # initialise the environment: plotting style and numeric display formats
    mpl.style.use('seaborn-whitegrid');
    pd.set_option('display.width', 450)
    # pd.set_option('display.float_format', lambda x: '%.3g' % x)
    pd.set_option('display.float_format', _xfloat3)
    np.set_printoptions(suppress=True)  # disable scientific notation  #as_num(1.2e-4)
    #
    #
    # set a few default BT backtest parameters and clear the global product/code pools
qx = TQ_bar()
qx.prjName, qx.cash0, qx.stake0 = prjNam, cash0, stake0
qx.pools, qx.pools_code = {}, {}
#
#
return qx
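# Illustrative sketch (not part of the original module): typical start-of-script call.
# It assumes the legacy dependency versions this module targets (matplotlib with the
# 'seaborn-whitegrid' style, backtrader, pyfolio, tushare) are installed; the project
# name and amounts are arbitrary.
def _demo_tq_init():
    qx = tq_init(prjNam='TQ_demo', cash0=50000.0, stake0=200)
    print(qx.cash0, qx.stake0, qx.pools)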
# ----------bt.xxx
def plttohtml(plt, filename):
# plt.show()
    # convert the figure to base64
    figfile = BytesIO()
    plt.savefig(figfile, format='png')
    figfile.seek(0)
    figdata_png = base64.b64encode(figfile.getvalue())  # encode the PNG bytes as base64
    figdata_str = str(figdata_png, "utf-8")  # decode to str, otherwise it stays as b'...'
    # save as .html
    html = '<img src=\"data:image/png;base64,{}\"/>'.format(figdata_str)
    if filename is None:
        filename = 'result'
    with open(filename + '.html', 'w') as f:
f.write(html)
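# Illustrative sketch (not part of the original module): render a trivial figure and embed it
# into a standalone HTML file; 'demo_plot' is an arbitrary output name chosen for this example.
def _demo_plttohtml():
    fig = plt.figure()
    plt.plot([1, 2, 3], [2, 1, 3])
    plttohtml(plt, 'demo_plot')  # writes demo_plot.html with the figure as a base64 PNG
    plt.close(fig)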
def bt_set(qx, anzMod=0):
    # Sets up the backtrader Cerebro engine
    # defines short aliases
    # initialises the data pool and re-imports the backtest data
    # sets the basic backtest parameters
    # registers the analyzers
    #
    # set up the core backtrader Cerebro instance
    qx.cb = bt.Cerebro()
    #
    # short aliases
    qx.anz, qx.br = bt.analyzers, qx.cb.broker
    # bt:backtrader,ema:indicators,p:param
    #
    # initialise the data pool and re-import the backtest data
    pools_2btdata(qx)
    #
    # basic backtest parameters
    qx.br.setcash(qx.cash0)
    qx.br.setcommission(commission=0.001)
    qx.br.set_slippage_fixed(0.01)
    #
    # default trade parameters
    qx.trd_mod = 1
    qx.ktrd0 = 30
    qx.cb.addsizer(bt.sizers.FixedSize, stake=qx.stake0)
    #
    #
    # analyzers
    qx.cb.addanalyzer(qx.anz.Returns, _name="Returns")
    qx.cb.addanalyzer(qx.anz.DrawDown, _name='DW')
    # SharpeRatio
    qx.cb.addanalyzer(qx.anz.SharpeRatio, _name='SharpeRatio')
    # VWR: Variability-Weighted Return: Better SharpeRatio with Log Returns
    qx.cb.addanalyzer(qx.anz.VWR, _name='VWR')
    qx.cb.addanalyzer(SQN)
    #
    qx.cb.addanalyzer(qx.anz.AnnualReturn, _name='AnnualReturn')  # annualised return
    # analysis detail level
qx.anz_mod = anzMod
if anzMod > 0:
qx.cb.addanalyzer(qx.anz.TradeAnalyzer, _name='TradeAnalyzer')
# cerebro.addanalyzer(TimeReturn, timeframe=timFrames['years'])
# cerebro.addanalyzer(SharpeRatio, timeframe=timFrames['years'])
#
#
qx.cb.addanalyzer(qx.anz.PyFolio, _name='pyfolio')
#
return qx
def bt_anz(qx):
    # analyse the results of a BT backtest run
print('\nanz...')
#
dcash0, dval9 = qx.br.startingcash, qx.br.getvalue()
dget = dval9 - dcash0
# kret=dval9/dcash0*100
kget = dget / dcash0 * 100
#
strat = qx.bt_results[0]
anzs = strat.analyzers
#
#
# dsharp=anzs.SharpeRatio.get_analysis()['sharperatio']
dsharp = anzs.SharpeRatio.get_analysis()['sharperatio']
if dsharp == None: dsharp = 0
#
if qx.anz_mod > 1:
trade_info = anzs.TradeAnalyzer.get_analysis()
#
dw = anzs.DW.get_analysis()
max_drowdown_len = dw['max']['len']
max_drowdown = dw['max']['drawdown']
max_drowdown_money = dw['max']['moneydown']
# --------
print('\n-----------anz lv# 1 ----------')
    print('\nBT backtest analysis')
    print('Period: %s to %s' % (qx.tim0str, qx.tim9str))
    # print('%s end time: %s' % (sgnSP4, qx.tim9str))
    print('==================================================')
    print('Starting Portfolio Value: %.2f' % dcash0)
    print('Final Portfolio Value: %.2f' % dval9)
    print('Total Profit: %.2f' % dget)
    print('Return on Investment: %.2f %%' % kget)
    print('==================================================')
    #
    print('SharpeRatio : %.2f' % dsharp)
    print('max_drowdown_len : %.2f' % max_drowdown_len)
    print('max_drowdown : %.2f' % max_drowdown)
    print('max_drowdown_money : %.2f' % max_drowdown_money)
print('==================================================\n')
#
if qx.anz_mod > 1:
print('\n-----------anz lv# %d ----------\n' % qx.anz_mod)
for dat in anzs:
dat.print()
def bt_anz_folio(qx):
    # analyse the BT backtest results with
    # the professional pyFolio tear-sheet charts
#
print('\n-----------pyFolio----------')
strat = qx.bt_results[0]
anzs = strat.analyzers
#
xpyf = anzs.getbyname('pyfolio')
xret, xpos, xtran, gross_lev = xpyf.get_pf_items()
#
# xret.to_csv('tmp/x_ret.csv',index=True,header=None,encoding='utf8')
# xpos.to_csv('tmp/x_pos.csv',index=True,encoding='utf8')
# xtran.to_csv('tmp/x_tran.csv',index=True,encoding='utf8')
#
xret, xpos, xtran = to_utc(xret), to_utc(xpos), to_utc(xtran)
#
    # create the full (multi-page) tear-sheet
    # some charts download SPY benchmark data online,
    # so the call may appear to hang and need to be interrupted manually
pf.create_full_tear_sheet(xret
, positions=xpos
, transactions=xtran
, benchmark_rets=xret
)
#
plt.show()
'''
【PS, appendix: pyfolio tear-sheet chart function API】
The API differs considerably between pyfolio versions; mind the details below.
def create_full_tear_sheet(returns,
positions=None,
transactions=None,
market_data=None,
benchmark_rets=None,
slippage=None,
live_start_date=None,
sector_mappings=None,
bayesian=False,
round_trips=False,
estimate_intraday='infer',
hide_positions=False,
cone_std=(1.0, 1.5, 2.0),
bootstrap=False,
unadjusted_returns=None,
set_context=True):
pf.create_full_tear_sheet(
#pf.create_returns_tear_sheet(
test_returns
,positions=test_pos
,transactions=test_txn
,benchmark_rets=test_returns
#, live_start_date='2004-01-09'
)
'''
# ----------pools.data.xxx
def pools_get4fn(fnam, tim0str, tim9str, fgSort=True, fgCov=True):
'''
    Reads a standard OHLC csv file into a backtest-ready data feed.
    [Inputs]
    fnam: csv data file name
    tim0str, tim9str: backtest start / end time, as strings
    fgSort: sort ascending flag, default True
    [Output]
    data: data feed in backtrader's internal format (or the raw DataFrame if fgCov is False)
'''
# skiprows=skiprows,header=header,parse_dates=True, index_col=0,
# df = pd.read_hdf(fnam, index_col=1, parse_dates=True, key='df', mode='r')
# df = pd.DataFrame(df)
# df.set_index('candle_begin_time', inplace=True)
# print(df)
df = pd.read_csv(fnam, index_col=0, parse_dates=True)
    df.sort_index(ascending=fgSort, inplace=True)  # True: ascending order
df.index = pd.to_datetime(df.index, format='%Y-%m-%dT%H:%M:%S.%fZ')
#
tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d')
tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d')
# prDF(df)
# xxx
#
df['openinterest'] = 0
if fgCov:
data = bt.feeds.PandasData(dataname=df, fromdate=tim0, todate=tim9)
else:
data = df
#
return data
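# Illustrative sketch (not part of the original module): write a two-row OHLCV csv and read it
# back. With fgCov=False the plain DataFrame (plus 'openinterest') is returned, so backtrader's
# PandasData wrapper is not exercised here. The file name and values are arbitrary.
def _demo_pools_get4fn():
    demo_csv = 'tmp_ohlcv_demo.csv'
    pd.DataFrame(
        {'open': [10.0, 10.5], 'high': [10.6, 10.9], 'low': [9.8, 10.2],
         'close': [10.4, 10.7], 'volume': [1200, 900]},
        index=pd.to_datetime(['2019-01-02', '2019-01-03']),
    ).to_csv(demo_csv)
    print(pools_get4fn(demo_csv, '2019-01-01', '2019-01-31', fgCov=False))
    os.remove(demo_csv)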
def pools_get4df(df, tim0str, tim9str, fgSort=True, fgCov=True):
'''
    Builds a backtest data feed from an in-memory OHLC DataFrame.
    [Inputs]
    df: OHLC DataFrame
    tim0str, tim9str: backtest start / end time, as strings
    fgSort: sort ascending flag, default True
    [Output]
    data: data feed in backtrader's internal format
'''
# skiprows=skiprows,header=header,parse_dates=True, index_col=0,
# df = pd.read_hdf(fnam, index_col=1, parse_dates=True, key='df', mode='r')
# df = pd.DataFrame(df)
# df.set_index('candle_begin_time', inplace=True)
# print(df)
# prDF(df)
# xxx
#
if fgCov:
df['openinterest'] = 0
        df.sort_index(ascending=fgSort, inplace=True)  # True: ascending order
df.index = pd.to_datetime(df.index, format='%Y-%m-%dT%H:%M:%S')
#
tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d')
tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d')
data = bt.feeds.PandasData(dataname=df, fromdate=tim0, todate=tim9)
else:
# Create a Data Feed
tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d')
tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d')
data = bt.feeds.GenericCSVData(
timeframe=bt.TimeFrame.Minutes,
compression=1,
dataname=df,
fromdate=tim0,
todate=tim9,
nullvalue=0.0,
dtformat=('%Y-%m-%d %H:%M:%S'),
tmformat=('%H:%M:%S'),
datetime=0,
open=1,
high=2,
low=3,
close=4,
volume=5,
openinterest=-1,
reverse=False)
#
# print(data)
# data.index = pd.to_datetime(df.index, format='%Y-%m-%dT%H:%M:%S.%fZ')
return data
def prepare_data(symbol, fromdt, todt, datapath=None):
"""
:param symbol:
:param datapath: None
:param fromdt:
:param todt:
:return:
    # prepare 1m backtesting data
"""
# df9path = f'..//data//{symbol}_1m_{mode}.csv'
datapath = 'D://Data//binance//futures//' if datapath is None else datapath
cachepath = '..//data//'
filename = f'{symbol}_{fromdt}_{todt}_1m.csv'
if os.path.exists(cachepath+filename): # check if .//Data// exist needed csv file
df = pd.read_csv(cachepath+filename)
df['openinterest'] = 0
        df.sort_index(ascending=True, inplace=True)  # True: ascending order
df.index = | pd.to_datetime(df.index, format='%Y-%m-%dT%H:%M:%S') | pandas.to_datetime |
import numpy as np
import pandas as pd
from tqdm import tqdm
from prereise.gather.solardata.helpers import get_plant_id_unique_location
from prereise.gather.solardata.nsrdb.nrel_api import NrelApi
def retrieve_data(solar_plant, email, api_key, year="2016"):
"""Retrieve irradiance data from NSRDB and calculate the power output
using a simple normalization.
:param pandas.DataFrame solar_plant: plant data frame.
:param str email: email used to `sign up <https://developer.nrel.gov/signup/>`_.
:param str api_key: API key.
:param str year: year.
:return: (*pandas.DataFrame*) -- data frame with *'Pout'*, *'plant_id'*,
*'ts'* and *'ts_id'* as columns. Values are power output for a 1MW generator.
"""
# Identify unique location
coord = get_plant_id_unique_location(solar_plant)
api = NrelApi(email, api_key)
data = | pd.DataFrame({"Pout": [], "plant_id": [], "ts": [], "ts_id": []}) | pandas.DataFrame |
####################
# Import Libraries
####################
import os
import sys
from PIL import Image
import cv2
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.metrics import Accuracy
from pytorch_lightning import loggers
from pytorch_lightning import seed_everything
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedKFold
from sklearn import model_selection
import albumentations as A
import timm
from omegaconf import OmegaConf
import glob
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from nnAudio.Spectrogram import CQT1992v2, CQT2010v2
from scipy import signal
####################
# Utils
####################
def get_score(y_true, y_pred):
score = roc_auc_score(y_true, y_pred)
return score
def load_pytorch_model(ckpt_name, model, ignore_suffix='model'):
state_dict = torch.load(ckpt_name, map_location='cpu')["state_dict"]
new_state_dict = {}
for k, v in state_dict.items():
name = k
if name.startswith(str(ignore_suffix)+"."):
name = name.replace(str(ignore_suffix)+".", "", 1) # remove `model.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict, strict=False)
return model
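# Illustrative sketch (not part of the original script): sanity-check the AUC helper on
# hand-made labels and scores (here every positive outranks every negative, so AUC = 1.0).
def _demo_get_score():
    print(get_score(np.array([0, 1, 0, 1]), np.array([0.1, 0.8, 0.4, 0.6])))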
class CWT(nn.Module):
def __init__(
self,
wavelet_width,
fs,
lower_freq,
upper_freq,
n_scales,
size_factor=1.0,
border_crop=0,
stride=1
):
super().__init__()
self.initial_wavelet_width = wavelet_width
self.fs = fs
self.lower_freq = lower_freq
self.upper_freq = upper_freq
self.size_factor = size_factor
self.n_scales = n_scales
self.wavelet_width = wavelet_width
self.border_crop = border_crop
self.stride = stride
wavelet_bank_real, wavelet_bank_imag = self._build_wavelet_kernel()
self.wavelet_bank_real = nn.Parameter(wavelet_bank_real, requires_grad=False)
self.wavelet_bank_imag = nn.Parameter(wavelet_bank_imag, requires_grad=False)
self.kernel_size = self.wavelet_bank_real.size(3)
def _build_wavelet_kernel(self):
s_0 = 1 / self.upper_freq
s_n = 1 / self.lower_freq
base = np.power(s_n / s_0, 1 / (self.n_scales - 1))
scales = s_0 * np.power(base, np.arange(self.n_scales))
frequencies = 1 / scales
truncation_size = scales.max() * np.sqrt(4.5 * self.initial_wavelet_width) * self.fs
one_side = int(self.size_factor * truncation_size)
kernel_size = 2 * one_side + 1
k_array = np.arange(kernel_size, dtype=np.float32) - one_side
t_array = k_array / self.fs
wavelet_bank_real = []
wavelet_bank_imag = []
for scale in scales:
norm_constant = np.sqrt(np.pi * self.wavelet_width) * scale * self.fs / 2.0
scaled_t = t_array / scale
exp_term = np.exp(-(scaled_t ** 2) / self.wavelet_width)
kernel_base = exp_term / norm_constant
kernel_real = kernel_base * np.cos(2 * np.pi * scaled_t)
kernel_imag = kernel_base * np.sin(2 * np.pi * scaled_t)
wavelet_bank_real.append(kernel_real)
wavelet_bank_imag.append(kernel_imag)
wavelet_bank_real = np.stack(wavelet_bank_real, axis=0)
wavelet_bank_imag = np.stack(wavelet_bank_imag, axis=0)
wavelet_bank_real = torch.from_numpy(wavelet_bank_real).unsqueeze(1).unsqueeze(2)
wavelet_bank_imag = torch.from_numpy(wavelet_bank_imag).unsqueeze(1).unsqueeze(2)
return wavelet_bank_real, wavelet_bank_imag
def forward(self, x):
x = x.unsqueeze(dim=0)
border_crop = self.border_crop // self.stride
start = border_crop
end = (-border_crop) if border_crop > 0 else None
# x [n_batch, n_channels, time_len]
out_reals = []
out_imags = []
in_width = x.size(2)
out_width = int(np.ceil(in_width / self.stride))
        pad_along_width = max((out_width - 1) * self.stride + self.kernel_size - in_width, 0)
padding = pad_along_width // 2 + 1
for i in range(3):
# [n_batch, 1, 1, time_len]
x_ = x[:, i, :].unsqueeze(1).unsqueeze(2)
out_real = nn.functional.conv2d(x_, self.wavelet_bank_real, stride=(1, self.stride), padding=(0, padding))
out_imag = nn.functional.conv2d(x_, self.wavelet_bank_imag, stride=(1, self.stride), padding=(0, padding))
out_real = out_real.transpose(2, 1)
out_imag = out_imag.transpose(2, 1)
out_reals.append(out_real)
out_imags.append(out_imag)
out_real = torch.cat(out_reals, axis=1)
out_imag = torch.cat(out_imags, axis=1)
out_real = out_real[:, :, :, start:end]
out_imag = out_imag[:, :, :, start:end]
scalograms = torch.sqrt(out_real ** 2 + out_imag ** 2)
return scalograms[0]
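# Added usage sketch (not part of the original notebook): the CWT module above takes a
# 3-channel waveform tensor of shape [3, time_len] -- forward() adds the batch dimension
# itself -- and returns a stack of scalograms of shape [3, n_scales, ~time_len / stride].
# The parameters mirror the ones used in G2NetDataset below; the function name
# `_cwt_shape_check` and the dummy input are illustrative assumptions and nothing in the
# pipeline calls this.
def _cwt_shape_check():
    cwt = CWT(wavelet_width=8, fs=2048, lower_freq=20, upper_freq=1024, n_scales=384, stride=8)
    dummy_waves = torch.randn(3, 4096)   # three detector channels, 2 s sampled at 2048 Hz
    scalograms = cwt(dummy_waves)        # should come out as [3, 384, 512] for this input length
    return scalograms.shape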
####################
# Config
####################
conf_dict = {'batch_size': 8,#32,
'epoch': 30,
'height': 512,#640,
'width': 512,
'model_name': 'efficientnet_b0',
'lr': 0.001,
'drop_rate': 0.0,
'drop_path_rate': 0.0,
'data_dir': '../input/seti-breakthrough-listen',
'model_path': None,
'output_dir': './',
'seed': 2021,
'snap': 1}
conf_base = OmegaConf.create(conf_dict)
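# Added note (not in the original notebook): main() below merges this base config with
# OmegaConf.from_cli(), so every key here can be overridden from the command line, and keys
# that are absent here -- e.g. `model_dir`, which main() reads when globbing checkpoints --
# are expected to be passed the same way, for example:
#   python this_script.py model_dir=../input/g2net-checkpoints batch_size=16
# (the script name `this_script.py` is only a placeholder for this file).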
####################
# Dataset
####################
class G2NetDataset(Dataset):
def __init__(self, df, transform=None, conf=None, train=True):
self.df = df.reset_index(drop=True)
self.dir_names = df['dir'].values
self.labels = df['target'].values
self.wave_transform = [
CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=8, bins_per_octave=8, window='flattop'),
CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=8, bins_per_octave=8, window='blackmanharris'),
CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=8, bins_per_octave=8, window='nuttall'),
CWT(wavelet_width=8,fs=2048,lower_freq=20,upper_freq=1024,n_scales=384,stride=8)]
#self.wave_transform = CQT1992v2(sr=2048, fmin=10, fmax=1024, hop_length=8, bins_per_octave=8, window='flattop')
#self.wave_transform = CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=1, bins_per_octave=14, window='flattop')
#self.wave_transform = CQT2010v2(sr=2048, fmin=10, fmax=1024, hop_length=32, n_bins=32, bins_per_octave=8, window='flattop')
self.stat = [
[0.013205823003608798,0.037445450696502146],
[0.009606230606511236,0.02489221471650526], # 10000 sample
[0.009523397709568962,0.024628402379527688],
[0.0010164694150735158,0.0015815201992169022]] # 10000 sample
        # it might be worth trying a different hop length
self.transform = transform
self.conf = conf
self.train = train
def __len__(self):
return len(self.df)
def apply_qtransform(self, waves, transform):
#print(waves.shape)
#waves = np.hstack(waves)
#print(np.max(np.abs(waves), axis=1))
#waves = waves / np.max(np.abs(waves), axis=1, keepdims=True)
#waves = waves / np.max(waves)
waves = waves / 4.6152116213830774e-20
waves = torch.from_numpy(waves).float()
image = transform(waves)
return image
def __getitem__(self, idx):
img_id = self.df.loc[idx, 'id']
file_path = os.path.join(self.dir_names[idx],"{}/{}/{}/{}.npy".format(img_id[0], img_id[1], img_id[2], img_id))
waves = np.load(file_path)
label = torch.tensor([self.labels[idx]]).float()
image1 = self.apply_qtransform(waves, self.wave_transform[0])
image1 = image1.squeeze().numpy().transpose(1,2,0)
image1 = cv2.vconcat([image1[:,:,0],image1[:,:,1],image1[:,:,2]])
image1 = (image1-self.stat[0][0])/self.stat[0][1]
image1 = cv2.resize(image1, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image2 = self.apply_qtransform(waves, self.wave_transform[1])
image2 = image2.squeeze().numpy().transpose(1,2,0)
image2 = cv2.vconcat([image2[:,:,0],image2[:,:,1],image2[:,:,2]])
image2 = (image2-self.stat[1][0])/self.stat[1][1]
image2 = cv2.resize(image2, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image3 = self.apply_qtransform(waves, self.wave_transform[2])
image3 = image3.squeeze().numpy().transpose(1,2,0)
image3 = cv2.vconcat([image3[:,:,0],image3[:,:,1],image3[:,:,2]])
image3 = (image3-self.stat[2][0])/self.stat[2][1]
image3 = cv2.resize(image3, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image4 = self.apply_qtransform(waves, self.wave_transform[3])
image4 = image4.squeeze().numpy().transpose(1,2,0)
image4 = cv2.vconcat([image4[:,:,0],image4[:,:,1],image4[:,:,2]])
image4 = (image4-self.stat[3][0])/self.stat[3][1]
image4 = cv2.resize(image4, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
#if self.transform is not None:
# image = self.transform(image=image)['image']
image1 = torch.from_numpy(image1).unsqueeze(dim=0)
image2 = torch.from_numpy(image2).unsqueeze(dim=0)
image3 = torch.from_numpy(image3).unsqueeze(dim=0)
image4 = torch.from_numpy(image4).unsqueeze(dim=0)
return image1, image2, image3, image4, label
####################
# Data Module
####################
class SETIDataModule(pl.LightningDataModule):
def __init__(self, conf):
super().__init__()
self.conf = conf
# OPTIONAL, called only on 1 GPU/machine(for download or tokenize)
def prepare_data(self):
pass
# OPTIONAL, called for every GPU/machine
def setup(self, stage=None, fold=None):
if stage == 'test':
#test_df = pd.read_csv(os.path.join(self.conf.data_dir, "sample_submission.csv"))
#test_df['dir'] = os.path.join(self.conf.data_dir, "test")
#self.test_dataset = G2NetDataset(test_df, transform=None,conf=self.conf, train=False)
df = pd.read_csv(os.path.join(self.conf.data_dir, "training_labels.csv"))
df['dir'] = os.path.join(self.conf.data_dir, "train")
# cv split
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=self.conf.seed)
for n, (train_index, val_index) in enumerate(skf.split(df, df['target'])):
df.loc[val_index, 'fold'] = int(n)
df['fold'] = df['fold'].astype(int)
train_df = df[df['fold'] != fold]
self.valid_df = df[df['fold'] == fold]
self.valid_dataset = G2NetDataset(self.valid_df, transform=None,conf=self.conf, train=False)
# ====================================================
# Inference function
# ====================================================
def inference(models, test_loader):
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
raw_probs = [[] for i in range(len(models))]
probs = []
probs_flattop = []
probs_blackmanharris = []
probs_nuttall = []
probs_cwt = []
with torch.no_grad():
for i, (images) in tk0:
images1 = images[0].cuda()
images2 = images[1].cuda()
images3 = images[2].cuda()
images4 = images[3].cuda()
avg_preds = []
flattop = []
blackmanharris = []
nuttall = []
cwt = []
for mid, model in enumerate(models):
y_preds_1 = model(images1)
y_preds_2 = model(images2)
y_preds_3 = model(images3)
y_preds_4 = model(images4)
y_preds = (y_preds_1 + y_preds_2 + y_preds_3 + y_preds_4)/4
avg_preds.append(y_preds.sigmoid().to('cpu').numpy())
flattop.append(y_preds_1.sigmoid().to('cpu').numpy())
blackmanharris.append(y_preds_2.sigmoid().to('cpu').numpy())
nuttall.append(y_preds_3.sigmoid().to('cpu').numpy())
cwt.append(y_preds_4.sigmoid().to('cpu').numpy())
#raw_probs[mid].append(y_preds.sigmoid().to('cpu').numpy())
avg_preds = np.mean(avg_preds, axis=0)
flattop = np.mean(flattop, axis=0)
blackmanharris = np.mean(blackmanharris, axis=0)
nuttall = np.mean(nuttall, axis=0)
cwt = np.mean(cwt, axis=0)
probs.append(avg_preds)
probs_flattop.append(flattop)
probs_blackmanharris.append(blackmanharris)
probs_nuttall.append(nuttall)
probs_cwt.append(cwt)
#for mid in range(len(models)):
# raw_probs[mid] = np.concatenate(raw_probs[mid])
probs = np.concatenate(probs)
probs_flattop = np.concatenate(probs_flattop)
probs_blackmanharris = np.concatenate(probs_blackmanharris)
probs_nuttall = np.concatenate(probs_nuttall)
probs_cwt = np.concatenate(probs_cwt)
return probs, probs_flattop, probs_blackmanharris, probs_nuttall, probs_cwt#, raw_probs
####################
# Train
####################
def main():
conf_cli = OmegaConf.from_cli()
conf = OmegaConf.merge(conf_base, conf_cli)
print(OmegaConf.to_yaml(conf))
seed_everything(2021)
# get model path
model_path = []
for i in range(5):
target_model = glob.glob(os.path.join(conf.model_dir, f'fold{i}/ckpt/*epoch*.ckpt'))
scores = [float(os.path.splitext(os.path.basename(i))[0].split('=')[-1]) for i in target_model]
model_path.append(target_model[scores.index(max(scores))])
models = []
for ckpt in model_path:
m = timm.create_model(model_name=conf.model_name, num_classes=1, pretrained=False, in_chans=1)
m = load_pytorch_model(ckpt, m, ignore_suffix='model')
m.cuda()
m.eval()
models.append(m)
# make oof
oof_df = pd.DataFrame()
oof_df_flattop = | pd.DataFrame() | pandas.DataFrame |
import gradio as gr
import pickle
import os
import pandas as pd
import json
import urllib.parse
from stats import create_pdf
from pycaret.classification import *
welcome_message = """
Hello!
Thanks for using our tool, you'll be able to build your own recommendation tool.
You'll be able to find out whether or not you like a song just by giving us its name, we analyse it for you
and we tell you if it's your taste or not.
NB: The algorithm being lightweight, it won't be absolutely perfect, but it will work most of the time
To make it work, you'll just have to:
- Get a Spotify playlist ready. This playlist must contain at least 100 songs (you can have more but only the first 100 will be used).
Try to use the BEST songs in your opinion so the algorithm will perfectly know what you like
The 'Liked songs' playlist can't work because it is private
( don't worry about privacy , we don't even have servers to store your data , it will then remain private and on your computer )
You will have to give us its ID
Just copy its link. It will look like this
https://open.spotify.com/playlist/[ID]?si=[a random number]
When prompted , paste the ID
- 4 short Spotify playlists of a genre / artist you don't like. Try to use different genres so the algorithm will better know
what you don't like.
And don't worry! You don't have to create these playlists. You can just use the "This is [name of the artist]" playlists
made by Spotify, or type the name of the genre you don't like and take the first playlist.
Each of these playlists has to be at least 25 songs long
You will have to give us its ID
- Get a token to access the Spotify API.
To do so, visit this link: https://developer.spotify.com/console/get-several-tracks/
Click on "Get Token", log in and then copy the token into a file called token.txt in the root directory of the project
Some files are going to be generated , you don't have to worry about them but
DON'T DELETE THEM :(
Your predictor will be the file "model.sav" in the data folder, with other files.
You can't read it but once generated , you can run main.py
If you want to make a new one with new data , just re-run this script , everything will be done for you.
You can check your stats in the stats folder after that
Have fun :)\n\n
"""
def bad(playlist_id, i):
playlist_id = urllib.parse.quote(str(playlist_id).replace(" ", ""))
stream = os.popen(
        f'curl -X "GET" "https://api.spotify.com/v1/playlists/{playlist_id}/tracks?fields=items(track(id%2Cname))&limit=25" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"')
data = stream.read()
try:
data = json.loads(data)["items"]
songs_ids = ""
for track in data:
songs_ids += track["track"]["id"] + ","
songs_ids = songs_ids[:-1]
stream = os.popen(
f'curl -X "GET" "https://api.spotify.com/v1/audio-features?ids={songs_ids}" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"')
data = stream.read()
with open(f"data/bad{i}.json", "w") as f:
f.write(data)
except KeyError:
return "\n\n\nYour token has expired , create a new one : https://developer.spotify.com/console/get-several-tracks/\n\n\n"
except IndexError:
return "\n\n\nWe didn't find the playlist you were looking for\n\n\n"
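# Added sketch (not in the original script): the welcome message above asks the user to copy a
# playlist link such as https://open.spotify.com/playlist/[ID]?si=[number] and paste only the ID.
# A helper like the one below could extract that ID; the name `extract_playlist_id` is an
# illustrative assumption and nothing in this script calls it.
def extract_playlist_id(link: str) -> str:
    """Return the playlist ID from a full Spotify playlist URL (a bare ID is returned unchanged)."""
    link = link.strip()
    if "open.spotify.com/playlist/" in link:
        link = link.split("open.spotify.com/playlist/", 1)[1]
    return link.split("?", 1)[0]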
try:
os.mkdir("data")
except FileExistsError:
pass
try:
os.mkdir("stats")
except FileExistsError:
pass
def get_stats(liked_Playlist,
disliked_Playlist_1,
disliked_Playlist_2,
disliked_Playlist_3,
disliked_Playlist_4):
global token, done_getting
# Get data
try:
# Get token
with open("token.txt", "r") as f:
token = f.read().replace("\n", "")
# Get the data from the liked playlist
playlist_id = urllib.parse.quote(liked_Playlist.replace(" ", ""))
stream = os.popen(
f'curl -X "GET" "https://api.spotify.com/v1/playlists/{playlist_id}/tracks?fields=items(track(id%2Cname))" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"')
data = stream.read()
try:
data = json.loads(data)["items"]
songs_ids = ""
for track in data:
songs_ids += track["track"]["id"] + ","
songs_ids = songs_ids[:-1]
stream = os.popen(
f'curl -X "GET" "https://api.spotify.com/v1/audio-features?ids={songs_ids}" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"')
data = stream.read()
with open("data/good.json", "w") as f:
f.write(data)
# Get the data from the disliked playlists
bad(disliked_Playlist_1, 1)
bad(disliked_Playlist_2, 2)
bad(disliked_Playlist_3, 3)
bad(disliked_Playlist_4, 4)
done_getting = True
except KeyError:
return """\n\n
Your token has expired , create a new one : https://developer.spotify.com/console/get-several-tracks/
If you refreshed / created your token within the last hour , make sure you have the good ID
\n\n\n"""
except FileNotFoundError:
return """
FileNotFoundError : There is no token file
To create one , visit this page : https://developer.spotify.com/console/get-several-tracks/
Log in to your spotify Account , do not check any scope, and then copy what's in "OAuth Token" field
into a file called "token.txt" in the root directory of the project
"""
# Clean and process data
if done_getting:
with open("data/good.json", "r") as f:
liked = json.load(f)
try:
liked = pd.DataFrame(liked["audio_features"])
liked["liked"] = [1] * 100
except ValueError:
return "\n\nYour 'liked' playlist wasn't long enough. It has to be at least 100 songs long."
with open("data/bad1.json", "r") as f:
disliked = json.load(f)
bad1 = pd.DataFrame(disliked['audio_features'][:25])
with open("data/bad2.json", "r") as f:
disliked = json.load(f)
bad2 = pd.DataFrame(disliked['audio_features'][:25])
with open("data/bad3.json", "r") as f:
disliked = json.load(f)
bad3 = pd.DataFrame(disliked['audio_features'][:25])
with open("data/bad4.json", "r") as f:
disliked = json.load(f)
bad4 = pd.DataFrame(disliked['audio_features'][:25])
try:
bad1["liked"] = [0] * 25
except ValueError:
return "\n\n'Disliked' playlist n.1 wasn't long enough. It has to be at least 25 songs long."
try:
bad2["liked"] = [0] * 25
except ValueError:
return "\n\n'Disliked' playlist n.2 wasn't long enough. It has to be at least 25 songs long."
try:
bad3["liked"] = [0] * 25
except ValueError:
return "\n\n'Disliked' playlist n.3 wasn't long enough. It has to be at least 25 songs long."
try:
bad4["liked"] = [0] * 25
except ValueError:
return "\n\n'Disliked' playlist n.4 wasn't long enough. It has to be at least 25 songs long."
# Modelling
data = | pd.concat([liked, bad1, bad2, bad3, bad4]) | pandas.concat |
import web
import pandas as pd
import numpy as np
import common
import os
import click
def hydro_op_chars_inputs_(webdb, project,
hydro_op_chars_sid,
balancing_type_project):
rows = webdb.where("inputs_project_hydro_operational_chars",
project=project,
hydro_operational_chars_scenario_id=hydro_op_chars_sid,
balancing_type_project=balancing_type_project).list()
if rows:
return pd.DataFrame(rows)
else:
raise common.NoEntriesError(f"Table inputs_project_hydro_operational_chars has no entries for project={project}, hydro_op_chars_scenario_id={hydro_op_chars_sid}, balancing_type_project={balancing_type_project}")
def hydro_op_chars_inputs(webdb, scenario, project):
hydro_op_chars_scenario_id = get_hydro_ops_chars_sceanario_id(webdb,
scenario, project)
balancing_type_project = get_balancing_type(webdb, scenario)
return hydro_op_chars_inputs_(webdb, project,
hydro_op_chars_scenario_id,
balancing_type_project)
def get_capacity(webdb,
scenario,
project):
capacity_scenario_id = get_project_specified_capacity_scenario_id(webdb,
scenario)
return common.get_field(webdb , "inputs_project_specified_capacity",
"specified_capacity_mw",
project=project,
project_specified_capacity_scenario_id=capacity_scenario_id)
def get_project_specified_capacity_scenario_id(webdb, scenario):
return common.get_field(webdb,
"scenarios",
"project_specified_capacity_scenario_id",
scenario_name=scenario)
def get_temporal_scenario_id(webdb, scenario):
return common.get_field(webdb,
"scenarios",
"temporal_scenario_id",
scenario_name=scenario)
def get_balancing_type(webdb, scenario):
temporal_scenario_id = get_temporal_scenario_id(webdb, scenario)
return common.get_field(webdb, "inputs_temporal_horizons",
"balancing_type_horizon",
temporal_scenario_id=temporal_scenario_id)
def get_temporal_start_end_table(conn, scenario):
temporal_id = get_temporal_scenario_id(conn, scenario)
temporal = conn.where("inputs_temporal_horizon_timepoints_start_end",
temporal_scenario_id=temporal_id).list()
return temporal
def get_power_mw_dataset(webdb, scenario, project):
scenario_id = common.get_field(webdb,
'scenarios',
"scenario_id",
scenario_name = scenario)
rows = webdb.where("results_project_dispatch",
scenario_id=scenario_id,
project=project,
operational_type='gen_hydro').list()
return pd.DataFrame(rows)
def adjust_mean_const(b, min_, max_):
"""
    adjusts values in b such that the original average of b remains as it is,
    but every value of b lies between the corresponding min_ and max_
"""
def adjust(c):
c1 = c.copy()
less, more, between = c < min_, c > max_, (c >= min_) & (c <= max_)
if less.sum() and more.sum():
#print("+-"*5)
c1[less] += (c1[more]- max_[more]).sum()/less.sum()
c1[more] = max_[more]
elif more.sum():
#print("+"*5)
c1[between] += (c1[more] - max_[more]).sum()/between.sum()
c1[more] = max_[more]
elif less.sum():
#print("-"*5)
c1[between] -= (min_[less] - c1[less]).sum()/between.sum()
c1[less] = min_[less]
#print(c.mean(), c1.mean())
return c1
c1 = adjust(b)
#printcols(c1, min_, max_)
n = 0
while n <20 and not np.all((c1 >= min_) & (c1 <= max_)):
#print(f"iteration {n}..")
c1 = adjust(c1)
#printcols(c1, min_, max_)
n += 1
if n ==20:
print("Failed to adjust mean")
#print(b.mean(), c1.mean())
return c1
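# Added example (not part of the original module): a quick, self-contained illustration of
# adjust_mean_const -- the adjusted series keeps the mean of the input while every point is
# pushed inside its [min_, max_] band.  The function name `_demo_adjust_mean_const` and the
# toy numbers are illustrative assumptions; the pipeline never calls this.
def _demo_adjust_mean_const():
    b = pd.Series([0.2, 0.9, 0.5])       # raw capacity-utilisation factors
    min_ = pd.Series([0.3, 0.0, 0.0])    # per-horizon lower bounds
    max_ = pd.Series([1.0, 0.6, 1.0])    # per-horizon upper bounds
    adjusted = adjust_mean_const(b, min_, max_)
    # adjusted == [0.5, 0.6, 0.5]: every value within its bounds and adjusted.mean() == b.mean()
    return adjusted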
def printcols(*cols):
for i, args in enumerate(zip(*cols)):
print(f"{i:3d}", " ".join([f"{arg:5f}" for arg in args]))
def get_projects(webdb, scenario):
proj_ops_char_sc_id = common.get_field(webdb,
"scenarios",
"project_operational_chars_scenario_id",
scenario_name=scenario
)
rows = webdb.where("inputs_project_operational_chars",
project_operational_chars_scenario_id=proj_ops_char_sc_id,
operational_type="gen_hydro")
return [row['project'] for row in rows]
def reduce_size(webdb, df, scenario):
tmp1 = get_temporal_start_end_table(webdb, scenario)
horizon = []
for row in df.to_dict(orient="records"):
x = row['timepoint']
horizon.append([p['horizon'] for p in tmp1 if x >= p['tmp_start'] and x <= p['tmp_end']][0])
df['horizon'] = horizon
grouped = df.groupby('horizon').mean()
grouped.reset_index(inplace=True, drop=True)
return grouped
def adjusted_mean_results(webdb, scenario1, scenario2, project):
cols = ["balancing_type_project", "horizon", "period",
"average_power_fraction","min_power_fraction", "max_power_fraction"]
df = hydro_op_chars_inputs(webdb, scenario2, project)
power_mw_df = get_power_mw_dataset(webdb, scenario1, project)
capacity = get_capacity(webdb, scenario1, project)
cuf = power_mw_df['power_mw']/capacity
min_, max_ = [df[c] for c in cols[-2:]]
if len(cuf) > len(min_):
power_mw_df = reduce_size(webdb, power_mw_df, scenario2)
cuf = power_mw_df['power_mw']/capacity
avg = adjust_mean_const(cuf, min_, max_)
results = df[cols]
del results['average_power_fraction']
results['average_power_fraction'] = avg
return results
def get_hydro_ops_chars_sceanario_id(webdb, scenario, project):
pocs_id = common.get_field(webdb,
"scenarios",
"project_operational_chars_scenario_id",
scenario_name=scenario)
return common.get_field(webdb,
"inputs_project_operational_chars",
"hydro_operational_chars_scenario_id",
project_operational_chars_scenario_id=pocs_id,
project = project)
def write_results_csv(results,
project,
subscenario,
subscenario_id,
csv_location,
description):
csvpath = common.get_subscenario_csvpath(project, subscenario,
subscenario_id, csv_location, description)
cols = ["balancing_type_project", "horizon", "period",
"average_power_fraction","min_power_fraction", "max_power_fraction"]
on = 'horizon'
common.merge_in_csv(results, csvpath, cols, on)
return subscenario, subscenario_id
def hydro_op_chars(scenario1,
scenario2,
csv_location,
database,
gridpath_rep,
project,
update_database,
description):
webdb = common.get_database(database)
projects = get_projects(webdb, scenario1)
if project:
projects = [project]#projects[:1]
subscenario = "hydro_operational_chars_scenario_id"
for project_ in projects:
print(f"Computing data for {project_}")
subscenario_id = get_hydro_ops_chars_sceanario_id(webdb, scenario2, project_)
results = adjusted_mean_results(webdb, scenario1, scenario2, project_)
write_results_csv(results,
project_,
subscenario,
subscenario_id,
csv_location,
description)
if update_database:
common.update_subscenario_via_gridpath(subscenario,
subscenario_id,
project_,
csv_location,
database,
gridpath_rep)
@click.command()
@click.option("-s1", "--scenario1", default="toy1_pass1", help="Name of scenario1")
@click.option("-s2", "--scenario2", default="toy1_pass2", help="Name of scenario2")
@click.option("-c", "--csv_location", default="csvs_toy", help="Path to folder where csvs are")
@click.option("-d", "--database", default="../toy.db", help="Path to database")
@click.option("-g", "--gridpath_rep", default="../", help="Path of gridpath source repository")
@click.option("--project", default=None, help="Run for only one project")
@click.option("--update_database/--no-update_database", default=False, help="Update database only if this flag is True")
@click.option("-m", "--description", default="rpo50S3_all", help="Description for csv files.")
def main(scenario1,
scenario2,
csv_location,
database,
gridpath_rep,
project,
update_database,
description
):
hydro_op_chars(scenario1,
scenario2,
csv_location,
database,
gridpath_rep,
project,
update_database,
description)
def dbtest():
webdb = common.get_database("/home/vikrant/programming/work/publicgit/gridpath/mh.db")
scenario1 = "rpo30_pass1"
scenario2 = 'rpo30_pass2'
project = 'Koyna_Stage_3'
adjusted_mean_results(webdb, scenario1, scenario2, project)
def test_1():
datapath = "/home/vikrant/programming/work/publicgit/gridpath-0.8.1/gridpath/db/csvs_mh/project/opchar/hydro_opchar/hydro-daily-limits-rpo30.xlsx"
project = 'Koyna_Stage_1'
hydro_dispatch = | pd.read_excel(datapath, sheet_name=project, nrows=365) | pandas.read_excel |
"""Interactions with rainfall and river data."""
import numpy as np
import pandas as pd
__all__ = ["get_station_data"]
def get_station_data(filename, station_reference):
"""Return readings for a specified recording station from .csv file.
Parameters
----------
filename: str
filename to read
station_reference
station_reference to return.
    >>> data = get_station_data('resources/wet_day.csv')
"""
frame = | pd.read_csv(filename) | pandas.read_csv |
import datetime
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
_repos_csv = []
_issues_csv = []
CSV_FPATH = Path('/home/lucas.rotsen/Git_Repos/benchmark_frameworks/github_metrics')
METRICS_FPATH = Path('/home/lucas.rotsen/Git_Repos/benchmark_frameworks/metrics/raw')
def load_csv(file):
return pd.read_csv(file, sep=',')
def get_files():
global _repos_csv, _issues_csv
csv_files = list(CSV_FPATH.glob('*.csv'))
for file in csv_files:
if 'issues' in file.name:
_issues_csv.append(file)
else:
_repos_csv.append(file)
# TODO: evaluate and compute metrics for the consolidated CSV
def consolidate_repos_csv():
dfs = [load_csv(repo_csv) for repo_csv in _repos_csv]
consolidated_df = | pd.concat(dfs) | pandas.concat |
# Script for evaluating each forecast and comparing the forecasts
import pandas as pd
import numpy as np
from numpy.random import rand
from numpy import ix_
from itertools import product
import chart_studio.plotly as py
import chart_studio
import plotly.graph_objs as go
import statsmodels.api as sm
chart_studio.tools.set_credentials_file(username='Emborg', api_key='<KEY>')
np.random.seed(1337)
# Predictions from each forecast
data = pd.read_csv('Data/All_Merged.csv') # , parse_dates=[0], date_parser=dateparse
data.isna().sum()
data.fillna(0, inplace=True)
data = data.set_index('date')
data = data.loc[~data.index.duplicated(keep='first')]
data = data.drop('2018-10-29')
# Forecasts
LSTM = pd.read_csv('Data/LSTM_Pred.csv', index_col=0)
LSTM = LSTM.loc[~LSTM.index.duplicated(keep='first')]
LSTM = LSTM.iloc[:-11, :]
LSTM = LSTM.drop('2018-10-29')
LSTM_NS = pd.read_csv('Data/LSTM_Pred_NoSent.csv', index_col=0)
LSTM_NS = LSTM_NS.loc[~LSTM_NS.index.duplicated(keep='first')]
LSTM_NS = LSTM_NS.iloc[:-11, :]
LSTM_NS = LSTM_NS.drop('2018-10-29')
ARIMA = pd.read_csv('Data/ARIMA_Pred.csv', index_col=0)
ARIMA = ARIMA.iloc[:-11, :]
ARIMA_NS = pd.read_csv('Data/ARIMA_Pred_NoSent.csv', index_col=0)
ARIMA_NS = ARIMA_NS.iloc[:-11, :]
XGB = pd.read_csv('Data/XGB_Pred.csv', index_col=0)
XGB = XGB.loc[~XGB.index.duplicated(keep='first')]
XGB = XGB.iloc[1:, :]
XGB = XGB.drop('2018-10-29')
XGB_NS = pd.read_csv('Data/XGB_Pred_nosenti.csv', index_col=0)
XGB_NS = XGB_NS.loc[~XGB_NS.index.duplicated(keep='first')]
XGB_NS = XGB_NS.iloc[1:, :]
XGB_NS = XGB_NS.drop('2018-10-29')
AR1 = pd.read_csv('Data/AR1.csv', index_col=0)
AR1 = AR1.iloc[:-11, :]
VAR = pd.read_csv('Data/VAR_pred.csv', index_col=0)
VAR = VAR.loc[~VAR.index.duplicated(keep='first')]
VAR = VAR[VAR.index.isin(LSTM.index)]['price']
VAR_NS = pd.read_csv('Data/VAR_pred_nosenti.csv', index_col=0)
VAR_NS = VAR_NS.loc[~VAR_NS.index.duplicated(keep='first')]
VAR_NS = VAR_NS[VAR_NS.index.isin(LSTM.index)]['price']
# Price for the forecasting period
price = data[data.index.isin(LSTM.index)]
price = price[['price']]
ARIMA.index = price.index
ARIMA_NS.index = price.index
XGB.index = price.index
XGB_NS.index = price.index
colors = [
'#1f77b4', # muted blue
'#ff7f0e', # safety orange
'#2ca02c', # cooked asparagus green
'#d62728', # brick red
'#9467bd', # muted purple
'#8c564b', # chestnut brown
'#e377c2', # raspberry yogurt pink
'#7f7f7f', # middle gray
'#bcbd22', # curry yellow-green
'#17becf' # blue-teal
]
# Combined Forecast DataFrame
fc = pd.DataFrame()
fc = price
fc = fc.merge(AR1[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.merge(ARIMA[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.merge(ARIMA_NS[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.merge(VAR, how='left', left_index=True, right_index=True)
fc = fc.merge(VAR_NS, how='left', left_index=True, right_index=True)
fc = fc.merge(XGB, how='left', left_index=True, right_index=True)
fc = fc.merge(XGB_NS, how='left', left_index=True, right_index=True)
fc = fc.merge(LSTM[['LSTM']], how='left', left_index=True, right_index=True)
fc = fc.merge(LSTM_NS[['LSTM']], how='left', left_index=True, right_index=True)
# fc = fc.merge(XGB_NS, how='left', left_index=True, right_index=True)
fc.columns = ['Price', 'AR1', 'ARIMAX', 'ARIMAX_NS', 'VAR', 'VAR_NS', 'XGB', 'XGB_NS', 'LSTM', 'LSTM_NS']
# fc.to_csv(r'Data\All_Forecasts.csv')
fig = go.Figure()
n = 0
for key in fc.columns:
fig.add_trace(go.Scatter(x=fc.index,
y=fc[key],
mode='lines',
name=key,
line=dict(color=colors[n % len(colors)])))
n = n + 1
fig.update_layout(yaxis=dict(title='USD'),
xaxis=dict(title='date'))
py.plot(fig, filename='price_all_fc')
# Actual price
actual = fc[['Price']]
fc = fc.iloc[:, 1:]
# Error metrics
def RMSE(fc, actual):
actual = actual.values
fc = fc.values
losses = fc - actual
RMSE = np.sqrt(np.mean(losses ** 2, axis=0))
return (RMSE)
def MAE(fc, actual):
actual = actual.values
fc = fc.values
losses = fc - actual
MAE = np.mean(np.abs(losses), axis=0)
return (MAE)
def residual_bar_plot(fc_1, fc_2, actuals, name1, name2):
df = pd.DataFrame(fc_1.values - actuals.values)
df[name2] = fc_2.values - actuals.values
df.columns = [name1,name2]
df.hist()
print(name1)
print(round(sm.tsa.stattools.adfuller(df[name1])[1],4))
print(round(sm.stats.stattools.jarque_bera(df[name1])[1],4))
print(name2)
print(round(sm.tsa.stattools.adfuller(df[name2])[1],4))
print(round(sm.stats.stattools.jarque_bera(df[name2])[1],4))
residual_bar_plot(fc[['ARIMAX']], fc[['ARIMAX_NS']], actual, 'ARIMA', 'ARIMA_NS')
residual_bar_plot(fc[['LSTM']], fc[['LSTM_NS']], actual, 'LSTM', 'LSTM_NS')
residual_bar_plot(fc[['VAR']], fc[['VAR_NS']], actual, 'VAR', 'VAR_NS')
residual_bar_plot(fc[['XGB']], fc[['XGB_NS']], actual, 'XGB', 'XGB_NS')
name1 = 'ARIMAX'
fc_1 = fc[['ARIMAX']]
# split_date = '2019-05-01'
# fc = fc.loc[fc.index >= split_date]
# actual = actual.loc[actual.index >= split_date]
rmse = RMSE(fc, actual)
mae = MAE(fc, actual)
print(pd.DataFrame(rmse).to_latex())
# <NAME> testing
dm_result = list()
done_models = list()
models_list = fc.columns
for model1 in models_list:
for model2 in models_list:
if model1 != model2:
dm_result.append(dm_test(fc[[model1]], fc[[model2]], actual))
dm_result = pd.DataFrame(dm_result)
# dm_result['t-stat'] = np.abs(dm_result['t-stat'])
dm_result = dm_result.loc[~np.abs(dm_result['t-stat']).duplicated(keep='first')]
dm_result['t-stat'] = round(dm_result['t-stat'],2)
dm_result['p-value'] = round(dm_result['p-value'],4)
print(dm_result.to_latex())
# <NAME>
cw1 = cw_test(ARIMA, ARIMA_NS, actual)
print(cw1)
cw2 = cw_test(LSTM[['LSTM']], LSTM_NS[['LSTM']], actual)
print(cw2)
cw3 = cw_test(XGB[['est']], XGB_NS[['est']], actual)
print(cw3)
cspe_plot(fc[['XGB_NS']], fc[['XGB']], actual)
# Model Confidence Set
# https://michael-gong.com/blogs/model-confidence-set/?fbclid=IwAR38oo302TSJ4BFqTpluh5aeivkyM6A1cc0tnZ_JUX08PNwRzQkIi4WPlps
# Wrap data and compute the Mean Absolute Error
MCS_data = pd.DataFrame(np.c_[fc.AR1, fc.ARIMAX, fc.ARIMAX_NS, fc.LSTM, fc.LSTM_NS, fc.VAR, fc.VAR_NS, fc.XGB, fc.XGB_NS, actual.Price],
columns=['AR1','ARIMAX', 'ARIMAX_NS', 'LSTM', 'LSTM_NS','VAR','VAR_NS','XGB','XGB_NS', 'Actual'])
losses = pd.DataFrame()
for model in MCS_data.columns: #['ARIMA', 'ARIMA_NS', 'LSTM', 'LSTM_NS']:
losses[model] = np.abs(MCS_data[model] - MCS_data['Actual'])
losses=losses.iloc[:,:-1]
mcs = ModelConfidenceSet(losses, 0.1, 3, 1000).run()
mcs.included
mcs.pvalues
# Forecast combinations
fc.columns[1:]
l1 = fc.columns[1:].values
l2 = ['ARIMAX', 'VAR', 'XGB','LSTM']
l3 = ['ARIMAX_NS', 'VAR_NS', 'XGB_NS','LSTM_NS']
comb_results = pd.DataFrame([[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]])
comb_results.index = ['All','S','NS']
comb_results.columns = ['Equal', 'MSE', 'Rank', 'Time(1)','Time(7)']
l_list = [l1,l2,l3]
i = 0
for l in l_list:
print(l)
pred = fc[l]
# Combinations
eq = fc_comb(actual=actual, fc=pred, weights="equal")
#bgw = fc_comb(actual=actual, fc=fc[fc.columns[1:]], weights="BGW")
mse = fc_comb(actual=actual, fc=pred, weights="MSE")
rank = fc_comb(actual=actual, fc=pred, weights="rank")
time = fc_comb(actual=actual, fc=pred, weights="time")
time7 = fc_comb(actual=actual, fc=pred, weights="time", window=7)
time14 = fc_comb(actual=actual, fc=pred, weights="time", window=14)
time30 = fc_comb(actual=actual, fc=pred, weights="time", window=30)
time60 = fc_comb(actual=actual, fc=pred, weights="time", window=60)
comb_results.iloc[i,0] = MAE(eq, actual)
comb_results.iloc[i,1] = MAE(mse, actual)
comb_results.iloc[i,2] = MAE(rank, actual)
comb_results.iloc[i,3] = MAE(time, actual)
comb_results.iloc[i,4] = MAE(time7, actual)
i = i + 1
print(round(comb_results,2).to_latex())
rank = pd.DataFrame(rank)
rank.columns = ['Rank']
eq = | pd.DataFrame(eq) | pandas.DataFrame |
from __future__ import annotations
import logging
import os
import numpy as np
import json
import warnings
import sys
import shutil
from datetime import timedelta
import pandas as pd
import pickle
import copy
import yaml
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
import torchmetrics
from omegaconf import OmegaConf, DictConfig
import operator
import pytorch_lightning as pl
from pytorch_lightning.utilities.types import _METRIC
from typing import Optional, List, Dict, Union, Callable
from sklearn.model_selection import train_test_split
from autogluon.core.utils.utils import default_holdout_frac
from autogluon.core.utils.loaders import load_pd
from autogluon.common.utils.log_utils import set_logger_verbosity
from autogluon.common.utils.utils import setup_outputdir
from .constants import (
LABEL, BINARY, MULTICLASS, REGRESSION, Y_PRED,
Y_PRED_PROB, Y_TRUE, LOGITS, FEATURES, AUTOMM,
AUTOMM_TUTORIAL_MODE, UNIFORM_SOUP, GREEDY_SOUP,
BEST, MIN, MAX, TEXT,
)
from .data.datamodule import BaseDataModule
from .data.infer_types import infer_column_problem_types
from .data.preprocess_dataframe import MultiModalFeaturePreprocessor
from .utils import (
create_model,
create_and_save_model,
init_df_preprocessor,
init_data_processors,
select_model,
compute_score,
average_checkpoints,
infer_metrics,
get_config,
LogFilter,
apply_log_filter,
save_pretrained_models,
convert_checkpoint_name,
save_text_tokenizers,
load_text_tokenizers,
modify_duplicate_model_names,
assign_feature_column_names,
turn_on_off_feature_column_info,
)
from .optimization.utils import (
get_metric,
get_loss_func,
)
from .optimization.lit_module import LitModule
from .optimization.lit_distiller import DistillerLitModule
from .. import version as ag_version
logger = logging.getLogger(AUTOMM)
class AutoMMModelCheckpoint(pl.callbacks.ModelCheckpoint):
"""
Class that inherits pl.callbacks.ModelCheckpoint. The purpose is to resolve the potential issues in lightning.
- Issue1:
It solves the issue described in https://github.com/PyTorchLightning/pytorch-lightning/issues/5582.
For ddp_spawn, the checkpoint_callback.best_k_models will be empty.
Here, we resolve it by storing the best_models to "SAVE_DIR/best_k_models.yaml".
"""
def _update_best_and_save(
self, current: torch.Tensor, trainer: "pl.Trainer",
monitor_candidates: Dict[str, _METRIC]
) -> None:
super(AutoMMModelCheckpoint, self)._update_best_and_save(current=current,
trainer=trainer,
monitor_candidates=monitor_candidates)
self.to_yaml()
class AutoMMPredictor:
"""
AutoMMPredictor can predict the values of one dataframe column conditioned on the rest columns.
The prediction can be either a classification or regression problem. The feature columns can contain
image paths, text, numerical, and categorical features.
"""
def __init__(
self,
label: str,
problem_type: Optional[str] = None,
eval_metric: Optional[str] = None,
path: Optional[str] = None,
verbosity: Optional[int] = 3,
warn_if_exist: Optional[bool] = True,
enable_progress_bar: Optional[bool] = None,
):
"""
Parameters
----------
label
Name of the column that contains the target variable to predict.
problem_type
Type of prediction problem, i.e. is this a binary/multiclass classification or regression problem
(options: 'binary', 'multiclass', 'regression').
If `problem_type = None`, the prediction problem type is inferred
based on the label-values in provided dataset.
eval_metric
Evaluation metric name. If `eval_metric = None`, it is automatically chosen based on `problem_type`.
Defaults to 'accuracy' for binary and multiclass classification, 'root_mean_squared_error' for regression.
path
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "AutogluonAutoMM/ag-[TIMESTAMP]"
will be created in the working directory to store all models.
Note: To call `fit()` twice and save all results of each fit,
you must specify different `path` locations or don't specify `path` at all.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
verbosity
Verbosity levels range from 0 to 4 and control how much information is printed.
Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings).
If using logging, you can alternatively control amount of information printed via `logger.setLevel(L)`,
where `L` ranges from 0 to 50
(Note: higher values of `L` correspond to fewer print statements, opposite of verbosity levels)
warn_if_exist
Whether to raise warning if the specified path already exists.
enable_progress_bar
Whether to show progress bar. It will be True by default and will also be
disabled if the environment variable os.environ["AUTOMM_DISABLE_PROGRESS_BAR"] is set.
"""
if eval_metric is not None and not isinstance(eval_metric, str):
eval_metric = eval_metric.name
if eval_metric is not None and eval_metric.lower() in ["rmse", "r2", "pearsonr", "spearmanr"]:
problem_type = REGRESSION
if os.environ.get(AUTOMM_TUTORIAL_MODE):
verbosity = 1 # don't use 3, which doesn't suppress logger.info() in .load().
enable_progress_bar = False
if verbosity is not None:
set_logger_verbosity(verbosity, logger=logger)
self._label_column = label
self._problem_type = problem_type.lower() if problem_type is not None else None
self._eval_metric_name = eval_metric
self._validation_metric_name = None
self._output_shape = None
self._save_path = path
self._ckpt_path = None
self._pretrained_path = None
self._config = None
self._df_preprocessor = None
self._column_types = None
self._data_processors = None
self._model = None
self._resume = False
self._verbosity = verbosity
self._warn_if_exist = warn_if_exist
self._enable_progress_bar = enable_progress_bar if enable_progress_bar is not None else True
@property
def path(self):
return self._save_path
@property
def label(self):
return self._label_column
@property
def problem_type(self):
return self._problem_type
# This func is required by the abstract trainer of TabularPredictor.
def set_verbosity(self, verbosity: int):
set_logger_verbosity(verbosity, logger=logger)
def fit(
self,
train_data: pd.DataFrame,
config: Optional[dict] = None,
tuning_data: Optional[pd.DataFrame] = None,
time_limit: Optional[int] = None,
save_path: Optional[str] = None,
hyperparameters: Optional[Union[str, Dict, List[str]]] = None,
column_types: Optional[dict] = None,
holdout_frac: Optional[float] = None,
teacher_predictor: Union[str, AutoMMPredictor] = None,
seed: Optional[int] = 123,
):
"""
        Fit the AutoMMPredictor to predict the label column of a dataframe based on the other columns,
        which may contain image path, text, numeric, or categorical features.
Parameters
----------
train_data
A dataframe containing training data.
config
A dictionary with four keys "model", "data", "optimization", and "environment".
Each key's value can be a string, yaml file path, or OmegaConf's DictConfig.
Strings should be the file names (DO NOT include the postfix ".yaml") in
automm/configs/model, automm/configs/data, automm/configs/optimization, and automm/configs/environment.
For example, you can configure a late-fusion model for the image, text, and tabular data as follows:
config = {
"model": "fusion_mlp_image_text_tabular",
"data": "default",
"optimization": "adamw",
"environment": "default",
}
or
config = {
"model": "/path/to/model/config.yaml",
"data": "/path/to/data/config.yaml",
"optimization": "/path/to/optimization/config.yaml",
"environment": "/path/to/environment/config.yaml",
}
or
config = {
"model": OmegaConf.load("/path/to/model/config.yaml"),
"data": OmegaConf.load("/path/to/data/config.yaml"),
"optimization": OmegaConf.load("/path/to/optimization/config.yaml"),
"environment": OmegaConf.load("/path/to/environment/config.yaml"),
}
tuning_data
A dataframe containing validation data, which should have the same columns as the train_data.
If `tuning_data = None`, `fit()` will automatically
hold out some random validation examples from `train_data`.
time_limit
How long `fit()` should run for (wall clock time in seconds).
If not specified, `fit()` will run until the model has completed training.
save_path
Path to directory where models and intermediate outputs should be saved.
hyperparameters
This is to override some default configurations.
For example, changing the text and image backbones can be done by formatting:
a string
hyperparameters = "model.hf_text.checkpoint_name=google/electra-small-discriminator model.timm_image.checkpoint_name=swin_small_patch4_window7_224"
or a list of strings
hyperparameters = ["model.hf_text.checkpoint_name=google/electra-small-discriminator", "model.timm_image.checkpoint_name=swin_small_patch4_window7_224"]
or a dictionary
hyperparameters = {
"model.hf_text.checkpoint_name": "google/electra-small-discriminator",
"model.timm_image.checkpoint_name": "swin_small_patch4_window7_224",
}
column_types
A dictionary that maps column names to their data types.
For example: `column_types = {"item_name": "text", "image": "image_path",
"product_description": "text", "height": "numerical"}`
may be used for a table with columns: "item_name", "brand", "product_description", and "height".
If None, column_types will be automatically inferred from the data.
The current supported types are:
- "image_path": each row in this column is one image path.
- "text": each row in this column contains text (sentence, paragraph, etc.).
- "numerical": each row in this column contains a number.
- "categorical": each row in this column belongs to one of K categories.
holdout_frac
Fraction of train_data to holdout as tuning_data for optimizing hyper-parameters or
early stopping (ignored unless `tuning_data = None`).
Default value (if None) is selected based on the number of rows in the training data
and whether hyper-parameter-tuning is utilized.
teacher_predictor
The pre-trained teacher predictor or its saved path. If provided, `fit()` can distill its
knowledge to a student predictor, i.e., the current predictor.
seed
The random seed to use for this training run.
Returns
-------
An "AutoMMPredictor" object (itself).
"""
pl.seed_everything(seed, workers=True)
if self._config is not None: # continuous training
config = self._config
config = get_config(
config=config,
overrides=hyperparameters,
)
if self._resume or save_path is None:
save_path = self._save_path
else:
save_path = os.path.expanduser(save_path)
if not self._resume:
save_path = setup_outputdir(
path=save_path,
warn_if_exist=self._warn_if_exist,
)
logger.debug(f"save path: {save_path}")
if tuning_data is None:
if self._problem_type in [BINARY, MULTICLASS]:
stratify = train_data[self._label_column]
else:
stratify = None
if holdout_frac is None:
val_frac = default_holdout_frac(len(train_data), hyperparameter_tune=False)
else:
val_frac = holdout_frac
train_data, tuning_data = train_test_split(
train_data,
test_size=val_frac,
stratify=stratify,
random_state=np.random.RandomState(seed),
)
column_types, problem_type, output_shape = \
infer_column_problem_types(
train_df=train_data,
valid_df=tuning_data,
label_columns=self._label_column,
problem_type=self._problem_type,
provided_column_types=column_types,
)
logger.debug(f"column_types: {column_types}")
logger.debug(f"image columns: {[k for k, v in column_types.items() if v == 'image_path']}")
if self._column_types is not None and self._column_types != column_types:
warnings.warn(
f"Inferred column types {column_types} are inconsistent with "
f"the previous {self._column_types}. "
f"New columns will not be used in the current training."
)
# use previous column types to avoid inconsistency with previous numerical mlp and categorical mlp
column_types = self._column_types
if self._problem_type is not None:
assert self._problem_type == problem_type, \
f"Inferred problem type {problem_type} is different from " \
f"the previous {self._problem_type}"
if self._output_shape is not None:
assert self._output_shape == output_shape, \
f"Inferred output shape {output_shape} is different from " \
f"the previous {self._output_shape}"
if self._df_preprocessor is None:
df_preprocessor = init_df_preprocessor(
config=config.data,
column_types=column_types,
label_column=self._label_column,
train_df_x=train_data.drop(columns=self._label_column),
train_df_y=train_data[self._label_column],
)
else: # continuing training
df_preprocessor = self._df_preprocessor
config = select_model(
config=config,
df_preprocessor=df_preprocessor,
)
if self._data_processors is None:
data_processors = init_data_processors(
config=config,
df_preprocessor=df_preprocessor,
)
else: # continuing training
data_processors = self._data_processors
data_processors_count = {k: len(v) for k, v in data_processors.items()}
logger.debug(f"data_processors_count: {data_processors_count}")
if self._model is None:
model = create_model(
config=config,
num_classes=output_shape,
num_numerical_columns=len(df_preprocessor.numerical_feature_names),
num_categories=df_preprocessor.categorical_num_categories
)
else: # continuing training
model = self._model
if self._validation_metric_name is None or self._eval_metric_name is None:
validation_metric_name, eval_metric_name = infer_metrics(
problem_type=problem_type,
eval_metric_name=self._eval_metric_name,
)
else:
validation_metric_name = self._validation_metric_name
eval_metric_name = self._eval_metric_name
validation_metric, minmax_mode, custom_metric_func = get_metric(
metric_name=validation_metric_name,
problem_type=problem_type,
num_classes=output_shape,
)
loss_func = get_loss_func(problem_type)
if time_limit is not None:
time_limit = timedelta(seconds=time_limit)
# set attributes for saving and prediction
self._problem_type = problem_type # In case problem type isn't provided in __init__().
self._eval_metric_name = eval_metric_name # In case eval_metric isn't provided in __init__().
self._validation_metric_name = validation_metric_name
self._save_path = save_path
self._config = config
self._output_shape = output_shape
self._column_types = column_types
self._df_preprocessor = df_preprocessor
self._data_processors = data_processors
self._model = model
# save artifacts for the current running, except for model checkpoint, which will be saved in _fit()
self.save(save_path)
# need to assign the above attributes before setting up distillation
if teacher_predictor is not None:
teacher_model, critics, baseline_funcs, soft_label_loss_func, \
teacher_df_preprocessor, teacher_data_processors = \
self._setup_distillation(
teacher_predictor=teacher_predictor,
)
else:
teacher_model, critics, baseline_funcs, soft_label_loss_func,\
teacher_df_preprocessor, teacher_data_processors = None, None, None, None, None, None
self._fit(
train_df=train_data,
val_df=tuning_data,
df_preprocessor=df_preprocessor,
data_processors=data_processors,
model=model,
config=config,
loss_func=loss_func,
validation_metric=validation_metric,
validation_metric_name=validation_metric_name,
custom_metric_func=custom_metric_func,
minmax_mode=minmax_mode,
teacher_model=teacher_model,
critics=critics,
baseline_funcs=baseline_funcs,
soft_label_loss_func=soft_label_loss_func,
teacher_df_preprocessor=teacher_df_preprocessor,
teacher_data_processors=teacher_data_processors,
max_time=time_limit,
save_path=save_path,
ckpt_path=self._ckpt_path,
resume=self._resume,
enable_progress_bar=self._enable_progress_bar,
)
return self
def _setup_distillation(
self,
teacher_predictor: Union[str, AutoMMPredictor],
):
"""
Prepare for distillation. It verifies whether the student and teacher predictors have consistent
configurations. If teacher and student have duplicate model names, it modifies teacher's model names.
Parameters
----------
teacher_predictor
The teacher predictor in knowledge distillation.
Returns
-------
teacher_model
The teacher predictor's model.
critics
The critics used in computing mutual information loss.
baseline_funcs
The baseline functions used in computing mutual information loss.
soft_label_loss_func
The loss function using teacher's logits as labels.
df_preprocessor
The teacher predictor's dataframe preprocessor.
data_processors
The teacher predictor's data processors.
"""
logger.debug("setting up distillation...")
if isinstance(teacher_predictor, str):
teacher_predictor = AutoMMPredictor.load(teacher_predictor)
# verify that student and teacher configs are consistent.
assert self._problem_type == teacher_predictor._problem_type
assert self._label_column == teacher_predictor._label_column
assert self._eval_metric_name == teacher_predictor._eval_metric_name
assert self._output_shape == teacher_predictor._output_shape
assert self._validation_metric_name == teacher_predictor._validation_metric_name
# if teacher and student have duplicate model names, change teacher's model names
# we don't change student's model names to avoid changing the names back when saving the model.
teacher_predictor = modify_duplicate_model_names(
predictor=teacher_predictor,
postfix="teacher",
blacklist=self._config.model.names,
)
critics, baseline_funcs = None, None
if self._config.distiller.soft_label_loss_type == "mean_square_error":
soft_label_loss_func = nn.MSELoss()
elif self._config.distiller.soft_label_loss_type == "cross_entropy":
soft_label_loss_func = nn.CrossEntropyLoss()
else:
raise ValueError(
f"Unknown soft_label_loss_type: {self._config.distiller.soft_label_loss_type}"
)
# turn on returning column information in data processors
self._data_processors = turn_on_off_feature_column_info(
data_processors=self._data_processors,
flag=True,
)
teacher_predictor._data_processors = turn_on_off_feature_column_info(
data_processors=teacher_predictor._data_processors,
flag=True,
)
logger.debug(
f"teacher preprocessor text_feature_names: {teacher_predictor._df_preprocessor._text_feature_names}"
)
logger.debug(
f"teacher preprocessor image_path_names: {teacher_predictor._df_preprocessor._image_path_names}"
)
logger.debug(
f"teacher preprocessor categorical_feature_names: {teacher_predictor._df_preprocessor._categorical_feature_names}"
)
logger.debug(
f"teacher preprocessor numerical_feature_names: {teacher_predictor._df_preprocessor._numerical_feature_names}"
)
logger.debug(
f"student preprocessor text_feature_names: {self._df_preprocessor._text_feature_names}"
)
logger.debug(
f"student preprocessor image_path_names: {self._df_preprocessor._image_path_names}"
)
logger.debug(
f"student preprocessor categorical_feature_names: {self._df_preprocessor._categorical_feature_names}"
)
logger.debug(
f"student preprocessor numerical_feature_names: {self._df_preprocessor._numerical_feature_names}"
)
return (
teacher_predictor._model,
critics,
baseline_funcs,
soft_label_loss_func,
teacher_predictor._df_preprocessor,
teacher_predictor._data_processors,
)
def _fit(
self,
train_df: pd.DataFrame,
val_df: pd.DataFrame,
df_preprocessor: MultiModalFeaturePreprocessor,
data_processors: dict,
model: nn.Module,
config: DictConfig,
loss_func: _Loss,
validation_metric: torchmetrics.Metric,
validation_metric_name: str,
custom_metric_func: Callable,
minmax_mode: str,
teacher_model: nn.Module,
critics: nn.ModuleList,
baseline_funcs: nn.ModuleList,
soft_label_loss_func: _Loss,
teacher_df_preprocessor: MultiModalFeaturePreprocessor,
teacher_data_processors: dict,
max_time: timedelta,
save_path: str,
ckpt_path: str,
resume: bool,
enable_progress_bar: bool,
):
if teacher_df_preprocessor is not None:
df_preprocessor = [df_preprocessor, teacher_df_preprocessor]
if teacher_data_processors is not None:
data_processors = [data_processors, teacher_data_processors]
train_dm = BaseDataModule(
df_preprocessor=df_preprocessor,
data_processors=data_processors,
per_gpu_batch_size=config.env.per_gpu_batch_size,
num_workers=config.env.num_workers,
train_data=train_df,
val_data=val_df,
)
optimization_kwargs = dict(
optim_type=config.optimization.optim_type,
lr_choice=config.optimization.lr_choice,
lr_schedule=config.optimization.lr_schedule,
lr=config.optimization.learning_rate,
lr_decay=config.optimization.lr_decay,
end_lr=config.optimization.end_lr,
lr_mult=config.optimization.lr_mult,
weight_decay=config.optimization.weight_decay,
warmup_steps=config.optimization.warmup_steps,
)
metrics_kwargs = dict(
validation_metric=validation_metric,
validation_metric_name=validation_metric_name,
custom_metric_func=custom_metric_func,
)
is_distill = teacher_model is not None
if is_distill:
task = DistillerLitModule(
student_model=model,
teacher_model=teacher_model,
matches=config.distiller.matches,
critics=critics,
baseline_funcs=baseline_funcs,
hard_label_weight=config.distiller.hard_label_weight,
soft_label_weight=config.distiller.soft_label_weight,
temperature=config.distiller.temperature,
hard_label_loss_func=loss_func,
soft_label_loss_func=soft_label_loss_func,
**metrics_kwargs,
**optimization_kwargs,
)
else:
task = LitModule(
model=model,
loss_func=loss_func,
efficient_finetune=OmegaConf.select(config, 'optimization.efficient_finetune'),
**metrics_kwargs,
**optimization_kwargs,
)
logger.debug(f"validation_metric_name: {task.validation_metric_name}")
logger.debug(f"minmax_mode: {minmax_mode}")
checkpoint_callback = AutoMMModelCheckpoint(
dirpath=save_path,
save_top_k=config.optimization.top_k,
verbose=True,
monitor=task.validation_metric_name,
mode=minmax_mode,
save_last=True,
)
early_stopping_callback = pl.callbacks.EarlyStopping(
monitor=task.validation_metric_name,
patience=config.optimization.patience,
mode=minmax_mode
)
lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
model_summary = pl.callbacks.ModelSummary(max_depth=1)
callbacks = [checkpoint_callback, early_stopping_callback, lr_callback, model_summary]
tb_logger = pl.loggers.TensorBoardLogger(
save_dir=save_path,
name="",
version="",
)
num_gpus = (
config.env.num_gpus
if isinstance(config.env.num_gpus, int)
else len(config.env.num_gpus)
)
if num_gpus < 0: # In case config.env.num_gpus is -1, meaning using all gpus.
num_gpus = torch.cuda.device_count()
if num_gpus == 0: # CPU only training
            warnings.warn(
                "Only CPU is detected in the instance. "
                "AutoMMPredictor will be trained with CPU only. "
                "This may result in slow training speed. "
                "Consider switching to an instance with GPU support.",
UserWarning,
)
grad_steps = max(config.env.batch_size // (
config.env.per_gpu_batch_size * config.env.num_nodes
), 1)
precision = 32 # Force to use fp32 for training since fp16-based AMP is not available in CPU.
# Try to check the status of bf16 training later.
else:
grad_steps = max(config.env.batch_size // (
config.env.per_gpu_batch_size * num_gpus * config.env.num_nodes
), 1)
precision = config.env.precision
if precision == 'bf16' and not torch.cuda.is_bf16_supported():
warnings.warn('bf16 is not supported by the GPU device / cuda version. '
                              'Consider using GPU devices from the Ampere generation or newer (e.g., available as AWS P4 instances) '
'and upgrade cuda to be >=11.0. '
'Currently, AutoGluon will downgrade the precision to 32.', UserWarning)
precision = 32
if num_gpus <= 1:
strategy = None
else:
strategy = config.env.strategy
blacklist_msgs = ["already configured with model summary"]
log_filter = LogFilter(blacklist_msgs)
with apply_log_filter(log_filter):
trainer = pl.Trainer(
gpus=num_gpus,
auto_select_gpus=config.env.auto_select_gpus if num_gpus != 0 else False,
num_nodes=config.env.num_nodes,
precision=precision,
strategy=strategy,
benchmark=False,
deterministic=config.env.deterministic,
max_epochs=config.optimization.max_epochs,
max_steps=config.optimization.max_steps,
max_time=max_time,
callbacks=callbacks,
logger=tb_logger,
gradient_clip_val=1,
gradient_clip_algorithm="norm",
accumulate_grad_batches=grad_steps,
log_every_n_steps=10,
enable_progress_bar=enable_progress_bar,
fast_dev_run=config.env.fast_dev_run,
val_check_interval=config.optimization.val_check_interval,
)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
".*does not have many workers which may be a bottleneck. "
"Consider increasing the value of the `num_workers` argument` "
".* in the `DataLoader` init to improve performance.*"
)
warnings.filterwarnings(
"ignore",
"Checkpoint directory .* exists and is not empty."
)
trainer.fit(
task,
datamodule=train_dm,
ckpt_path=ckpt_path if resume else None, # this is to resume training that was broken accidentally
)
if trainer.global_rank == 0:
self._top_k_average(
model=model,
save_path=save_path,
minmax_mode=minmax_mode,
is_distill=is_distill,
config=config,
val_df=val_df,
validation_metric_name=validation_metric_name,
trainer=trainer,
)
else:
sys.exit(
f"Training finished, exit the process with global_rank={trainer.global_rank}..."
)
def _top_k_average(
self,
model,
save_path,
minmax_mode,
is_distill,
config,
val_df,
validation_metric_name,
trainer,
):
if os.path.exists(os.path.join(save_path, 'best_k_models.yaml')):
with open(os.path.join(save_path, 'best_k_models.yaml'), 'r') as f:
best_k_models = yaml.load(f, Loader=yaml.Loader)
os.remove(os.path.join(save_path, 'best_k_models.yaml'))
else:
            # In some cases, training ends too early (e.g., due to time_limit) so that there are
            # no saved best_k model checkpoints. In that scenario, we won't perform any model averaging.
best_k_models = None
last_ckpt_path = os.path.join(save_path, "last.ckpt")
if is_distill:
prefix = "student_model."
else:
prefix = "model."
if best_k_models:
if config.optimization.top_k_average_method == UNIFORM_SOUP:
logger.info(
f"Start to fuse {len(best_k_models)} checkpoints via the uniform soup algorithm."
)
ingredients = top_k_model_paths = list(best_k_models.keys())
else:
top_k_model_paths = [
v[0] for v in sorted(
list(best_k_models.items()),
key=lambda ele: ele[1],
reverse=(minmax_mode == MAX),
)
]
if config.optimization.top_k_average_method == GREEDY_SOUP:
# Select the ingredients based on the methods proposed in paper
# "Model soups: averaging weights of multiple fine-tuned models improves accuracy without
# increasing inference time", https://arxiv.org/pdf/2203.05482.pdf
monitor_op = {MIN: operator.le, MAX: operator.ge}[minmax_mode]
logger.info(
f"Start to fuse {len(top_k_model_paths)} checkpoints via the greedy soup algorithm."
)
ingredients = [top_k_model_paths[0]]
self._model = self._load_state_dict(
model=model,
path=top_k_model_paths[0],
prefix=prefix,
)
best_score = self.evaluate(val_df, [validation_metric_name])[validation_metric_name]
for i in range(1, len(top_k_model_paths)):
cand_avg_state_dict = average_checkpoints(
checkpoint_paths=ingredients + [top_k_model_paths[i]],
)
self._model = self._load_state_dict(
model=self._model,
state_dict=cand_avg_state_dict,
prefix=prefix,
)
cand_score = self.evaluate(val_df, [validation_metric_name])[validation_metric_name]
if monitor_op(cand_score, best_score):
# Add new ingredient
ingredients.append(top_k_model_paths[i])
best_score = cand_score
elif config.optimization.top_k_average_method == BEST:
ingredients = [top_k_model_paths[0]]
else:
raise ValueError(
f"The key for 'optimization.top_k_average_method' is not supported. "
f"We only support '{GREEDY_SOUP}', '{UNIFORM_SOUP}' and '{BEST}'. "
f"The provided value is '{config.optimization.top_k_average_method}'."
)
else:
# best_k_models is empty so we will manually save a checkpoint from the trainer
# and use it as the main ingredients
trainer.save_checkpoint(os.path.join(save_path, "model.ckpt"))
ingredients = [os.path.join(save_path, "model.ckpt")]
top_k_model_paths = []
# Average all the ingredients
avg_state_dict = average_checkpoints(
checkpoint_paths=ingredients,
)
self._model = self._load_state_dict(
model=model,
state_dict=avg_state_dict,
prefix=prefix,
)
if is_distill:
avg_state_dict = self._replace_model_name_prefix(
state_dict=avg_state_dict,
old_prefix="student_model",
new_prefix="model",
)
checkpoint = {"state_dict": avg_state_dict}
torch.save(checkpoint, os.path.join(save_path, "model.ckpt"))
# clean old checkpoints + the intermediate files stored
for per_path in top_k_model_paths:
if os.path.isfile(per_path):
os.remove(per_path)
if os.path.isfile(last_ckpt_path):
os.remove(last_ckpt_path)
def _predict(
self,
data: Union[pd.DataFrame, dict, list],
ret_type: str,
requires_label: bool,
) -> torch.Tensor:
data = self._data_to_df(data)
# For prediction data with no labels provided.
if not requires_label:
data_processors = copy.deepcopy(self._data_processors)
data_processors.pop(LABEL, None)
else:
data_processors = self._data_processors
num_gpus = (
self._config.env.num_gpus
if isinstance(self._config.env.num_gpus, int)
else len(self._config.env.num_gpus)
)
if num_gpus < 0:
num_gpus = torch.cuda.device_count()
if num_gpus == 0: # CPU only prediction
warnings.warn(
"Only CPU is detected in the instance. "
"AutoMMPredictor will predict with CPU only. "
"This may results in slow prediction speed. "
"Consider to switch to an instance with GPU support.",
UserWarning,
)
            precision = 32 # Force fp32 for prediction since fp16-based AMP is not available on CPU
else:
precision = self._config.env.precision
if precision == 'bf16' and not torch.cuda.is_bf16_supported():
warnings.warn('bf16 is not supported by the GPU device / cuda version. '
                              'Consider using GPU devices of the Ampere generation or newer, or upgrading cuda to >=11.0. '
'Currently, AutoGluon will downgrade the precision to 32.', UserWarning)
precision = 32
if num_gpus > 1:
strategy = "dp"
# If using 'dp', the per_gpu_batch_size would be split by all GPUs.
# So, we need to use the GPU number as a multiplier to compute the batch size.
batch_size = self._config.env.per_gpu_batch_size_evaluation * num_gpus
else:
strategy = None
batch_size = self._config.env.per_gpu_batch_size_evaluation
predict_dm = BaseDataModule(
df_preprocessor=self._df_preprocessor,
data_processors=data_processors,
per_gpu_batch_size=batch_size,
num_workers=self._config.env.num_workers_evaluation,
predict_data=data,
)
task = LitModule(
model=self._model,
)
blacklist_msgs = []
if self._verbosity <= 3: # turn off logging in prediction
blacklist_msgs.append("Automatic Mixed Precision")
blacklist_msgs.append("GPU available")
blacklist_msgs.append("TPU available")
blacklist_msgs.append("IPU available")
blacklist_msgs.append("LOCAL_RANK")
log_filter = LogFilter(blacklist_msgs)
with apply_log_filter(log_filter):
evaluator = pl.Trainer(
gpus=num_gpus,
auto_select_gpus=self._config.env.auto_select_gpus if num_gpus != 0 else False,
num_nodes=self._config.env.num_nodes,
precision=precision,
strategy=strategy,
benchmark=False,
enable_progress_bar=self._enable_progress_bar,
deterministic=self._config.env.deterministic,
logger=False,
)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
".*does not have many workers which may be a bottleneck. "
"Consider increasing the value of the `num_workers` argument` "
".* in the `DataLoader` init to improve performance.*"
)
outputs = evaluator.predict(
task,
datamodule=predict_dm,
)
if ret_type == LOGITS:
logits = [ele[LOGITS] for ele in outputs]
ret = torch.cat(logits)
elif ret_type == FEATURES:
features = [ele[FEATURES] for ele in outputs]
ret = torch.cat(features)
else:
raise ValueError(f"Unknown return type: {ret_type}")
return ret
@staticmethod
def _logits_to_prob(logits: torch.Tensor):
assert logits.ndim == 2
prob = F.softmax(logits.float(), dim=1)
prob = prob.detach().cpu().float().numpy()
return prob
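    # Illustrative shapes for `_logits_to_prob` (placeholder values): a logits tensor of
    # shape (batch_size, num_classes), e.g. torch.randn(8, 3), becomes an (8, 3) numpy
    # array of probabilities whose rows sum to 1 after the softmax.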
def evaluate(
self,
data: Union[pd.DataFrame, dict, list],
metrics: Optional[List[str]] = None,
return_pred: Optional[bool] = False,
):
"""
Evaluate model on a test dataset.
Parameters
----------
data
A dataframe, containing the same columns as the training data
metrics
A list of metric names to report.
If None, we only return the score for the stored `_eval_metric_name`.
return_pred
Whether to return the prediction result of each row.
Returns
-------
A dictionary with the metric names and their corresponding scores.
Optionally return a dataframe of prediction results.
"""
logits = self._predict(
data=data,
ret_type=LOGITS,
requires_label=True,
)
metric_data = {}
if self._problem_type in [BINARY, MULTICLASS]:
y_pred_prob = self._logits_to_prob(logits)
metric_data[Y_PRED_PROB] = y_pred_prob
y_pred = self._df_preprocessor.transform_prediction(y_pred=logits, inverse_categorical=False)
y_pred_transformed = self._df_preprocessor.transform_prediction(y_pred=logits, inverse_categorical=True)
y_true = self._df_preprocessor.transform_label_for_metric(df=data)
metric_data.update({
Y_PRED: y_pred,
Y_TRUE: y_true,
})
if metrics is None:
metrics = [self._eval_metric_name]
results = {}
for per_metric in metrics:
if self._problem_type != BINARY and per_metric.lower() in ["roc_auc", "average_precision"]:
raise ValueError(
f"Metric {per_metric} is only supported for binary classification."
)
score = compute_score(
metric_data=metric_data,
metric_name=per_metric.lower(),
)
results[per_metric] = score
if return_pred:
return results, self.as_pandas(data=data, to_be_converted=y_pred_transformed)
else:
return results
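    # Illustrative usage of `evaluate` (the names `predictor` and `test_df` are placeholders):
    #
    #     scores = predictor.evaluate(test_df)                     # uses the stored eval metric
    #     scores, pred_df = predictor.evaluate(test_df, return_pred=True)
    #
    # `scores` maps each requested metric name to its value on `test_df`.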
def predict(
self,
data: Union[pd.DataFrame, dict, list],
as_pandas: Optional[bool] = True,
):
"""
Predict values for the label column of new data.
Parameters
----------
data
The data to make predictions for. Should contain same column names as training data and
follow same format (except for the `label` column).
as_pandas
Whether to return the output as a pandas DataFrame(Series) (True) or numpy array (False).
Returns
-------
Array of predictions, one corresponding to each row in given dataset.
"""
logits = self._predict(
data=data,
ret_type=LOGITS,
requires_label=False,
)
pred = self._df_preprocessor.transform_prediction(y_pred=logits)
if as_pandas:
pred = self.as_pandas(data=data, to_be_converted=pred)
return pred
def predict_proba(
self,
data: Union[pd.DataFrame, dict, list],
as_pandas: Optional[bool] = True,
as_multiclass: Optional[bool] = True,
):
"""
        Predict class probabilities rather than class labels.
This is only for the classification tasks. Calling it for a regression task will throw an exception.
Parameters
----------
data
The data to make predictions for. Should contain same column names as training data and
follow same format (except for the `label` column).
as_pandas
Whether to return the output as a pandas DataFrame(Series) (True) or numpy array (False).
as_multiclass
Whether to return the probability of all labels or
just return the probability of the positive class for binary classification problems.
Returns
-------
Array of predicted class-probabilities, corresponding to each row in the given data.
When as_multiclass is True, the output will always have shape (#samples, #classes).
Otherwise, the output will have shape (#samples,)
"""
assert self._problem_type in [BINARY, MULTICLASS], \
f"Problem {self._problem_type} has no probability output."
logits = self._predict(
data=data,
ret_type=LOGITS,
requires_label=False,
)
prob = self._logits_to_prob(logits)
if not as_multiclass:
if self._problem_type == BINARY:
prob = prob[:, 1]
if as_pandas:
prob = self.as_pandas(data=data, to_be_converted=prob)
return prob
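    # Illustrative usage of `predict` / `predict_proba` (placeholder names):
    #
    #     labels = predictor.predict(test_df)                      # pandas Series of labels
    #     probs = predictor.predict_proba(test_df)                 # shape (#samples, #classes)
    #     pos_probs = predictor.predict_proba(test_df, as_multiclass=False)  # binary tasks only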
def extract_embedding(
self,
data: Union[pd.DataFrame, dict, list],
as_pandas: Optional[bool] = False,
):
"""
Extract features for each sample, i.e., one row in the provided dataframe `data`.
Parameters
----------
data
The data to extract embeddings for. Should contain same column names as training dataset and
follow same format (except for the `label` column).
as_pandas
Whether to return the output as a pandas DataFrame (True) or numpy array (False).
Returns
-------
Array of embeddings, corresponding to each row in the given data.
It will have shape (#samples, D) where the embedding dimension D is determined
by the neural network's architecture.
"""
features = self._predict(
data=data,
ret_type=FEATURES,
requires_label=False,
)
features = features.detach().cpu().numpy()
if as_pandas:
features = pd.DataFrame(features, index=data.index)
return features
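    # Illustrative usage of `extract_embedding` (placeholder names):
    #
    #     emb = predictor.extract_embedding(test_df)               # numpy array, shape (#samples, D)
    #     emb_df = predictor.extract_embedding(test_df, as_pandas=True)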
def _data_to_df(self, data: Union[pd.DataFrame, dict, list]):
if isinstance(data, pd.DataFrame):
return data
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(
f'The format of data is not understood. '
f'We have type(data)="{type(data)}", but a pd.DataFrame was required.'
)
return data
def as_pandas(
self,
data: Union[pd.DataFrame, dict, list],
to_be_converted: np.ndarray,
):
if isinstance(data, pd.DataFrame):
index = data.index
else:
index = None
if to_be_converted.ndim == 1:
return pd.Series(to_be_converted, index=index, name=self._label_column)
else:
            return pd.DataFrame(to_be_converted, index=index, columns=self.class_labels)
# Ref: https://towardsdatascience.com/data-apps-with-pythons-streamlit-b14aaca7d083
#/app.py
import streamlit as st
import json
import requests
# import sys
# import os
import pandas as pd
import numpy as np
import re
from datetime import datetime as dt
from pandas_profiling import ProfileReport
from streamlit_pandas_profiling import st_profile_report
from matplotlib import pyplot as plt
import seaborn as sns
# Initial setup
st.set_page_config(layout="wide")
with open('./env_variable.json','r') as j:
json_data = json.load(j)
#SLACK_BEARER_TOKEN = os.environ.get('SLACK_BEARER_TOKEN') ## Get in setting of Streamlit Share
SLACK_BEARER_TOKEN = json_data['SLACK_BEARER_TOKEN']
DTC_GROUPS_URL = ('https://raw.githubusercontent.com/anhdanggit/atom-assignments/main/data/datacracy_groups.csv')
#st.write(json_data['SLACK_BEARER_TOKEN'])
@st.cache
def load_users_df():
# Slack API User Data
endpoint = "https://slack.com/api/users.list"
headers = {"Authorization": "Bearer {}".format(json_data['SLACK_BEARER_TOKEN'])}
response_json = requests.post(endpoint, headers=headers).json()
user_dat = response_json['members']
# Convert to CSV
user_dict = {'user_id':[],'name':[],'display_name':[],'real_name':[],'title':[],'is_bot':[]}
for i in range(len(user_dat)):
user_dict['user_id'].append(user_dat[i]['id'])
user_dict['name'].append(user_dat[i]['name'])
user_dict['display_name'].append(user_dat[i]['profile']['display_name'])
user_dict['real_name'].append(user_dat[i]['profile']['real_name_normalized'])
user_dict['title'].append(user_dat[i]['profile']['title'])
user_dict['is_bot'].append(int(user_dat[i]['is_bot']))
user_df = pd.DataFrame(user_dict)
# Read dtc_group hosted in github
dtc_groups = pd.read_csv(DTC_GROUPS_URL)
user_df = user_df.merge(dtc_groups, how='left', on='name')
return user_df
@st.cache
def load_channel_df():
endpoint2 = "https://slack.com/api/conversations.list"
data = {'types': 'public_channel,private_channel'} # -> CHECK: API Docs https://api.slack.com/methods/conversations.list/test
headers = {"Authorization": "Bearer {}".format(SLACK_BEARER_TOKEN)}
response_json = requests.post(endpoint2, headers=headers, data=data).json()
channel_dat = response_json['channels']
channel_dict = {'channel_id':[], 'channel_name':[], 'is_channel':[],'creator':[],'created_at':[],'topics':[],'purpose':[],'num_members':[]}
for i in range(len(channel_dat)):
channel_dict['channel_id'].append(channel_dat[i]['id'])
channel_dict['channel_name'].append(channel_dat[i]['name'])
channel_dict['is_channel'].append(channel_dat[i]['is_channel'])
channel_dict['creator'].append(channel_dat[i]['creator'])
channel_dict['created_at'].append(dt.fromtimestamp(float(channel_dat[i]['created'])))
channel_dict['topics'].append(channel_dat[i]['topic']['value'])
channel_dict['purpose'].append(channel_dat[i]['purpose']['value'])
channel_dict['num_members'].append(channel_dat[i]['num_members'])
channel_df = pd.DataFrame(channel_dict)
return channel_df
@st.cache(allow_output_mutation=True)
def load_msg_dict(user_df,channel_df):
endpoint3 = "https://slack.com/api/conversations.history"
headers = {"Authorization": "Bearer {}".format(SLACK_BEARER_TOKEN)}
msg_dict = {'channel_id':[],'msg_id':[], 'msg_ts':[], 'user_id':[], 'latest_reply':[],'reply_user_count':[],'reply_users':[],'github_link':[],'text':[]}
for channel_id, channel_name in zip(channel_df['channel_id'], channel_df['channel_name']):
print('Channel ID: {} - Channel Name: {}'.format(channel_id, channel_name))
try:
data = {"channel": channel_id}
response_json = requests.post(endpoint3, data=data, headers=headers).json()
msg_ls = response_json['messages']
for i in range(len(msg_ls)):
if 'client_msg_id' in msg_ls[i].keys():
msg_dict['channel_id'].append(channel_id)
msg_dict['msg_id'].append(msg_ls[i]['client_msg_id'])
msg_dict['msg_ts'].append(dt.fromtimestamp(float(msg_ls[i]['ts'])))
msg_dict['latest_reply'].append(dt.fromtimestamp(float(msg_ls[i]['latest_reply'] if 'latest_reply' in msg_ls[i].keys() else 0))) ## -> No reply: 1970-01-01
msg_dict['user_id'].append(msg_ls[i]['user'])
msg_dict['reply_user_count'].append(msg_ls[i]['reply_users_count'] if 'reply_users_count' in msg_ls[i].keys() else 0)
msg_dict['reply_users'].append(msg_ls[i]['reply_users'] if 'reply_users' in msg_ls[i].keys() else 0)
msg_dict['text'].append(msg_ls[i]['text'] if 'text' in msg_ls[i].keys() else 0)
## -> Censor message contains tokens
text = msg_ls[i]['text']
github_link = re.findall('(?:https?://)?(?:www[.])?github[.]com/[\w-]+/?', text)
msg_dict['github_link'].append(github_link[0] if len(github_link) > 0 else None)
        except Exception:
print('====> '+ str(response_json))
    msg_df = pd.DataFrame(msg_dict)
import pandas as pd
import random
import math
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry.polygon import LinearRing, Polygon, Point
from maxrect import get_intersection, get_maximal_rectangle, rect2poly
from vertical_adhesion import *
def get_min_max(input_list):
'''
get minimum and maximum value in the list
:param input_list: list of numbers
:return: min, max
'''
min_value = input_list[0]
max_value = input_list[0]
for i in input_list:
if i > max_value:
max_value = i
elif i < min_value:
min_value = i
return min_value, max_value
def adhesion_structure_horizontal(file_name):
gcode = open(file_name)
lines = gcode.readlines()
# get inner wall
extruder = 0
layer = 0
is_inner_wall = 0
inner_walls = []
layer_count = 0
all_layers = []
set = ""
for l in lines:
if "T0" in l:
extruder = 0
elif "T1" in l:
extruder = 1
elif ";LAYER:" in l:
layer = int(l.split(":")[1].strip())
if ";TYPE:WALL-INNER" in l:
is_inner_wall = 1
elif is_inner_wall == 1 and ";TYPE:" in l:
is_inner_wall = 0
if is_inner_wall == 1:
if len(inner_walls) == 0:
set += l
inner_walls.append([layer, extruder, set])
else:
if inner_walls[-1][0] == layer and inner_walls[-1][1] == extruder:
set += l
inner_walls[-1][2] = set
else:
set = l
inner_walls.append([layer, extruder, set])
# inner_walls.append([layer, extruder, l])
all_layers.append([layer, extruder])
if ";LAYER_COUNT:" in l:
layer_count = int(l.split(":")[-1].strip())
# get multimaterial layers
layers_drop_dups = []
for i in all_layers:
if i not in layers_drop_dups:
layers_drop_dups.append(i)
layer_df = pd.DataFrame(layers_drop_dups, columns=['layer', 'extruder'])
layer_df = layer_df.groupby(['layer']).size().reset_index(name='count')
multi_layers_number = []
for i in range(len(layer_df)):
if layer_df.iloc[i]['count'] > 1:
multi_layers_number.append(layer_df.iloc[i]['layer'])
first_or_last = []
excluded_layers = [0, 1, 2, 3, 4,
layer_count - 1, layer_count - 2, layer_count - 3, layer_count - 4, layer_count - 5]
for i in excluded_layers:
multi_layers_number.remove(i)
# get inner walls of multimaterial layers
multi_inner_walls = []
for i in range(len(inner_walls)):
if inner_walls[i][0] in multi_layers_number: # if the layer contains two materials
multi_inner_walls.append(inner_walls[i])
flag = 0
points_0 = []
points_1 = []
# for i in range(len(infills)):
# points_0 = []
# points_1 = []
# print(infills)
# get outer wall
is_outer_wall = 0
extruder = 0
layer = 0
set = ""
outer_walls = []
for l in lines:
if "T0" in l:
extruder = 0
elif "T1" in l:
extruder = 1
elif ";LAYER:" in l:
layer = int(l.split(":")[1].strip())
if layer in multi_layers_number:
if ";TYPE:WALL-OUTER" in l:
is_outer_wall = 1
elif is_outer_wall == 1 and ";" in l:
is_outer_wall = 0
if is_outer_wall == 1:
# outer_walls.append([layer, extruder, l])
if len(outer_walls) == 0:
set += l
outer_walls.append([layer, extruder, set])
else:
if outer_walls[-1][0] == layer and outer_walls[-1][1] == extruder:
set += l
outer_walls[-1][2] = set
else:
set = l
outer_walls.append([layer, extruder, set])
set = ""
# plt.plot(x_values, y_values, 'ro')
# plt.plot(a_x, a_y, 'bo')
# plt.plot(b_x, b_y, 'go')
# plt.show()
inner_walls_df = pd.DataFrame(multi_inner_walls, columns=['layer', 'extruder', 'commands'])
outer_walls_df = pd.DataFrame(outer_walls, columns=['layer', 'extruder', 'commands'])
# for i in range(len(outer_walls_df)):
# print(outer_walls_df.iloc[i]['commands'])
# polygons_x_list = []
# polygons_y_list = []
polygons_list = []
for i in range(len(outer_walls)):
commands = outer_walls[i][2].split("\n")
extruder = outer_walls[i][1]
polygons_list.append(get_polygons_of_wall(commands))
outer_walls_df['polygons'] = polygons_list
polygons_list = []
for i in range(len(multi_inner_walls)):
commands = multi_inner_walls[i][2].split("\n")
extruder = multi_inner_walls[i][1]
polygons_list.append(get_polygons_of_wall(commands))
inner_walls_df['polygons'] = polygons_list
stitches_per_layer = []
dist = 0.4 # nozzle diameter, the maximum gap to find adjacent points
'''
#----------------------------------------------
i = 10
current_outer_walls_df = outer_walls_df.loc[outer_walls_df['layer'] == i]
current_inner_walls_df = inner_walls_df.loc[inner_walls_df['layer'] == i]
adjacency_set = []
# first material
polygons_0 = current_outer_walls_df.iloc[0]['polygons']
# second material
polygons_1 = current_outer_walls_df.iloc[1]['polygons']
# inner polygons
inner_polygon_0 = current_inner_walls_df.iloc[0]['polygons']
inner_polygon_1 = current_inner_walls_df.iloc[1]['polygons']
pairs = []
print(inner_polygon_0)
print(inner_polygon_1)
all_the_points = []
for poly in inner_polygon_0:
for point in poly:
all_the_points.append(point)
for poly in inner_polygon_1:
for point in poly:
all_the_points.append(point)
print(all_the_points)
inner_x = []
inner_y = []
#for point in all_the_points:
# find material 0 - material 1 pairs
for j in range(len(polygons_0)):
for k in range(len(polygons_1)):
pairs.append([j, k])
# print(pairs)
adjacency = []
for j in range(len(pairs)):
p_0 = polygons_0[pairs[j][0]]
p_1 = polygons_1[pairs[j][1]]
for k in range(len(p_0)):
for l in range(len(p_1)):
if math.hypot(p_0[k][0] - p_1[l][0], p_0[k][1] - p_1[l][1]) <= dist:
# print(math.hypot(p_0[k][0] - p_1[l][0], p_0[k][1] - p_1[l][1]))
if p_0[k] not in adjacency:
adjacency.append(p_0[k])
if p_1[l] not in adjacency:
adjacency.append(p_1[l])
if len(adjacency) != 0:
adjacency_set.append(adjacency)
adjacency = []
# print(adjacency_set)
'''
for i in multi_layers_number:
current_outer_walls_df = outer_walls_df.loc[outer_walls_df['layer'] == i]
current_inner_walls_df = inner_walls_df.loc[inner_walls_df['layer'] == i]
adjacency_set = []
# first material
polygons_0 = current_outer_walls_df.iloc[0]['polygons']
# second material
polygons_1 = current_outer_walls_df.iloc[1]['polygons']
# inner polygons
inner_polygon_0 = current_inner_walls_df.iloc[0]['polygons']
inner_polygon_1 = current_inner_walls_df.iloc[1]['polygons']
pairs = []
#print(polygons_0)
#print(polygons_1)
# find material 0 - material 1 pairs
for j in range(len(polygons_0)):
for k in range(len(polygons_1)):
pairs.append([j, k])
# print(pairs)
adjacency = []
for j in range(len(pairs)):
p_0 = polygons_0[pairs[j][0]]
p_1 = polygons_1[pairs[j][1]]
for k in range(len(p_0)):
for l in range(len(p_1)):
if math.hypot(p_0[k][0] - p_1[l][0], p_0[k][1] - p_1[l][1]) <= dist:
# print(math.hypot(p_0[k][0] - p_1[l][0], p_0[k][1] - p_1[l][1]))
if p_0[k] not in adjacency:
adjacency.append(p_0[k])
if p_1[l] not in adjacency:
adjacency.append(p_1[l])
if len(adjacency) != 0:
adjacency_set.append(adjacency)
adjacency = []
# print(adjacency_set)
stitches = ";TYPE:STITCH\n"
for j in range(len(adjacency_set)):
adj_points = adjacency_set[j]
x_min = 0
y_min = 0
x_max = 0
y_max = 0
x_values = []
y_values = []
# print(adj_points)
for k in range(len(adj_points)):
x_values.append(adj_points[k][0])
y_values.append(adj_points[k][1])
x_min, x_max = get_min_max(x_values)
y_min, y_max = get_min_max(y_values)
fair_dist = 3
fair_dist_to_outer = 1.2
# direction = 0 # 0: horizontal, 1: vertical
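            # The stitch spans the material interface: if the adjacency region is taller
            # than wide, the stitch is laid horizontally (direction 0), otherwise vertically
            # (direction 1); it is extended by fair_dist along the stitch axis and inset by
            # fair_dist_to_outer from the outer wall on the other axis.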
if x_max - x_min < y_max - y_min:
direction = 0
else:
direction = 1
if direction == 0: # horizontal alignment
x_min -= fair_dist
x_max += fair_dist
y_min += fair_dist_to_outer
y_max -= fair_dist_to_outer
elif direction == 1: # vertical alignment
x_min += fair_dist_to_outer
x_max -= fair_dist_to_outer
y_min -= fair_dist
y_max += fair_dist
stitch_x, stitch_y = generate_adjacent_stitch(x_min, x_max, y_min, y_max, direction)
stitch = generate_full_infill_for_horizontal_stitch(stitch_x, stitch_y, direction)
stitches += stitch
stitches_per_layer.append([i, stitches])
    stitch_df = pd.DataFrame(stitches_per_layer, columns=['layer', 'stitch'])
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from copy import deepcopy
from functools import partial
import matplotlib.pyplot as plt
import optuna
import pickle
from sklearn.metrics import mean_squared_error
from tqdm import tqdm
import os
code_path = os.path.dirname(os.path.abspath(__file__))
# leaked_df = pd.read_csv(f'{code_path}/../input/leaked_data_all.csv', parse_dates=['timestamp'])
with open(f'{code_path}/../prepare_data/leak_data_drop_bad_rows.pkl', 'rb') as f:
leaked_df = pickle.load(f).rename(columns={'meter_reading': 'leaked_meter_reading'})
# leaked_df = pd.read_feather(f'{code_path}/../input/leak_data.feather').rename(columns={'meter_reading': 'leaked_meter_reading'})
leaked_df = leaked_df[['building_id','meter','timestamp', 'leaked_meter_reading']]
leaked_df = leaked_df.query('timestamp>=20170101')
building_meta = pd.read_csv(f"{code_path}/../input/building_metadata.csv")
leaked_df = leaked_df.merge(building_meta[['building_id', 'site_id']], on='building_id', how='left')
leaked_df = leaked_df.query('~(meter==0 & site_id==0)')
# leaked_df = leaked_df.query('site_id==[2,4,15]')
# leaked_df = leaked_df.query('105<=building_id<=564 | 656<=building_id')
test = pd.read_csv(f"{code_path}/../input/test.csv", parse_dates=['timestamp'])
i = 1
for mul in tqdm(['05', '10', '15']):
submission_s1 = pd.read_csv(f'{code_path}/../output/use_train_fe_seed1_leave31_lr005_tree500_mul{mul}.csv')
# submission_s2 = pd.read_csv(f'{code_path}/../output/use_train_fe_seed2_leave31_lr005_tree500_mul{mul}.csv')
# submission_s3 = pd.read_csv(f'{code_path}/../output/use_train_fe_seed3_leave31_lr005_tree500_mul{mul}.csv')
# test[f'pred{i}'] = (submission_s1['meter_reading'] + submission_s2['meter_reading'] + submission_s3['meter_reading']) / 3
test[f'pred{i}'] = submission_s1['meter_reading']
i += 1
# del submission_s1, submission_s2, submission_s3
# for name in ['fe2_lgbm', 'submission_tomioka', 'submission_half_and_half', 'submission_distill', 'submission_TE_50000tree_seed1_mul075']:
for name in ['submission_half_and_half', 'submission_simple_data_cleanup']:#, 'use_train_fe_seed1_leave15_lr001_tree20000_mul05']:#, 'fe2_lgbm']:
print(i, end=' ')
test[f'pred{i}'] = pd.read_csv(f'{code_path}/../external_data/{name}.csv')['meter_reading']
i += 1
test[f'pred{i}'] = np.exp(1) - 1
i += 1
test = test.merge(leaked_df, on=['building_id', 'meter', 'timestamp'], how='left')
N = test.columns.str.startswith('pred').sum()
print(N)
test_sub = test.copy()
test = test[~test['leaked_meter_reading'].isnull()]
test2017 = test.query('timestamp<20180101')
test2018 = test.query('20180101<=timestamp')
def preproceeding(submission, N):
submission.loc[:,'pred1':'leaked_meter_reading'] = np.log1p(submission.loc[:,'pred1':'leaked_meter_reading'])
g = submission.groupby('meter')
sub_sub = [dict(), dict(), dict(), dict()]
leak_sub = [dict(), dict(), dict(), dict()]
leak_leak = [0,0,0,0]
for meter in [3,2,1,0]:
for i in tqdm(range(1,N+1)):
leak_sub[meter][i] = sum(-2 * g.get_group(meter)['leaked_meter_reading'] * g.get_group(meter)[f'pred{i}'])
for j in range(1,N+1):
if i > j:
sub_sub[meter][(i,j)] = sub_sub[meter][(j,i)]
else:
sub_sub[meter][(i,j)] = sum(g.get_group(meter)[f'pred{i}'] * g.get_group(meter)[f'pred{j}'])
leak_leak[meter] = (sum(g.get_group(meter)['leaked_meter_reading'] ** 2))
return sub_sub, leak_sub, leak_leak
def optimization(meter, sub_sub, leak_sub, leak_leak, length, W):
# global count_itr
# if count_itr%1000 == 0: print(count_itr, end=' ')
# count_itr += 1
loss_total = 0
for i, a in enumerate(W, 1):
for j, b in enumerate(W, 1):
loss_total += a * b * sub_sub[meter][(i, j)]
for i, a in enumerate(W, 1):
loss_total += leak_sub[meter][i] * a
loss_total += leak_leak[meter]
return np.sqrt(loss_total / length)
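# Note on `optimization`: for per-meter weights W = (a_1, ..., a_N) it computes the RMSE
# between the leaked readings L and the weighted blend of submissions S_i (all in log1p
# space) through the expansion
#     ||L - sum_i a_i*S_i||^2 = sum_i sum_j a_i*a_j*<S_i, S_j> - 2*sum_i a_i*<L, S_i> + <L, L>,
# which is why only the precomputed inner products sub_sub, leak_sub and leak_leak are needed.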
def make_ensemble_weight(focus_df, N):
sub_sub, leak_sub, leak_leak = preproceeding(focus_df.copy(), N)
np.random.seed(1)
score = [list(), list(), list(), list()]
weight = [list(), list(), list(), list()]
for meter in [0,1,2,3]:
f = partial(optimization, meter, sub_sub, leak_sub, leak_leak, len(focus_df.query(f'meter=={meter}')))
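        # Random search over blend weights: draw N uniform weights, zero out a random
        # subset, normalize them to sum to 1, then rescale by a random factor in
        # [0.8, 1.1) so the final weights are not constrained to sum exactly to 1.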
for i in tqdm(range(1000000)):
W = np.random.rand(N)
to_zero = np.arange(N)
np.random.shuffle(to_zero)
W[to_zero[:np.random.randint(N)]] = 0
W /= W.sum()
W *= np.random.rand() * 0.3 + 0.8
score[meter].append(f(W))
weight[meter].append(W)
score[meter] = np.array(score[meter])
weight[meter] = np.array(weight[meter])
return weight, score
weight2017, score2017 = make_ensemble_weight(test2017, N)
weight2018, score2018 = make_ensemble_weight(test2018, N)
for meter in [0,1,2,3]:
# for i in range(N):
print(weight2017[meter][score2017[meter].argmin()])
print()
# for meter in [0,1,2,3]:
# print(score2017[meter].min())
# print(weight2017[meter][score2017[meter].argmin()].sum())
# print()
for meter in [0,1,2,3]:
# for i in range(N):
print(weight2018[meter][score2018[meter].argmin()])
print()
# for meter in [0,1,2,3]:
# print(score2018[meter].min())
# print(weight2018[meter][score2018[meter].argmin()].sum())
# print()
def new_pred(test, weight, score, N):
pred_new = list()
for meter in [0,1,2,3]:
test_m = test.query(f'meter=={meter}')
ensemble_m = sum([np.log1p(test_m[f'pred{i+1}']) * weight[meter][score[meter].argmin()][i] for i in range(N)])
pred_new.append(ensemble_m)
    pred_new = pd.concat(pred_new)
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
        idx2 = DatetimeIndex(['2011-01-01'], freq='D')
# -*- coding: utf-8 -*-
"""
__date: 2021/05/12
__author: ssfang
__corporation: OriginQuantum
__usage:
"""
import os
import re
import time
import threading
from queue import Queue
from datetime import datetime
import yaml
import pandas as pd
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
import os
import sys
import pdb
import bdb
import click
import logging
import signal
import hashlib
import inspect
import traceback
import pandas as pd
from subir import Uploader
from .browser_interactor import BrowserInteractor
from .user_interactor import UserInteractor, Interaction
from .pilot import Pilot
from .maneuver import Maneuver, Position, InteractQueueManeuver, BreakManeuver
from .base import MenuOption, ControlMode, ControlAction, Ordnance
from .error import RaspadorDidNotCompleteManuallyError, RaspadorInvalidManeuverError, RaspadorInvalidPositionError, RaspadorInteract, RaspadorSkip, RaspadorSkipOver, RaspadorSkipUp, RaspadorSkipToBreak, RaspadorQuit, RaspadorUnexpectedResultsError
from .style import Format, Styled
from .parser import Parser
from data_layer import Redshift as SQL
from typing import Dict, List, Optional, TypeVar, Generic, Union
from enum import Enum
from io_map import IOMap
class Raspador(IOMap):
browser: BrowserInteractor
user: UserInteractor
configuration: Dict[str, any]
flight_logs: List[pd.DataFrame]
def __init__(self, browser: Optional[BrowserInteractor]=None, user: Optional[UserInteractor]=None, configuration: Dict[str, any]=None, interactive: Optional[bool]=None):
self.configuration = configuration if configuration else {}
self.browser = browser if browser else BrowserInteractor()
self.user = user if user else UserInteractor(driver=self.browser.driver)
self.flight_logs = [pd.DataFrame()]
if interactive is not None:
self.user.interactive = interactive
@property
def description(self) -> str:
return self.name
@property
def name(self) -> str:
return type(self).__name__
@property
def flight_log(self) -> pd.DataFrame:
return self.flight_logs[-1]
@flight_log.setter
def flight_log(self, flight_log: pd.DataFrame):
self.flight_logs[-1] = flight_log
@property
def top_maneuvers_report(self) -> pd.DataFrame:
report = self.flight_log[['maneuver', 'option', 'result']].groupby(['maneuver', 'option', 'result']).size()
return report
@property
def top_errors_report(self) -> pd.DataFrame:
report = self.flight_log[self.flight_log.error != ''][['error', 'maneuver']].groupby(['error', 'maneuver']).size()
return report
def scrape(self):
if not self.flight_log.empty:
self.user.present_report(report=self.top_maneuvers_report, title='Mission Report')
self.user.present_report(self.top_errors_report, title='Error Report')
self.save_log()
unexpected_results = list(filter(lambda r: r not in ['Completed', ''], self.flight_log.result.unique()))
if unexpected_results:
unexpected_results_error = RaspadorUnexpectedResultsError(unexpected_results=unexpected_results)
if self.user.interactive:
self.user.present_message('Unexpected results.', error=unexpected_results_error)
else:
raise unexpected_results_error
        self.flight_logs.append(pd.DataFrame())
import numpy as np
import pandas as pd
import pytest
import orca
from urbansim_templates import utils
def test_parse_version():
assert utils.parse_version('0.1.0.dev0') == (0, 1, 0, 0)
assert utils.parse_version('0.115.3') == (0, 115, 3, None)
assert utils.parse_version('3.1.dev7') == (3, 1, 0, 7)
assert utils.parse_version('5.4') == (5, 4, 0, None)
def test_version_greater_or_equal():
assert utils.version_greater_or_equal('2.0', '0.1.1') == True
assert utils.version_greater_or_equal('0.1.1', '2.0') == False
assert utils.version_greater_or_equal('2.1', '2.0.1') == True
assert utils.version_greater_or_equal('2.0.1', '2.1') == False
assert utils.version_greater_or_equal('1.1.3', '1.1.2') == True
assert utils.version_greater_or_equal('1.1.2', '1.1.3') == False
assert utils.version_greater_or_equal('1.1.3', '1.1.3') == True
assert utils.version_greater_or_equal('1.1.3.dev1', '1.1.3.dev0') == True
assert utils.version_greater_or_equal('1.1.3.dev0', '1.1.3') == False
###############################
## get_df
@pytest.fixture
def df():
d = {'id': [1,2,3], 'val1': [4,5,6], 'val2': [7,8,9]}
return pd.DataFrame(d).set_index('id')
def test_get_df_dataframe(df):
"""
Confirm that get_df() works when passed a DataFrame.
"""
df_out = utils.get_df(df)
pd.testing.assert_frame_equal(df, df_out)
def test_get_df_str(df):
"""
Confirm that get_df() works with str input.
"""
orca.add_table('df', df)
df_out = utils.get_df('df')
pd.testing.assert_frame_equal(df, df_out)
def test_get_df_dataframewrapper(df):
"""
Confirm that get_df() works with orca.DataFrameWrapper input.
"""
dfw = orca.DataFrameWrapper('df', df)
df_out = utils.get_df(dfw)
pd.testing.assert_frame_equal(df, df_out)
def test_get_df_tablefuncwrapper(df):
"""
Confirm that get_df() works with orca.TableFuncWrapper input.
"""
def df_callable():
return df
tfw = orca.TableFuncWrapper('df', df_callable)
df_out = utils.get_df(tfw)
pd.testing.assert_frame_equal(df, df_out)
def test_get_df_columns(df):
"""
Confirm that get_df() limits columns, and filters out duplicates and invalid ones.
"""
dfw = orca.DataFrameWrapper('df', df)
df_out = utils.get_df(dfw, ['id', 'val1', 'val1', 'val3'])
pd.testing.assert_frame_equal(df[['val1']], df_out)
def test_get_df_unsupported_type(df):
"""
Confirm that get_df() raises an error for an unsupported type.
"""
try:
df_out = utils.get_df([df])
except ValueError as e:
print(e)
return
pytest.fail()
###############################
## all_cols
def test_all_cols_dataframe(df):
"""
Confirm that all_cols() works with DataFrame input.
"""
cols = utils.all_cols(df)
assert sorted(cols) == sorted(['id', 'val1', 'val2'])
def test_all_cols_orca(df):
"""
Confirm that all_cols() works with Orca input.
"""
orca.add_table('df', df)
cols = utils.all_cols('df')
assert sorted(cols) == sorted(['id', 'val1', 'val2'])
def test_all_cols_extras(df):
"""
Confirm that all_cols() includes columns not part of the Orca core table.
"""
orca.add_table('df', df)
orca.add_column('df', 'newcol', pd.Series())
cols = utils.all_cols('df')
assert sorted(cols) == sorted(['id', 'val1', 'val2', 'newcol'])
def test_all_cols_unsupported_type(df):
"""
Confirm that all_cols() raises an error for an unsupported type.
"""
try:
cols = utils.all_cols([df])
except ValueError as e:
print(e)
return
pytest.fail()
###############################
## get_data
@pytest.fixture
def orca_session():
d1 = {'id': [1, 2, 3],
'building_id': [1, 2, 3],
'tenure': [1, 1, 0],
'age': [25, 45, 65]}
d2 = {'building_id': [1, 2, 3],
'zone_id': [17, 17, 17],
'pop': [2, 2, 2]}
d3 = {'zone_id': [17],
'pop': [500]}
households = pd.DataFrame(d1).set_index('id')
orca.add_table('households', households)
buildings = pd.DataFrame(d2).set_index('building_id')
orca.add_table('buildings', buildings)
zones = pd.DataFrame(d3).set_index('zone_id')
orca.add_table('zones', zones)
orca.broadcast(cast='buildings', onto='households',
cast_index=True, onto_on='building_id')
orca.broadcast(cast='zones', onto='buildings',
cast_index=True, onto_on='zone_id')
def test_get_data(orca_session):
"""
General test - multiple tables, binding filters, extra columns.
"""
df = utils.get_data(tables = ['households', 'buildings'],
model_expression = 'tenure ~ pop',
filters = ['age > 20', 'age < 50'],
extra_columns = 'zone_id')
assert(set(df.columns) == set(['tenure', 'pop', 'age', 'zone_id']))
assert(len(df) == 2)
def test_get_data_single_table(orca_session):
"""
Single table, no other params.
"""
df = utils.get_data(tables = 'households')
assert(len(df) == 3)
def test_get_data_bad_columns(orca_session):
"""
Bad column name, should be ignored.
"""
df = utils.get_data(tables = ['households', 'buildings'],
model_expression = 'tenure ~ pop + potato')
assert(set(df.columns) == set(['tenure', 'pop']))
def test_update_column(orca_session):
"""
General test.
Additional tests to add: series without index, adding column on the fly.
"""
table = 'buildings'
column = 'pop'
data = pd.Series([3,3,3], index=[1,2,3])
utils.update_column(table, column, data)
assert(orca.get_table(table).to_frame()[column].tolist() == [3,3,3])
def test_update_column_incomplete_series(orca_session):
"""
Update certain values but not others, with non-matching index orders.
"""
table = 'buildings'
column = 'pop'
data = pd.Series([10,5], index=[3,1])
utils.update_column(table, column, data)
assert(orca.get_table(table).to_frame()[column].tolist() == [5,2,10])
def test_add_column_incomplete_series(orca_session):
"""
Add an incomplete column to confirm that it's aligned based on the index. (The ints
will be cast to floats to accommodate the missing values.)
"""
table = 'buildings'
column = 'pop2'
    data = pd.Series([10,5], index=[3,1])
import PyPDF2
import csv
from pathlib import Path
import io
import pandas
import numpy
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def Cpk(usl, lsl, avg, sigma , cf, sigma_cf):
cpu = (usl - avg - (cf*sigma)) / (sigma_cf*sigma)
cpl = (avg - lsl - (cf*sigma)) / (sigma_cf*sigma)
cpk = numpy.min([cpu, cpl])
return cpl,cpu,cpk
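# Illustrative check of Cpk (placeholder numbers): with usl=5, lsl=1, avg=3, sigma=0.5,
# cf=0 and sigma_cf=3, both cpu and cpl equal (2 - 0) / 1.5 = 1.33, so cpk = 1.33;
# cf acts as a guard-band multiplier and sigma_cf generalizes the usual 3-sigma denominator.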
def convert_pdf_to_txt(path):
rsrcmgr = PDFResourceManager()
retstr = io.BytesIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos = set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages,
password=password,
caching=caching,
check_extractable=True):
interpreter.process_page(page)
text = retstr.getvalue()
fp.close()
device.close()
retstr.close()
return text
def filename_extraction(inp_filename):
raw = inp_filename.split('_')
dev = raw[1]
volt = raw[2]
temp = raw[3]
condition = raw[4]+raw[5]+raw[6]+raw[7]
return dev,volt,temp,condition
############################### User inputs ###############################################
path_of_files = r'C:\Users\vind\OneDrive - Cypress Semiconductor\documents\python_codes\EYE_DIAG_ANALYZER\pdf_ccg3pa2_tt'
pathlist = Path(path_of_files).glob('**/*.pdf')
output_filename = 'out'
automated_data_collection = 'yes' #'no'
cpl_matrix = []
cpu_matrix = []
cpk_matrix = []
################################# Program Begins #########################################
if automated_data_collection == 'no':
with open(output_filename +'raw'+ '.csv', 'a', newline='') as csvfile:
mywriter1 = csv.DictWriter(csvfile, dialect='excel',
fieldnames=['rise_time_average', 'rise_time_minimum', 'rise_time_maximum',
'fall_time_average', 'fall_time_minimum', 'fall_time_maximum',
'bit_rate_average', 'bit_rate_minimum', 'bit_rate_maximum',
'voltage_swing_average', 'voltage_swing_minimum', 'voltage_swing_maximum', 'filename'])
mywriter1.writeheader()
for files in pathlist:
###################### extracting only measurement page of the pdf file ##########################################
print(files.name)
pdfFileObj = open(files,'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
pdfWriter = PyPDF2.PdfFileWriter()
pdfReader.getNumPages()
pageNum = 3
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
pdfOutput = open('temp.pdf', 'wb')
pdfWriter.write(pdfOutput)
pdfOutput.close()
######################### pdf to text conversion ################################
x= convert_pdf_to_txt('temp.pdf')
text_extracted = x.split()
counter_list = list(enumerate(text_extracted, 1))
rise_time_average = (counter_list[91])[1]
fall_time_average = (counter_list[93])[1]
bit_rate_average = (counter_list[97])[1]
rise_time_minimum = (counter_list[145])[1]
fall_time_minimum = (counter_list[147])[1]
bit_rate_minimum = (counter_list[151])[1]
rise_time_maximum = (counter_list[156])[1]
fall_time_maximum = (counter_list[158])[1]
bit_rate_maximum = (counter_list[162])[1]
voltage_swing_average = (counter_list[131])[1]
voltage_swing_minimum = (counter_list[170])[1]
voltage_swing_maximum = (counter_list[174])[1]
data_raw = [float(rise_time_average), float(rise_time_minimum), float(rise_time_maximum), float(fall_time_average),
float(fall_time_minimum), float(fall_time_maximum), float(bit_rate_average), float(bit_rate_minimum),
float(bit_rate_maximum), float(voltage_swing_average), float(voltage_swing_minimum),
float(voltage_swing_maximum), files.name]
print(data_raw)
mywriter2 = csv.writer(csvfile, delimiter=',', dialect = 'excel')
mywriter2.writerow(data_raw)
################## Analysis begins ##########################################
pandas.set_option('display.expand_frame_repr', False)
import pandas as pd
from evaluate.calculator import (
RecallCalculator,
PrecisionCalculator,
EmptyReportError,
)
import pytest
from unittest.mock import patch, Mock
from evaluate.report import (
Report,
PrecisionReport,
RecallReport
)
from tests.common import create_precision_report_row
from io import StringIO
class TestPrecisionCalculator:
def test_calculatePrecision_NoReportsRaisesEmptyReportError(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
        df = pd.DataFrame(columns=columns)
from set_figure_defaults import FigureDefaults
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import operator
import warnings
import pickle
import sklearn as sklearn
def plot_heatmap(corrMatrix, title = '', vmin=None, vmax=None, cmap=None, ticklabels=False):
"""
Plots a correlation matrix as a labeled heatmap.
Parameters:
corrMatrix (df): Correlation matrix
title (str, optional): Title of the plot
        vmin, vmax (float, optional): Color scale limits for the heatmap.
        cmap (str, optional): Colormap name; defaults to 'icefire' when None.
        ticklabels (bool, optional): Whether to show axis tick labels.
"""
if cmap == None:
cmap_heatmap = "icefire"
else:
cmap_heatmap = cmap
sn.heatmap(corrMatrix, vmin=vmin, vmax=vmax, cmap=cmap_heatmap, square=True, xticklabels=ticklabels, yticklabels=ticklabels, rasterized=True)
plt.title(title)
plt.tight_layout()
plt.savefig('./Results/'+title+'.png', dpi=300)
plt.savefig('./Results/'+title+'.pdf')
#plt.savefig('./Results/'+title+'.svg')
plt.show()
def plot_RF_test(y_test, y_pred, title = None, xlabel = 'Measured $\log_2(MIC)$', ylabel = 'Predicted $\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None, saveas = None):
"""
Plots the results of predicting test set y values using the random forest
model.
Parameters:
y_test (df): Experimental test set y values.
y_pred (df): Predicted test set y values.
title (str, optional): Title of the plot
xlabel (str, optional)
ylabel (str, optional)
legend (str (2,), optional)
"""
sn.set_palette('colorblind')
def_color = 'k'#np.array(sn.color_palette())[0,:]
#fig, ax = plt.subplots(1,1)
##fig.set_figheight(5)
##fig.set_figwidth(5)
if groups is not None:
groups_obj = pd.concat([y_test, y_pred], axis=1).groupby(groups)
cmap=plt.get_cmap('tab10')
for name, group in groups_obj:
# Works only for groups with numeric names that are max cmap length:
fig, ax = plt.subplots(1,1)
ax.plot(group.iloc[:,0], group.iloc[:,1], marker=".", linestyle="", label=int(name), color = cmap.colors[int(name)])
#ax.legend()
else:
sn.scatterplot(x=y_test.values.ravel(),y=y_pred.values.ravel(), color=def_color)
#ax.scatter(y_test,y_pred, color = 'red', marker='.')
ax_max = 10
if np.max(y_test.values)>ax_max:
        ax_max = np.max(y_test.values)
ax_min = 0
if np.min(y_test.values)<ax_min:
ax_min = np.min(y_test.values)
plt.plot([ax_min, ax_max], [ax_min, ax_max], '--', color='black')
#plt.gca().set_aspect('equal', 'box')
if title is not None:
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tight_layout()
if (saveas is None) and (title is not None):
plt.savefig(title+'.pdf')
plt.savefig(title+'.svg')
plt.savefig(title+'.png', dpi=300)
#plt.show()
elif (saveas is not None):
plt.savefig(saveas+'.pdf')
plt.savefig(saveas+'.svg')
plt.savefig(saveas+'.png', dpi=300)
plt.show()
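# Illustrative call (placeholder frames): plot_RF_test(y_test, y_pred, title='RF test set')
# draws measured vs. predicted values as a scatter plot against the ideal y = x dashed line.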
def splitAndScale(X, y, test_size, random_state = None):
"""
Splits the data into train and test sets. Scales the train and test sets
using a StandardScaler (sklearn). The datasets are being scaled separately
to avoid "leaking" information from train to test set.
Parameters:
X (df): X data to be split and scaled (features in columns, samples in rows)
y (df): y data to be split and scaled (one column, samples in rows)
test_size (float): Proportion of the test size from the original data.
Returns:
X_train_scaled (df): X data of the train set
X_test_scaled (df): X data of the test set
y_train_scaled (df): y data of the train set
y_test_scaled (df): y data of the test set
scaler_train (StandardScaler): StandardScaler that is needed for scaling the
train set back to initial units.
scaler_test (StandardScaler): StandardScaler that is needed for scaling the
test set back to initial units.
random_state (int, optional): Seed for train test split.
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state = random_state)
# Scale.
scaler_test = preprocessing.StandardScaler()
scaler_train = preprocessing.StandardScaler()
test_scaled = X_test.copy()
test_scaled[y_test.columns[0]] = y_test.values
train_scaled = X_train.copy()
train_scaled[y_train.columns[0]] = y_train.values
test_scaled = pd.DataFrame(scaler_test.fit_transform(test_scaled), columns=test_scaled.columns, index=test_scaled.index)
train_scaled = pd.DataFrame(scaler_train.fit_transform(train_scaled), columns=train_scaled.columns, index=train_scaled.index)
X_train_scaled = train_scaled.iloc[:,:-1]
y_train_scaled = train_scaled.iloc[:,[-1]]#y_train#
X_test_scaled = test_scaled.iloc[:,:-1]
y_test_scaled = test_scaled.iloc[:,[-1]]#y_test#
return X_train_scaled, X_test_scaled, y_train_scaled, y_test_scaled, scaler_train, scaler_test
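# Illustrative use of splitAndScale (placeholder frames X and y):
#
#     X_tr, X_te, y_tr, y_te, sc_tr, sc_te = splitAndScale(X, y, test_size=0.2, random_state=0)
#     X_te_orig, y_te_orig = inverseScale(X_te, y_te, sc_te)   # back to original units
#
# Train and test sets each get their own StandardScaler, so no statistics leak from the
# test set into the training data.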
def define_scale(X_train, y_train):
scaler_train = preprocessing.StandardScaler()
train_scaled = X_train.copy()
train_scaled[y_train.columns[-1]] = y_train.values
train_scaled = pd.DataFrame(scaler_train.fit_transform(train_scaled), columns=train_scaled.columns, index=train_scaled.index)
X_train_scaled = train_scaled.iloc[:,:-1]
y_train_scaled = train_scaled.iloc[:,[-1]]
return X_train_scaled, y_train_scaled, scaler_train
def scale(X_data, y_data, scaler):
data_scaled = X_data.copy()
data_scaled[y_data.columns[-1]] = y_data.values
data_scaled = pd.DataFrame(scaler.transform(data_scaled), columns=data_scaled.columns, index=data_scaled.index)
X_data_scaled = data_scaled.iloc[:,:-1]
y_data_scaled = data_scaled.iloc[:,[-1]]
return X_data_scaled, y_data_scaled
def inverseScale(X_data, y_data, scaler):
datasets_scaled = X_data.copy()
datasets_scaled[y_data.columns[-1]] = y_data.values
datasets_unscaled = pd.DataFrame(scaler.inverse_transform(datasets_scaled), columns=datasets_scaled.columns, index = datasets_scaled.index)
X_data_unscaled = datasets_unscaled.iloc[:,:-1]
y_data_unscaled = datasets_unscaled.iloc[:,[-1]]
return X_data_unscaled, y_data_unscaled
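# Hedged usage sketch (not part of the original pipeline): how define_scale,
# scale and inverseScale are intended to chain together. X_train, y_train,
# X_new and y_new are hypothetical DataFrames with the layout described above.
'''
X_train_scaled, y_train_scaled, scaler_train = define_scale(X_train, y_train)
# Apply the scaler fitted on the train set to new data so both share one scale.
X_new_scaled, y_new_scaled = scale(X_new, y_new, scaler_train)
# Map scaled data (e.g. predictions) back to the original units.
X_new_unscaled, y_new_unscaled = inverseScale(X_new_scaled, y_new_scaled, scaler_train)
'''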
def RF_feature_analysis(X, y, groups = None, groups_only_for_plotting = False,
test_indices = None, test_proportion = 0.1, top_n = 5,
n_estimators = 100, max_depth = None,
min_samples_split = 2, min_samples_leaf = 1,
max_features = 'auto', bootstrap = True, i='',
random_state = None, sample_weighing = True,
plotting = True, saveas = None, title = True, max_samples = None):
"""
Splits 'X' and 'y' to train and test sets so that 'test_proportion' of
samples is in the test set. Fits a
(sklearn) random forest model to the data according to RF parameters
('n_estimators', 'max_depth', 'min_samples_split', 'min_samples_leaf',
'max_features', 'bootstrap'). Estimates feature importances and determines
'top_n' most important features. A plot and printouts for describing the
results.
Parameters:
X (df): X data (features in columns, samples in rows)
y (df): y data (one column, samples in rows)
test_proportion (float, optional): Proportion of the test size from the original data.
top_n (float, optional): The number of features in output 'top_feature_weights'
n_estimators (int, optional): Number of trees in the forest
max_depth (int, optional): Maximum depth of the tree
min_samples_split (int, optional): Minimum number of samples required to split an internal node (could also be a float, see sklearn documentation)
min_samples_leaf (int, optional): The minimum number of samples required to be at a leaf node (could also be a float, see sklearn documentation)
max_features (str, float, int, or None, optional): The number of features to consider when looking for the best split (see the options in sklearn documentation, 'sqrt' means max number is sqrt(number of features))
bootstrap (boolean, optional): False means the whole dataset is used for building each tree, True means bootstrap of samples is used
TO DO: Add value range that works for 5K dataset
i (int, optional): Optional numeric index for figure filename.
random_state (int, optional): Seed for train test split.
Returns:
feature_weights (df): weights of all the features
top_feature_weights (df): weights of the features with the most weight
regressor (RandomForestRegressor) RF regressor
R2 (float): R2 value of the prediction for the test set.
"""
if test_proportion == 0:
# Use the whole dataset for both training and "testing".
X_train = X.copy()
X_test = X.copy()
y_train = y.copy()
y_test = y.copy()
elif test_proportion is None:
# Assume X and y are lists with two datasets...
# Use dataset 0 as train and dataset 1 as test.
X_train = X[0].copy()
X_test = X[1].copy()
y_train = y[0].copy()
y_test = y[1].copy()
else:
# Split into test and train sets, and scale with StandardScaler.
if test_indices is None:
if groups is not None:
if groups_only_for_plotting == False:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_proportion, random_state=random_state, stratify=groups)
else:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_proportion, random_state=random_state)
#shufflesplit = sklearn.model_selection.ShuffleSplit(n_splits=1, test_size=test_proportion, random_state=random_state)
#X_train, X_test, y_train, y_test = shufflesplit.split(X, y, groups=groups)
else:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_proportion, random_state=random_state)
else:
#X_test = X.copy() # Are these needed?
#y_test = y.copy() # Are these needed?
X_test = X[test_indices].copy()
y_test = y[test_indices].copy()
#X_train = X.copy()
#y_train = y.copy()
X_train = X[~test_indices].copy()
y_train = y[~test_indices].copy()
#print(y_test)
if sample_weighing:
#sample_weight = np.divide(1,y_train.iloc[:,0]+0.1)
#sample_weight = np.abs(y_train.iloc[:,0]-8.5)
#sample_weight = np.abs(y_train.iloc[:,0]-4.1)
sample_weight = y_train.copy()
sample_weight[y_train<=3] = 5
sample_weight[y_train>=8] = 5
sample_weight[(y_train>3)&(y_train<8)] = 1
sample_weight = sample_weight.squeeze()
else:
sample_weight = None
#print(sample_weight)
#X_train_s, X_test_s, y_train_s, y_test_s, scaler_train, scaler_test = scale(X_train, X_test, y_train, y_test)
# Uncomment this part if you want to upsample the data.
# This works only with class data. For that, you need to modify splitAndScale function and input y.
#smote = SMOTE()
#print(y_train_s.shape)
#plot_2d_space(X_train_s, y_train_s, 'Original PCA')
#X_train_s, y_train_s = smote.fit_sample(X_train_s, y_train_s)
#print(y_train_s.shape, X_train_s.shape)
#plot_2d_space(X_train_s, y_train_s, 'SMOTE over-sampling')
#y_smogn = y_train_s.copy().join(X_train_s).reset_index(drop=True)
#print(y_smogn.columns.get_loc('log(MIC)'))
#print(y_smogn)
#data_smogn = smogn.smoter(data = y_smogn, y = 'log(MIC)',
# samp_method = 'extreme', under_samp = True,
# rel_xtrm_type='both', rel_thres = 0.9, rel_method = 'auto',
# rel_coef = 0.8)#, rel_ctrl_pts_rg = [[2,1,0], [8,1,0], [128,0,0]])
#print(data_smogn)
#y_train_s = data_smogn.iloc[:,0]
#X_train_s = data_smogn.iloc[:,1::]
#plot_2d_space(X_train_s, y_train_s, 'Smogned PCA')
# Fit and estimate feature importances.
regressor = RandomForestRegressor(n_estimators = n_estimators,
max_depth = max_depth,
min_samples_split = min_samples_split,
min_samples_leaf = min_samples_leaf,
max_features = max_features,
bootstrap = bootstrap,
n_jobs = -2, criterion='mse',
max_samples = max_samples,
random_state=random_state)
#regressor = RandomForestRegressor(n_jobs = -2, criterion='mse')
#print(X_train.shape, y_train.shape)
regressor.fit(X_train,np.ravel(y_train), sample_weight = sample_weight)
R2, RMSE, y_pred = predict_plot_RF(regressor, X_test, y_test,
plotting=plotting, title=title,
groups = groups, saveas = saveas)
feature_weight = regressor.feature_importances_
#print('Feature weights for RF with ' + str(X.shape[1]+1) + ' features: ', feature_weight)
'''
y_pred = regressor.predict(X_test)
y_pred = pd.Series(data=y_pred, index=y_test.index)
#y_pred = y_pred.round() # MIC are exponents of two.
feature_weight = regressor.feature_importances_
#print('Feature weights for RF with ' + str(X.shape[1]+1) + ' features: ', feature_weight)
#regressor.score(X_test_s, y_test_s)
# Transform back to the original units.
#X_test, y_test, y_pred = inverseScale(X_test_s, y_test_s, y_pred_s, scaler_test)
R2 = sklearn.metrics.r2_score(y_test, y_pred)
mse = sklearn.metrics.mean_squared_error(y_test, y_pred)
RMSE = np.sqrt(mse)
#y_pred = np.exp2(y_pred) # Exponential data didn't look good in the plot.
#y_test = np.exp2(y_test)
if plotting is True:
if title is not None:
title_temp = 'Results/log_MIC RF with ' + str(X_train.shape[1]) + ' features'+str(i)
else:
title_temp = None
if groups is not None:
plot_RF_test(y_test, y_pred,
title = title_temp,
groups=groups.loc[y_test.index], saveas = saveas)
else:
plot_RF_test(y_test, y_pred,
title = title_temp,
groups=None, saveas = saveas)
'''
# Sort the features by importance.
features = np.array(list(X_train.columns))
#print('Features set : ', features)
assert len(features) == len(feature_weight)
i = 0
l_dict = []
while i < len(feature_weight):
l_dict.append({features[i]:feature_weight[i]})
i += 1
res = sorted(zip(features, feature_weight), key = operator.itemgetter(1), reverse = True)
# Let's take the top features from the original set.
top_features = [i[0] for i in res[:top_n]]
#print('Top ', top_n, ' of the given features: ', top_features)
# Let's put features into two small dataframes.
feature_weights = pd.DataFrame(feature_weight.reshape((1,len(feature_weight))),
columns = features,
index = [0])
top_feature_weights = feature_weights.loc[:, top_features].copy()
#pd.DataFrame((feature_weights.loc[0,top_features].values).reshape((1, len(top_features))), columns = top_features, index = [0])
scaler_test = None
return feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train
def predict_plot_RF(regressor, X_test, y_test, plotting=True, title=None, groups = None, saveas = '', ):
y_pred = regressor.predict(X_test)
if y_test is None:
y_pred = pd.DataFrame(y_pred, index=X_test.index, columns=['log2mic'])
R2 = None
mse = None
RMSE = None
else:
y_pred = pd.DataFrame(data=y_pred, index=y_test.index, columns=['log2mic'])
R2 = sklearn.metrics.r2_score(y_test, y_pred)
mse = sklearn.metrics.mean_squared_error(y_test, y_pred)
RMSE = np.sqrt(mse)
#y_pred = np.exp2(y_pred) # Exponential data didn't look good in the plot.
#y_test = np.exp2(y_test)
if plotting is True:
if title is not None:
title_temp = 'Results/log_MIC RF with ' + str(X_test.shape[1]) + ' features'
else:
title_temp = None
if groups is not None:
plot_RF_test(y_test, y_pred,
title = title_temp,
groups=groups.loc[y_test.index], saveas = saveas)
else:
plot_RF_test(y_test, y_pred,
title = title_temp,
groups=None, saveas = saveas)
return R2, RMSE, y_pred
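# Hedged usage sketch: a minimal RF_feature_analysis call followed by a
# prediction on extra data without ground truth. X, y and X_holdout are
# hypothetical DataFrames (features in columns, samples in rows; y has one column).
'''
feature_weights, top_feature_weights, regressor, R2, RMSE, _, X_test, y_test, y_pred, X_train, y_train = RF_feature_analysis(
    X, y, test_proportion=0.2, top_n=10, n_estimators=100,
    random_state=0, sample_weighing=False, plotting=False)
print('Test R2:', R2, 'Test RMSE:', RMSE)
print('Top features:', list(top_feature_weights.columns))
# y_test=None skips scoring and only returns the predictions.
_, _, y_new_pred = predict_plot_RF(regressor, X_holdout, None, plotting=False)
'''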
def save_to_csv_pickle(dataset, filename, join_with = None, index=True):
"""
Saves any dataset to a csv file and a pickle file with the given filename.
Parameters:
dataset (any pickle and to_csv compatible type): dataset to be saved into file
filename (str): filename used for both csv and pickle file
join_with (df, optional): if given, is joined with 'dataset' and the result overwrites the csv file
index (bool, optional): whether to write the index into the csv file
"""
dataset.to_csv(filename + '.csv', index=index)
with open(filename, 'wb') as picklefile:
    pickle.dump(dataset, picklefile)
if join_with is not None:
    (join_with.join(dataset)).to_csv(filename + '.csv', index=index)
def save_to_pickle(dataset, filename):
"""
Saves any dataset to a pickle file with the given filename.
Parameters:
dataset (any pickle compatible type): dataset to be saved into file
filename (str): filename of the pickle file
"""
with open(filename, 'wb') as picklefile:
    pickle.dump(dataset, picklefile)
def fetch_pickle(filename):
"""
Fetches any variable saved into a picklefile with the given filename.
Parameters:
filename (str): filename of the pickle file
Returns:
variable (any pickle compatible type): variable that was saved into the picklefile.
"""
with open(filename, 'rb') as picklefile:
variable = pickle.load(picklefile)
return variable
def fetch_pickled_HO(filename):
"""
Fetches random forest regression hyperparamaters saved into a picklefile
and returns each hyperparameter.
Parameters:
filename (str): Filename of the pickle file. An example of the variable
that is expected to be stored in the pickle file:
pickled_variable = {'bootstrap': True,\n",
'max_depth': 18,\n",
'max_features': 'sqrt',\n",
'min_samples_leaf': 1,\n",
'min_samples_split': 2,\n",
'n_estimators': 300}
Returns:
n_estimators (int, optional): Number of trees in the forest
max_depth (int, optional): Maximum depth of the tree
min_samples_split (int, optional): Minimum number of samples required
to split an internal node (could also be a float, see sklearn
documentation)
min_samples_leaf (int, optional): The minimum number of samples required
to be at a leaf node (could also be a float, see sklearn documentation)
max_features (str, float, int, or None, optional): The number of
features to consider when looking for the best split (see the options
in sklearn documentation, 'sqrt' means max number is sqrt(number of
features))
bootstrap (boolean, optional): False means the whole dataset is used
for building each tree, True means bootstrapping of samples is used
"""
ho = fetch_pickle(filename)
bootstrap = ho['bootstrap']
max_depth = ho['max_depth']
max_features = ho['max_features']
min_samples_leaf = ho['min_samples_leaf']
min_samples_split = ho['min_samples_split']
n_estimators = ho['n_estimators']
return n_estimators, max_depth, min_samples_split, min_samples_leaf, max_features, bootstrap
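# Hedged sketch: feeding pickled hyperparameters back into RF_feature_analysis.
# The filename follows the pickle names used in the main block below; X and y
# are hypothetical DataFrames.
'''
n_estimators, max_depth, min_samples_split, min_samples_leaf, max_features, bootstrap = fetch_pickled_HO(
    'HO_result_5K_incorrelated_features')
results = RF_feature_analysis(X, y, test_proportion=0.2,
                              n_estimators=n_estimators, max_depth=max_depth,
                              min_samples_split=min_samples_split,
                              min_samples_leaf=min_samples_leaf,
                              max_features=max_features, bootstrap=bootstrap,
                              sample_weighing=False, plotting=False)
'''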
def read_molecule_excel(filename, sheet_smiles_y_id = 'SMILES',
column_smiles = 'SMILES ',
column_y = 'MIC VALUE (Y VALUE)',
column_id = 'No.',
column_class = 'Class',
column_name = 'NAME',
sheet_features = ['1k','2k','3k','4k','5k','300'],
start_column_features = 2):
"""
Reads molecule ID, output to be optimized, and features from the given
sheets of the given Excel file, and outputs them as a single DataFrame.
Parameters:
filename (str): Filename of the dataset Excel file.
sheet_smiles_y_id (str, optional): To do
column_smiles (str,optional): To do
column_y (str,optional): To do
column_id (str,optional): To do
sheet_features ([str],optional): To do
start_column_features (int,optional): To do
Returns:
dataset_original (df): Dataframe with molecules on each row, and
columns in this order: [Idx, y value, feature0, feature1, ...]
"""
# Note: list.extend() returns None, so the previous sheet_name argument silently read every sheet.
datasets = pd.read_excel(filename,
                         sheet_name=[sheet_smiles_y_id] + list(sheet_features),
                         na_values='na', convert_float=False)
if column_class is not None:
dataset_original = (datasets[sheet_smiles_y_id]).loc[:, [column_id, column_name, column_class, column_smiles, column_y]]
else:
dataset_original = (datasets[sheet_smiles_y_id]).loc[:, [column_id, column_name, column_smiles, column_y]]
for i in range(len(sheet_features)):
dataset_original = pd.concat([dataset_original,
datasets[sheet_features[i]
].iloc[:, start_column_features::]],
axis=1)
return dataset_original
# Old version, might still be in use somewhere. Doesn't have regressors as output.
'''
def analyze_RF_for_multiple_seeds(list_X, list_y, ho_params = None, n_seeds = 20, save_pickle = False, bar_plot = True, groups = None, groups_only_for_plotting = False, test_proportion = 0.21, top_n = 20, plotting=True):
n_datasets = len(list_X)
# Let's repeat y stratification. At the same, let's create a dataset for
# RF hyperparameter optimization.
R2_all2 = np.zeros((n_seeds,n_datasets))
RMSE_all2 = np.zeros((n_seeds,n_datasets))
top_features_all2 = [[None]*n_seeds]*n_datasets
features_all2 = [[None]*n_seeds]*n_datasets
X_tests = [[None]*n_seeds]*n_datasets
y_tests = [[None]*n_seeds]*n_datasets
X_trains = [[None]*n_seeds]*n_datasets
y_trains = [[None]*n_seeds]*n_datasets
filenames = ['X_tests_imp', 'y_tests_imp', 'X_tests', 'y_tests',
'X_trains_imp', 'y_trains_imp', 'X_trains', 'y_trains']
for j in range(n_datasets):
if ho_params is not None:
n_estimators = ho_params[j]['n_estimators']
max_depth = ho_params[j]['max_depth']
min_samples_split = ho_params[j]['min_samples_split']
min_samples_leaf = ho_params[j]['min_samples_leaf']
max_features = ho_params[j]['max_features']
bootstrap = ho_params[j]['bootstrap']
for i in range(n_seeds):
if ho_params is None:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = RF_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_weighing = False, plotting=plotting)
else:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = RF_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_weighing = False, n_estimators=n_estimators,
max_depth=max_depth, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features, bootstrap=bootstrap, plotting=plotting)
R2_all2[i,j] = R2
RMSE_all2[i,j] = RMSE
top_features_all2[j][i] = top_feature_weights.copy()
features_all2[j][i] = feature_weights.copy()
X_tests[j][i] = X_test.copy()
y_tests[j][i] = y_test.copy()
X_trains[j][i] = X_train.copy()
y_trains[j][i] = y_train.copy()
#if (i == 0) and (j==0):
# top_feature_weights2 = top_feature_weights
#if (i == 0) and (j==1):
# top_feature_weights_imp2 = top_feature_weights_imp
print('R2 and RMSE for dataset ', j, ': ', R2_all2[:,j], RMSE_all2[:,j])
print('Mean: ', np.mean(R2_all2[:,j]), np.mean(RMSE_all2[:,j]))
print('Std: ', np.std(R2_all2[:,j]), np.std(RMSE_all2[:,j]))
print('Min: ', np.min(R2_all2[:,j]), np.min(RMSE_all2[:,j]))
print('Max: ', np.max(R2_all2[:,j]), np.max(RMSE_all2[:,j]))
if save_pickle == True:
# Pickles for HO:
if j == 0:
save_to_pickle(X_tests, filenames[2])
save_to_pickle(y_tests, filenames[3])
save_to_pickle(X_trains, filenames[6])
save_to_pickle(y_trains, filenames[7])
if j == 1:
save_to_pickle(X_tests, filenames[0])
save_to_pickle(y_tests, filenames[1])
save_to_pickle(X_trains, filenames[4])
save_to_pickle(y_trains, filenames[5])
# Plot the results. Compare feature weights of two methods. E.g., here the top
# 50 feature weights of FilteredImportant dataset are compared to the top 50
# feature weights of the Filtered dataset.
if (bar_plot == True) and (n_datasets>1):
compare_features_barplot(top_features_all2[0][0], top_features_all2[1][0])
return R2_all2, RMSE_all2, top_features_all2, features_all2, X_tests, y_tests, X_trains, y_trains
'''
def analyze_RF_for_multiple_seeds(list_X, list_y, ho_params = None, n_seeds = 20, save_pickle = False, bar_plot = True, groups = None, groups_only_for_plotting = False, test_proportion = 0.21, top_n = 20, plotting=True, saveas = None, title=True):
n_datasets = len(list_X)
# Let's repeat y stratification. At the same, let's create a dataset for
# RF hyperparameter optimization.
R2_all2 = np.zeros((n_seeds,n_datasets))
RMSE_all2 = np.zeros((n_seeds,n_datasets))
top_features_all2 = []
features_all2 = []
X_tests = []
y_tests = []
X_trains = []
y_trains = []
regressors = []
filenames = ['X_tests_imp', 'y_tests_imp', 'X_tests', 'y_tests',
'X_trains_imp', 'y_trains_imp', 'X_trains', 'y_trains']
for j in range(n_datasets):
if ho_params is not None:
n_estimators = ho_params[j]['n_estimators']
max_depth = ho_params[j]['max_depth']
min_samples_split = ho_params[j]['min_samples_split']
min_samples_leaf = ho_params[j]['min_samples_leaf']
max_features = ho_params[j]['max_features']
bootstrap = ho_params[j]['bootstrap']
max_samples = ho_params[j]['max_samples']
top_features_temp = []
features_temp = []
X_tests_temp = []
y_tests_temp = []
X_trains_temp = []
y_trains_temp = []
regressors_temp = []
if title is not None:
title_temp = True
else:
title_temp = None
for i in range(n_seeds):
if saveas is not None:
saveas_temp = saveas+str(i)
else:
saveas_temp = saveas
if ho_params is None:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = RF_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_weighing = False, plotting=plotting, saveas = saveas_temp, title = title_temp)
else:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = RF_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_weighing = False, n_estimators=n_estimators,
max_depth=max_depth, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features, bootstrap=bootstrap, plotting=plotting, saveas = saveas_temp, title = title_temp, max_samples = max_samples)
R2_all2[i,j] = R2
RMSE_all2[i,j] = RMSE
top_features_temp.append(top_feature_weights.copy())
features_temp.append(feature_weights.copy())
X_tests_temp.append(X_test.copy())
y_tests_temp.append(y_test.copy())
X_trains_temp.append(X_train.copy())
y_trains_temp.append(y_train.copy())
regressors_temp.append(regressor)
top_features_all2.append(top_features_temp)
features_all2.append(features_temp)
X_tests.append(X_tests_temp)
y_tests.append(y_tests_temp)
X_trains.append(X_trains_temp)
y_trains.append(y_trains_temp)
regressors.append(regressors_temp)
print('R2 and RMSE for dataset ', j, ': ', R2_all2[:,j], RMSE_all2[:,j])
print('Mean: ', np.mean(R2_all2[:,j]), np.mean(RMSE_all2[:,j]))
print('Std: ', np.std(R2_all2[:,j]), np.std(RMSE_all2[:,j]))
print('Min: ', np.min(R2_all2[:,j]), np.min(RMSE_all2[:,j]))
print('Max: ', np.max(R2_all2[:,j]), np.max(RMSE_all2[:,j]))
if save_pickle == True:
# Pickles for HO:
if j == 0:
save_to_pickle(X_tests, filenames[2])
save_to_pickle(y_tests, filenames[3])
save_to_pickle(X_trains, filenames[6])
save_to_pickle(y_trains, filenames[7])
if j == 1:
save_to_pickle(X_tests, filenames[0])
save_to_pickle(y_tests, filenames[1])
save_to_pickle(X_trains, filenames[4])
save_to_pickle(y_trains, filenames[5])
# Plot the results. Compare feature weights of two methods. E.g., here the top
# 50 feature weights of FilteredImportant dataset are compared to the top 50
# feature weights of the Filtered dataset.
if (bar_plot == True) and (n_datasets>1):
compare_features_barplot(top_features_all2[0][0], top_features_all2[1][0])
return R2_all2, RMSE_all2, top_features_all2, features_all2, X_tests, y_tests, X_trains, y_trains, regressors
def compare_features_barplot(feature_weights1, feature_weights2, filename_fig = None, title=None):
features_to_append = feature_weights2.copy()
rf_features_for_plots = feature_weights1.copy()
rf_features_for_plots = rf_features_for_plots.append(features_to_append, sort=False, ignore_index = True)
rf_features_for_plots=pd.melt(rf_features_for_plots.reset_index(), value_vars=rf_features_for_plots.columns,
id_vars = 'index')
plt.figure()
sn.barplot(x='value', y='variable', hue='index', data = rf_features_for_plots)
if title is not None:
plt.title(title)
plt.show()
if filename_fig is not None:
plt.savefig(filename_fig+'.png')
plt.savefig(filename_fig+'.pdf')
plt.savefig(filename_fig+'.svg')
return None
# The following functions are meant for functionalizing the feature selection code. Not used in this file.
def clean_mics(dataset, y_column):
# Replace e.g. '>128' with 128*2 in y data (in column 2).
idx = dataset[dataset.iloc[:,y_column].str.find('>')==0].index
y_column_label = dataset.columns[y_column]
dataset.loc[idx,y_column_label] = dataset.loc[idx,y_column_label].str[1::]
dataset.loc[:,y_column_label] = np.double(dataset.loc[:,y_column_label])
# Approximate "MIC>X" values with the next highest available MIC value (2*X).
dataset.loc[idx, y_column_label] = dataset.loc[idx, y_column_label]*2
# Drop rows with y data nan, and columns with any nan.
dataset = dataset.dropna(axis=0, how='all', subset=[y_column_label])
dataset = dataset.dropna(axis=1, how='any')
if (y_column_label != 'MIC VALUE (Y VALUE)') and (y_column_label != 'log2mic'):
warnings.warn('Dataset is not as expected. Check that everything is ok.')
return dataset
def logmic(dataset, y_column):
# First, take log from Y feature.
dataset.iloc[:,y_column] = np.log2(dataset.iloc[:,y_column])
return dataset
def corrMatrix(dataset, y_column, corrMethod='spearman'):
corrMatrix = dataset.iloc[:,y_column::].corr(method=corrMethod)
return corrMatrix
def var_filtering(dataset, y_column, variance_limit=0.1, plotCorrMatrix = True, corrMethod = 'spearman'):
corrMatrixInitial = dataset.iloc[:,y_column::].corr(method=corrMethod)
if plotCorrMatrix == True:
plot_heatmap(corrMatrixInitial, 'Initial dataset: '
+ str(corrMatrixInitial.shape[0]-1) + ' descriptors')
print('Initial dataset: ' + str(corrMatrixInitial.shape[0]-1) + ' descriptors')
# Drop constant features (note: this goes through also the No., SMILES, and y
# value columns but it shouldn't be a problem because they are not constants)
# Not needed anymore after variance filtering is implemented.
# dataset = dataset.drop(columns=dataset.columns[(dataset == dataset.iloc[0,:]).all()])
# Drop almost constant features (do not check No, SMILES, y value columns).
idx_boolean = [False]*(y_column)
idx_boolean.append(True)
idx_boolean.extend(((np.var(dataset.iloc[:,(y_column+1)::])/np.mean(dataset.iloc[:,(y_column+1)::]))>variance_limit).values) #Numpy booleans here instead of python booleans, is it ok?
corrMatrixVar = dataset.iloc[:,idx_boolean].corr(method=corrMethod)
if plotCorrMatrix == True:
plot_heatmap(corrMatrixVar, 'After dropping constant or almost constant descriptors: '
+ str(corrMatrixVar.shape[0]-1) + ' descriptors')
print('After dropping constant or almost constant descriptors: '
+ str(corrMatrixVar.shape[0]-1) + ' descriptors')
return corrMatrixInitial, corrMatrixVar
def cor_filtering(dataset, y_column, filterWithCorrMatrix = False, corrMatrixForFiltering = None, plotCorrMatrix = True, corrMethod = 'spearman', corr_limit1 = 0.9, corr_limit2 = 0.05):
# Full correlation matrix with corrMatrixForFiltering taken into account.
if filterWithCorrMatrix == False:
corrMatrix = dataset.iloc[:,y_column::].corr(method=corrMethod)#'pearson')#
else:
corrMatrix = (dataset.loc[:,corrMatrixForFiltering.columns]).corr(method=corrMethod)#
if plotCorrMatrix == True:
plot_heatmap(corrMatrix, 'After dropping constant or almost constant descriptors: '
+ str(corrMatrix.shape[0]-1) + ' descriptors')
print('After dropping constant or almost constant descriptors: '
+ str(corrMatrix.shape[0]-1) + ' descriptors')
'''
# See which features correlate with Y more than others.
corrMatrixImportant = corrMatrix.loc[:,(np.abs(corrMatrix.iloc[0,:])>0.01).values]
plot_heatmap(corrMatrixImportant)
# --> Still a lot of correlating features.
'''
# Next, we want to drop features correlating too much with each other.
# Mask upper triangle to drop only the other one of each two correlated features.
corr_limit = corr_limit1 # Final value: 0.95
tri_corrMatrix = pd.DataFrame(np.triu(corrMatrix,1), index = corrMatrix.index,
columns = corrMatrix.columns)
# List column names of highly correlated features.
to_drop = [c for c in tri_corrMatrix.columns if any(np.abs(tri_corrMatrix[c]) > corr_limit)]
# And drop them.
corrMatrixCorX = corrMatrix.drop(columns = to_drop, index = to_drop)
if plotCorrMatrix == True:
plot_heatmap(corrMatrixCorX, 'After filtering out highly correlated descriptors (limit ' +
str(corr_limit) + ': ' + str(corrMatrixCorX.shape[0]-1) + ' descriptors')
print('After filtering out highly correlated descriptors (limit ' +
str(corr_limit) + ': ' + str(corrMatrixCorX.shape[0]-1) + ' descriptors')
# See again which of the remaining features correlate with Y.
corr_limit = corr_limit2 # Final values: 0.025
corrMatrixCor = corrMatrixCorX.loc[(np.abs(
corrMatrixCorX.iloc[0,:])>corr_limit).values,(np.abs(
corrMatrixCorX.iloc[0,:])>corr_limit).values]
if plotCorrMatrix == True:
plot_heatmap(corrMatrixCor, 'Correlation with Y higher than ' +
str(corr_limit) + ': ' + str(corrMatrixCor.shape[0]-1) +
' descriptors')#, True)
print('Correlation with Y higher than ' +
str(corr_limit) + ': ' + str(corrMatrixCor.shape[0]-1) +
' descriptors')
# --> results in top75 features.
return corrMatrix, corrMatrixCorX, corrMatrixCor
def pick_xy_from_columnlist(dataset, columnlist):
y = pd.DataFrame(dataset.loc[:,columnlist[0]])
X = dataset.loc[:,columnlist[1::]]
return X, y
def pick_xy_from_corrmatrix(dataset, corrMatrix):
X,y = pick_xy_from_columnlist(dataset, corrMatrix.columns)
return X, y
def define_groups_yvalue(y):
# RF with y value stratification.
groups_yvalue = y.copy()
groups_yvalue[y<3] = 1
groups_yvalue[y>6] = 3
groups_yvalue[(y>=3)&(y<=6)] = 2
groups_yvalue = groups_yvalue.squeeze()
return groups_yvalue
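# Hedged sketch: using the y-value groups for stratified train/test splits in
# the RF analysis. X and y are hypothetical DataFrames.
'''
groups = define_groups_yvalue(y)
results = RF_feature_analysis(X, y, groups=groups,
                              groups_only_for_plotting=False,
                              test_proportion=0.2, sample_weighing=False,
                              plotting=False)
'''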
def dropHighErrorSamples(y, X, dataset, groups = None, rmse_lim = 3.5):
# 1 sample at a time as a test set for 10 seeds. This will be utilized for
# dropping the moleculest with the largest test set error.
R2_all1 = np.zeros((y.shape[0],10))
RMSE_all1 = np.zeros((y.shape[0],10))
# Use a list comprehension so each molecule gets its own inner list
# (a multiplied nested list would alias the same inner list for every row).
top_feature_weights_all1 = [[None]*10 for _ in range(y.shape[0])]
for i in range(10):
for j in range(y.shape[0]):
test_indices = y.index == y.index[j]
feature_weights_1, top_feature_weights_all1[j][i], regressor1, R21, RMSE1, scaler_test1, X_test1, y_test1, y_pred1, X_train1, y_train1 = RF_feature_analysis(
X, y, groups=None, test_indices = test_indices, test_proportion = 0.2, top_n = 15, i='', random_state = i, sample_weighing = False, plotting = False)
print(R21, RMSE1)
print(top_feature_weights_all1[j][i].columns)
# R2 should not be used for 1 sample. To do: remove
R2_all1[j,i] = R21
RMSE_all1[j,i] = RMSE1
print('R2 and RMSE with single-molecule test sets: ', R2_all1, RMSE_all1)
print('Mean: ', np.mean(R2_all1), np.mean(RMSE_all1))
print('Std: ', np.std(R2_all1), np.std(RMSE_all1))
print('Min: ', np.min(R2_all1), np.min(RMSE_all1))
print('Max: ', np.max(R2_all1), np.max(RMSE_all1))
single_mol_rmse = np.mean(RMSE_all1, axis=1)
print('There are ', np.sum(single_mol_rmse>rmse_lim), ' molecules with RMSE>', rmse_lim, '. These will be dropped from the analysis.')
print(dataset.loc[single_mol_rmse>=rmse_lim, ['no', 'name', 'log2mic']])#, 'Class']])
X = X[single_mol_rmse<rmse_lim]
y = y[single_mol_rmse<rmse_lim]
dataset_new = dataset[single_mol_rmse<rmse_lim]
if groups is not None:
groups = groups[single_mol_rmse<rmse_lim]
else:
groups = None
return X, y, dataset_new, groups
if __name__ == "__main__":
#plt.rcParams.update({'font.size': 12})
#plt.rcParams.update({'font.sans-serif': 'Arial', 'font.family': 'sans-serif'})
mystyle = FigureDefaults('nature_comp_mat_dc')
###############################################################################
# BLOCK 0: INPUT VARIABLES
###############################################################################
# Dataset
#dataset_original = pd.read_excel(r'./03132020 5K descriptors of 101 COE.xlsx',
# na_values='na', convert_float = False)
filename = '07032020 updates 5k descriptors classes.xlsx'
y_column = 4 # The code assumes features start after y data column.
dataset_original = read_molecule_excel(filename, column_class='Class')#'Simplified Class')
seed = 8
test_proportion = 0.1
# Pickle files that contain round 1 optimized hyperparameters for random forest
# regression (will be needed in block 2 of the code).
pickle_ho_incorr_features = 'HO_result_5K_incorrelated_features'
pickle_ho_incorr_features_imp = 'HO_result_5K_incorrelated_important_features'
# Pickle files that contain round 2 optimized hyperparameters for random forest
# regression (will be needed in block 3 of the code).
pickle_ho_incorr_features2 = 'HO_result_5K_incorrelated_features_ho1'
pickle_ho_incorr_features_imp2 = 'HO_result_5K_incorrelated_important_features_ho1'
###############################################################################
# BLOCK 1: DATA FILTERING
###############################################################################
# Filtering data utilizing correlation matrices. Removing constant and almost
# constant values. Scaling to 0 mean and unit variance. Y data is treated as
# log2(Y).
'''
plot_heatmap(dataset_original.iloc[:,y_column::].corr(), title = 'Starting point: ' +
str(dataset_original.shape[1]-y_column-1) + ' features')
'''
dataset = dataset_original.copy()
# Replace e.g. '>128' with 128*2 in y data (in column 2).
idx = dataset[dataset.iloc[:,y_column].str.find('>')==0].index
dataset.iloc[idx,y_column] = dataset.iloc[idx,y_column].str[1::]
dataset.iloc[:,y_column] = np.double(dataset.iloc[:,y_column])
# Approximate "MIC>X" values with the next highest available MIC value (2*X),
# as in clean_mics(); doubling every row would shift the whole y scale.
dataset.iloc[idx,y_column] = dataset.iloc[idx,y_column]*2
# Drop rows with y data nan, and columns with any nan.
dataset = dataset.dropna(axis=0, how='all', subset=[dataset.columns[y_column]])
dataset = dataset.dropna(axis=1, how='any')
if dataset.columns[y_column] != 'MIC VALUE (Y VALUE)':
warnings.warn('Dataset is not as expected. Check that everything is ok.')
# Initial correlation matrix.
# --> A lot of ones there. --> needs filtering.
# Also different scales in the dataset --> needs scaling.
corrMatrixInitial = dataset.iloc[:,y_column::].corr()
'''plot_heatmap(corrMatrixInitial, title = 'After dropping NaNs: ' +
str(corrMatrixInitial.shape[0]-1) + ' features')
'''
# First, take log from Y feature.
dataset.iloc[:,y_column] = np.log2(dataset.iloc[:,y_column])
# Drop constant features (note: this goes through also the No., SMILES, and y
# value columns but it shouldn't be a problem because they are not constants)
dataset = dataset.drop(columns=dataset.columns[(dataset == dataset.iloc[0,:]).all()])
# Drop almost constant features (do not check No, SMILES, y value columns).
idx_boolean = [True]*(y_column+1)
idx_boolean.extend(((np.var(dataset.iloc[:,(y_column+1)::])/np.mean(dataset.iloc[:,(y_column+1)::]))>0.1).values)
dataset = dataset.iloc[:,idx_boolean]
# Spearman might be affected by certain scaling operations, showing
# correlations where it doesn't exist. RF is not affected by scaling.
# So let's not use it for now.
'''
# Scale the whole dataset. (It doesn't actually seem to affect correlation
# matrix. TO DO: Check and remove if true.)
dataset_scaled = dataset.copy()
# Remove the mean and scale to unit variance.
scaler = preprocessing.StandardScaler() #Other tested options: PowerTransformer()#MinMaxScaler()
# Scale.
dataset_scaled.iloc[:,(y_column+1)::] = pd.DataFrame(scaler.fit_transform(
dataset_scaled.iloc[:,(y_column+1)::]), columns=dataset_scaled.iloc[:,(y_column+1)::].columns,
index=dataset_scaled.iloc[:,(y_column+1)::].index)
# Full correlation matrix
corrMatrix = dataset_scaled.iloc[:,y_column::].corr(method='spearman')#'pearson')#
plot_heatmap(corrMatrix, 'After dropping constant or almost constant features: '
+ str(corrMatrix.shape[0]-1) + ' features')
'''
# Full correlation matrix
corrMatrix = dataset.iloc[:,y_column::].corr(method='spearman')#'pearson')#
'''plot_heatmap(corrMatrix, 'After dropping constant or almost constant features: '
+ str(corrMatrix.shape[0]-1) + ' features')
'''
'''
# See which features correlate with Y more than others.
corrMatrixImportant = corrMatrix.loc[:,(np.abs(corrMatrix.iloc[0,:])>0.01).values]
plot_heatmap(corrMatrixImportant)
# --> Still a lot of correlating features.
'''
# Next, we want to drop features correlating too much with each other.
# Mask upper triangle to drop only the other one of each two correlated features.
corr_limit = 0.9 # Final value: 0.95
tri_corrMatrix = pd.DataFrame(np.triu(corrMatrix,1), index = corrMatrix.index,
columns = corrMatrix.columns)
# List column names of highly correlated features.
to_drop = [c for c in tri_corrMatrix.columns if any(np.abs(tri_corrMatrix[c]) > corr_limit)]
# And drop them.
corrMatrixFiltered = corrMatrix.drop(columns = to_drop, index = to_drop)
'''plot_heatmap(corrMatrixFiltered, 'After filtering out highly correlated features (limit ' +
str(corr_limit) + ': ' + str(corrMatrixFiltered.shape[0]-1) + ' features')
'''
# See again which of the remaining features correlate with Y.
corr_limit = 0.05 # Final values: 0.025
corrMatrixFilteredImportant = corrMatrixFiltered.loc[(np.abs(
corrMatrixFiltered.iloc[0,:])>corr_limit).values,(np.abs(
corrMatrixFiltered.iloc[0,:])>corr_limit).values]
'''plot_heatmap(corrMatrixFilteredImportant, 'Correlation with Y higher than ' +
str(corr_limit) + ': ' + str(corrMatrixFilteredImportant.shape[0]-1) +
' features')#, True)
# --> results in top75 features.
'''
###############################################################################
# BLOCK 2: RF WITHOUT HO
###############################################################################
# Let's do Random Forest for purpose of selecting most important features.
###############################################################################
# Default RF for the FilteredImportant features (top 75):
# Data
# We are not using dataset_scaled because scaling needs to be done separately
# for train and test sets.
y_imp = pd.DataFrame(dataset.loc[:,corrMatrixFilteredImportant.columns[0]])
X_imp = dataset.loc[:,corrMatrixFilteredImportant.columns[1::]]
y = pd.DataFrame(dataset.loc[:,corrMatrixFiltered.columns[0]])
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import pandas_datareader.data as web
pytestmark = pytest.mark.stable
class TestEurostat(object):
def test_get_ert_h_eur_a(self):
# Former euro area national currencies vs. euro/ECU
# annual data (ert_h_eur_a)
df = web.DataReader(
"ert_h_eur_a",
"eurostat",
start=pd.Timestamp("2009-01-01"),
end=pd.Timestamp("2010-01-01"),
)
assert isinstance(df, pd.DataFrame)
header = df.columns.levels[0][0]
currencies = ["Italian lira", "Lithuanian litas"]
df = df[header]
df = df["Average"][currencies]
exp_col = pd.MultiIndex.from_product(
[currencies, ["Annual"]], names=["CURRENCY", "FREQ"]
)
exp_idx = pd.DatetimeIndex(["2009-01-01", "2010-01-01"], name="TIME_PERIOD")
values = np.array([[1936.27, 3.4528], [1936.27, 3.4528]])
expected = pd.DataFrame(values, index=exp_idx, columns=exp_col)
tm.assert_frame_equal(df, expected)
def test_get_sts_cobp_a(self):
# Building permits - annual data (2010 = 100)
df = web.DataReader(
"sts_cobp_a",
"eurostat",
start=pd.Timestamp("2000-01-01"),
"""Module for running decoding experiments."""
from pathlib import Path
from typing import Optional, Sequence, Union
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from sklearn.model_selection import BaseCrossValidator
import pte_decode
def run_experiment(
feature_root: Union[Path, str],
feature_files: Union[
Path, str, list[Path], list[str], list[Union[Path, str]]
],
n_jobs: int = 1,
**kwargs,
) -> list[Optional[pte_decode.Experiment]]:
"""Run prediction experiment with given number of files."""
if not feature_files:
raise ValueError("No feature files specified.")
if not isinstance(feature_files, list):
feature_files = [feature_files]
if len(feature_files) == 1 or n_jobs in (0, 1):
return [
_run_single_experiment(
feature_root=feature_root,
feature_file=feature_file,
**kwargs,
)
for feature_file in feature_files
]
return [
Parallel(n_jobs=n_jobs)(
delayed(_run_single_experiment)(
feature_root=feature_root, feature_file=feature_file, **kwargs
)
for feature_file in feature_files
)
] # type: ignore
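# Hedged usage sketch: keyword arguments besides feature_root, feature_files and
# n_jobs are forwarded to _run_single_experiment. Paths, channel names and the
# classifier value are hypothetical placeholders.
'''
from sklearn.model_selection import GroupKFold

experiments = run_experiment(
    feature_root="derivatives/features",
    feature_files=["sub-01_task-motor_run-01"],
    n_jobs=1,
    classifier="lda",
    label_channels=["SQUARED_EMG"],
    target_begin=0.0,
    target_end=2.0,
    optimize=False,
    balancing=None,
    out_root="derivatives/decoding",
    use_channels="single",
    feature_keywords=["fft"],
    cross_validation=GroupKFold(n_splits=5),
    plot_target_channels=["SQUARED_EMG"],
)
'''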
def _run_single_experiment(
feature_root: Union[Path, str],
feature_file: Union[Path, str],
classifier: str,
label_channels: Sequence[str],
target_begin: Union[str, int, float],
target_end: Union[str, int, float],
optimize: bool,
balancing: Optional[str],
out_root: Union[Path, str],
use_channels: str,
feature_keywords: Sequence,
cross_validation: BaseCrossValidator,
plot_target_channels: list[str],
scoring: str = "balanced_accuracy",
artifact_channels=None,
bad_epochs_path: Optional[Union[Path, str]] = None,
pred_mode: str = "classify",
pred_begin: Union[int, float] = -3.0,
pred_end: Union[int, float] = 2.0,
use_times: int = 1,
dist_onset: Union[int, float] = 2.0,
dist_end: Union[int, float] = 2.0,
excep_dist_end: Union[int, float] = 0.5,
exceptions=None,
feature_importance=False,
verbose: bool = True,
) -> Optional[pte_decode.Experiment]:
"""Run experiment with single file."""
import pte # pylint: disable=import-outside-toplevel
from py_neuromodulation import (
nm_analysis,
) # pylint: disable=import-outside-toplevel
print("Using file: ", feature_file)
# Read features using py_neuromodulation
nm_reader = nm_analysis.Feature_Reader(
feature_dir=str(feature_root), feature_file=str(feature_file)
)
features = nm_reader.feature_arr
settings = nm_reader.settings
sidecar = nm_reader.sidecar
# Pick label for classification
try:
label = _get_column_picks(
column_picks=label_channels,
features=features,
)
except ValueError as error:
print(error, "Discarding file: {feature_file}")
return None
# Handle bad events file
bad_epochs_df = pte.filetools.get_bad_epochs(
bad_epochs_dir=bad_epochs_path, filename=feature_file
)
bad_epochs = bad_epochs_df.event_id.to_numpy() * 2
# Pick target for plotting predictions
target_series = _get_column_picks(
column_picks=plot_target_channels,
features=features,
)
features_df = get_feature_df(features, feature_keywords, use_times)
# Pick artifact channel
if artifact_channels:
artifacts = _get_column_picks(
column_picks=artifact_channels,
features=features,
).to_numpy()
else:
artifacts = None
# Generate output file name
out_path = _generate_outpath(
out_root,
feature_file,
classifier,
target_begin,
target_end,
use_channels,
optimize,
use_times,
)
dist_end = _handle_exception_files(
fullpath=out_path,
dist_end=dist_end,
excep_dist_end=excep_dist_end,
exception_files=exceptions,
)
side = "right" if "R_" in str(out_path) else "left"
decoder = pte_decode.get_decoder(
classifier=classifier,
scoring=scoring,
balancing=balancing,
optimize=optimize,
)
# Initialize Experiment instance
experiment = pte_decode.Experiment(
features=features_df,
plotting_target=target_series,
pred_label=label,
ch_names=sidecar["ch_names"],
decoder=decoder,
side=side,
artifacts=artifacts,
bad_epochs=bad_epochs,
sfreq=settings["sampling_rate_features"],
scoring=scoring,
feature_importance=feature_importance,
target_begin=target_begin,
target_end=target_end,
dist_onset=dist_onset,
dist_end=dist_end,
use_channels=use_channels,
pred_mode=pred_mode,
pred_begin=pred_begin,
pred_end=pred_end,
cv_outer=cross_validation,
verbose=verbose,
)
experiment.run()
experiment.save_results(path=out_path)
# experiment.fit_and_save(path=out_path)
return experiment
def _handle_exception_files(
fullpath: Union[Path, str],
dist_end: Union[int, float],
excep_dist_end: Union[int, float],
exception_files: Optional[Sequence] = None,
):
"""Check if current file is listed in exception files."""
if exception_files:
if any(exc in str(fullpath) for exc in exception_files):
print("Exception file recognized: ", Path(fullpath).name)
return excep_dist_end
return dist_end
def _generate_outpath(
root: Union[Path, str],
feature_file: Union[Path, str],
classifier: str,
target_begin: Union[str, int, float],
target_end: Union[str, int, float],
use_channels: str,
optimize: bool,
use_times: int,
) -> Path:
"""Generate file name for output files."""
if target_begin == 0.0:
target_begin = "trial_begin"
if target_end == 0.0:
target_end = "trial_begin"
target_str = "_".join(("decode", str(target_begin), str(target_end)))
clf_str = "_".join(("model", classifier))
ch_str = "_".join(("chs", use_channels))
opt_str = "yes_opt" if optimize else "no_opt"
feat_str = "_".join(("feats", str(use_times * 100), "ms"))
out_name = "_".join((target_str, clf_str, ch_str, opt_str, feat_str))
return Path(root, out_name, feature_file, feature_file)
def get_feature_df(
data: pd.DataFrame, feature_keywords: Sequence, use_times: int = 1
) -> pd.DataFrame:
"""Extract features to use from given DataFrame."""
column_picks = [
col
for col in data.columns
if any(pick in col for pick in feature_keywords)
]
used_features = data[column_picks]
# Initialize list of features to use
features = [
used_features.rename(
columns={col: col + "_100_ms" for col in used_features.columns}
)
]
# Use additional features from previous time points
# use_times = 1 means no features from previous time points are
# being used
for use_time in np.arange(1, use_times):
features.append(
used_features.shift(use_time, axis=0).rename(
columns={
col: col + "_" + str((use_time + 1) * 100) + "_ms"
for col in used_features.columns
}
)
)
# Return final features dataframe
return pd.concat(features, axis=1)
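# Hedged sketch of the lagging behaviour of get_feature_df: with use_times=3,
# each selected column appears as *_100_ms, *_200_ms and *_300_ms, where the
# latter two are the same feature shifted by one and two rows. Data below are
# hypothetical.
'''
import numpy as np
import pandas as pd

demo = pd.DataFrame({"fft_beta": np.arange(5.0), "other": np.zeros(5)})
lagged = get_feature_df(demo, feature_keywords=["fft"], use_times=3)
print(lagged.columns.tolist())
# ['fft_beta_100_ms', 'fft_beta_200_ms', 'fft_beta_300_ms']
'''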
# -*- coding: utf-8 -*-
import logging
import os
import click
import pandas as pd
from src.libs.bookmaker import BookMaker
from sqlalchemy import create_engine
import pymysql
pymysql.install_as_MySQLdb()
@click.command()
@click.option('--model', default='mlp_1')
@click.option('--strategy', default='value_bet_0.5')
def main(model='mlp_1', strategy='value_bet_0.5'):
"""
:param bet_file:
:return:
"""
logger = logging.getLogger(__name__)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logger.info('Load bets: model={model}, strategy={strategy}'.format(model=model, strategy=strategy))
db = create_engine("mysql://root@localhost/football_data")
bet = pd.read_sql(sql="select MATCH_ID, bH, bD, bA from match_bet where MODEL = '{model}' and STRATEGY = '{strategy}'"
.format(model=model, strategy=strategy), con=db)
matches = pd.read_sql(sql="select MATCH_ID, BbAvH, BbAvD, BbAvA, FTR from matches", con=db)
# Do some analytics on Shopify transactions.
import pandas as pd
from datetime import datetime, timedelta
class Analytics:
def __init__(self, filename: str, datetime_now, refund_window: int):
raw = pd.read_csv(filename)
clean = raw[raw['Status'].isin(['success'])] # Filter down to successful transactions only.
# Filter down to Sales only.
sales = clean[clean['Kind'].isin(['sale'])].rename(columns={'Amount': 'Sales'})
refunds = clean[clean['Kind'].isin(['refund'])] # Filter down to Refunds only.
# Make a table with total refunds paid for each 'Name'.
total_refunds = refunds.groupby('Name')['Amount'].sum().reset_index(name='Refunds')
# Join the Sales and Refunds tables together.
sales_and_refunds = pd.merge(sales, total_refunds, on='Name', how='outer')
#web scrapping libraries
from bs4 import BeautifulSoup as bs
import requests
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
#data processing libraries
import fsspec
import os
import folium
import time
import numpy as np
import pandas as pd
import geopandas as gpd
from pyproj import CRS, Transformer
import utm
import rasterio as rio
from rasterio import features
from rasterio import warp
from rasterio import windows
from rasterio.enums import Resampling
import torch.nn as nn
from PIL import Image
import matplotlib.pyplot as plt
#planetary computer libraries
from pystac_client import Client
from pystac.extensions.raster import RasterExtension as raster
import planetary_computer as pc
from pystac.extensions.eo import EOExtension as eo
from azure.storage.blob import BlobClient
import stackstac
import traceback
import sys
sys.path.append('/content')
from src.utils import normalized_diff
BANDS_10M = ['AOT', 'B02', 'B03', 'B04', 'B08', 'WVP']
BANDS_20M = ['B05', 'B06', 'B07', 'B8A', 'B11', "B12"]
EMPTY_METADATA_DICT = {
"mean_viewing_azimuth": np.nan,
"mean_viewing_zenith": np.nan,
"mean_solar_azimuth": np.nan,
"mean_solar_zenith": np.nan,
"sensing_time": pd.NaT
}
BAD_USGS_COLS = ["Instantaneous computed discharge (cfs)_x",
"Instantaneous computed discharge (cfs)_y"]
class USGS_Water_DB:
"""A custom class for storing for querying the http://nrtwq.usgs.gov data portal and
storing data to Pandas DataFrame format.
"""
def __init__(self, verbose=False):
"""Initializes the class to create web driver set source url.
Parameters
----------
verbose : bool
Sets the verbosity of the web scrapping query.
"""
self.source_url = 'https://nrtwq.usgs.gov'
self.verbose = verbose
self.create_driver()
def create_driver(self):
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
self.driver = driver
def get_station_df(self):
soup = self.get_url_text(self.source_url)
js = str(soup.findAll('script')[6])
marker_text_raw = js.split('L.marker')[1:-1]
self.station_df = pd.concat([self.get_marker_info(m) for m in marker_text_raw]).reset_index(drop=True)
def get_url_text(self, url):
self.driver.get(url)
result = requests.get(url, allow_redirects=False)
if result.status_code==200:
if self.verbose:
print(f'Data found at {url}!')
soup = bs(result.text, 'html.parser')
return soup
else:
if self.verbose:
print(f'{url} response not 202!')
return None
def process_soup(self, soup):
data_raw = str(soup).split('\n')
data_raw = [elem for elem in data_raw if not ('#' in elem)]
data_split = [d.split('\t') for d in data_raw]
y = (i for i,v in enumerate(data_split) if ('' in v))
stop = next(y)
cols = data_split[0]
units = data_split[1]
columns = [f'{c} ({u})' if ' ' not in u else f'{c}' for c,u in zip(cols,units) ]
data = data_split[2:stop]
df = pd.DataFrame(data=data, columns=columns)
return df
def get_marker_info(self, marker_text):
site_no = marker_text.split('site_no=')[1].split('>')[0].replace('"','')
point = [float(p) for p in marker_text.split('[')[1].split(']')[0].split(',')]
lat = point[0]
lon = point[1]
site_name = marker_text.split('<hr>')[1].split('<br')[0]
df = pd.DataFrame([{'site_no':site_no,'site_name':site_name,'Latitude':lat,'Longitude':lon}])
return gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.Longitude,df.Latitude))
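# Hedged usage sketch: building the station table with USGS_Water_DB. This
# assumes a working Chrome/chromedriver setup and network access to nrtwq.usgs.gov.
'''
db = USGS_Water_DB(verbose=True)
db.get_station_df()
print(db.station_df.head())  # GeoDataFrame with site_no, site_name and point geometry
'''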
class USGS_Station:
"""A custom class for storing USGS Station data. Specific functions collect
station instantaneous and modeled discharge and suspended sediment concentration.
"""
def __init__(self, site_no, instantaneous=False, verbose=False, year_range=np.arange(2013,2022)):
"""Initializes the USGS_Station class based on user-provided parameters.
Parameters
----------
site_no : str
The 8 digit USGS station site number that is zero padded.
instantaneous : bool
Sets data query for instantaneous recorded data only.
verbose : bool
Sets the query verbosity.
year_range : numpy int array
Numpy array of year range to search.
"""
self.site_no = site_no
self.instantaneous = instantaneous
self.verbose = verbose
self.year_range = year_range
self.create_driver()
def create_driver(self):
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
def get_water_url(self, attribute, year):
pcode_list = {'discharge':'00060',\
'turbidity':'63680',\
'temperature':'00010',\
'dissolved_oxygen':'00300',\
'ssd':'99409'}
url_header = 'https://nrtwq.usgs.gov/explore/datatable?'
timestep = 'uv'
period = f'{year}_all'
l = {'url_header':url_header, 'site_no':self.site_no, 'timestep':timestep}
l['period'] = period
l['pcode'] = pcode_list[attribute]
url = f"{l['url_header']}site_no={l['site_no']}&pcode={l['pcode']}&period={l['period']}×tep={l['timestep']}&format=rdb&is_verbose=y"
return url
def get_url_text(self, url):
self.driver.get(url)
result = requests.get(url, allow_redirects=False)
if result.status_code==200:
if self.verbose:
print('Data found!')
soup = bs(result.text, 'html.parser')
return soup
else:
if self.verbose:
print('Data does not exist')
return None
def process_soup(self,soup,attribute):
#might need to update this method to include instantaneous measurements
if ((self.instantaneous) & (attribute=='ssd')):
data_raw = str(soup).split('Discrete (laboratory-analyzed)')[1].split('\n')
data_raw = [elem for elem in data_raw if not (' data' in elem)]
else:
data_raw = str(soup).split('\n')
data_raw = [elem for elem in data_raw if not ('#' in elem)]
#could use regex here..
data_split = [d.split('\t') for d in data_raw]
y = (i for i,v in enumerate(data_split) if ('' in v))
stop = next(y)
cols = data_split[0]
units = data_split[1]
columns = [f'{c} ({u})' if ' ' not in u else f'{c}' for c,u in zip(cols,units) ]
data = data_split[2:stop]
df = pd.DataFrame(data=data, columns=columns)
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 14:22:56 2021
@author: KRS1BBH
"""
from ImportFilter import Importfile
import pandas as pd
import os, glob
#get path of directory script is executed from
dirname = os.path.dirname(__file__)
#nuk
Filelist=[dirname+'/testdata/NuK/LotResultSummaryAll.csv']
product='test'
recipe='test'
equipment='NuK'
data_object_nuk=pd.DataFrame()
for file in Filelist:
file_object_nuk=Importfile(equipment,product,recipe,file)
file_object_nuk.read_data()
data_object_nuk=data_object_nuk.append(file_object_nuk.data)
#smv
Filelist=[dirname+"/testdata/SmV/TEST.REC"]
product='test'
recipe='test'
equipment='SmV'
data_object_smv=pd.DataFrame()
for file in Filelist:
file_object_smv=Importfile(equipment,product,recipe,file)
file_object_smv.read_data()
data_object_smv=data_object_smv.append(file_object_smv.data, ignore_index=True)
#elli
Filelist=[dirname+"/testdata/Elli/test.txt"]
product='test'
recipe='test'
equipment='Elli'
data_object_elli=pd.DataFrame()
# -*- coding: utf-8 -*-
import logging
import numpy
from pandas import DataFrame, Series, pivot_table
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import *
from matplotlib import pyplot
from pylie.methods.methods import hlinkage_to_treematrix
from pylie.plotting import plot_matrix
from pylie.model.liedataframe import LIEDataFrame, lie_deltag
from pylie.model.lieseries import LIESeries
from pylie.model.liebase import LIEDataFrameBase
logger = logging.getLogger('pylie')
DEFAULT_SCAN_COLUMN_NAMES = {'case': 'case',
'poses': 'poses',
'alpha': 'alpha',
'beta': 'beta',
'gamma': 'gamma',
'vdw': 'vdw',
'coul': 'coul',
'dg_calc': 'dg_calc',
'ref_affinity': 'ref_affinity', }
class LIEScanDataFrame(LIEDataFrameBase):
"""
Perform an alpha/beta grid scan for provided cases.
This function will systematically scan alpha/beta parameter space.
The range of values for alpha and beta can be set to arbitrary start, stop and
step size values. the gamma parameter is set to a fixed value.
By default, an alpha/beta range between 0 and 1 with a step size of 0.01 is
sampled.
NOTE: the default step size is set to 0.01. Be carefull with setting smaller
step sizes in particular for larger datasets where the number of scan
combinations may easily explode resulting in long calculation times.
The function expects Pandas DataFrame or Series objects as input. As such, the
scan can be performed on a single case (a Series) or multiple cases
(a DataFrame) in wich the latter may be multiple cases single pose or multiple
cases multiple poses.
The scan results are returned as a LIEScanDataFrame with the calculated dG
values for each alpha/beta scan combination (columns) for each case (rows).
Columns headers are tuples of alpha/beta values.
"""
_class_name = 'scan'
_column_names = DEFAULT_SCAN_COLUMN_NAMES
def __init__(self, *args, **kwargs):
"""
Class __init__ method
Check input data:
- Needs to be of type Pandas DataFrame or Series and contain at least
<NAME> (vdw), Coulomb (coul) and Pose (poses) columns.
Arguments
---------
:param dataframe: 'vdw' and 'coul' data to perform the scan on.
:ptype dataframe: LIEDataFrame
:param max_combinations: Maximum number of alpha/beta parameter combinations
that are allowed to be sampled. A safety measure to prevent long
computation times.
:ptype max_combinations: int, default 100000000.
"""
super(LIEScanDataFrame, self).__init__(*args, **kwargs)
def _init_custom_finalize(self, **kwargs):
self._declare_scan_parameters()
@property
def _constructor(self):
return LIEScanDataFrame
def _pivot_data(self, column):
"""
Create matrix of VdW and Coul values for every pose of every case.
Use a pivot table to collect VdW and Coul values as matrix from a DataFrame.
Make new (1,1) array for VdW and Coul values from a Series (only one pose).
@params string column: DataFrame column name to create pivot table for
@return Pivot table as new Pandas DataFrame
"""
if type(self.data) == LIEDataFrame:
pivot = pivot_table(self.data, values=column, index=['case'], columns=['poses'])
"""Tests various time series functions which are used extensively in tcapy
"""
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas as pd
import numpy as np
from datetime import timedelta
from pandas.testing import assert_frame_equal
from tcapy.util.timeseries import TimeSeriesOps
from tcapy.util.customexceptions import *
from test.config import *
ticker = 'EURUSD'
start_date = '20 Apr 2017'
finish_date = '07 Jun 2017'
def test_vlookup():
"""Runs a test for the VLOOKUP function which is used extensively in a lot of the metric construction
"""
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min')
rand_data = np.random.random(len(dt))
df_before = pd.DataFrame(index=dt, columns=['rand'], data=rand_data)
millseconds_tests = [100, 500]
# Try perturbing by nothing, then 100 and 500 milliseconds
for millseconds in millseconds_tests:
df_perturb = pd.DataFrame(index=dt - timedelta(milliseconds=millseconds), columns=['rand'],
data=rand_data)
# Do a VLOOKUP (which should give us all the previous ones) - take off the last point (which would be AFTER
# our perturbation)
search, dt_search = TimeSeriesOps().vlookup_style_data_frame(dt[0:-1], df_perturb, 'rand')
df_after = pd.DataFrame(index=dt_search + timedelta(milliseconds=millseconds), data=search.values,
columns=['rand'])
# check the search dataframes are equal
assert_frame_equal(df_before[0:-1], df_after, check_dtype=False)
# in this case, our lookup series doesn't overlap at all with our range, so we should get back and exception
    dt_lookup = pd.date_range(start='30 Dec 2017', end='31 Dec 2017', freq='1min')
df_perturb = pd.DataFrame(index=dt + timedelta(milliseconds=millseconds), columns=['rand'],
data=rand_data)
exception_has_been_triggered = False
try:
search, dt_search = TimeSeriesOps().vlookup_style_data_frame(dt_lookup, df_perturb, 'rand')
except ValidationException:
exception_has_been_triggered = True
assert (exception_has_been_triggered)
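# --- Illustrative sketch (assumption: this is not tcapy's implementation, only the idea the
# test above exercises): a VLOOKUP/as-of lookup returns, for every requested timestamp, the
# most recent observation at or before it, which plain pandas expresses with merge_asof.
def _vlookup_sketch():
    obs = pd.DataFrame({'rand': np.random.random(5)},
                       index=pd.date_range('01 Jan 2018', periods=5, freq='1min'))
    wanted = pd.DataFrame({'query': range(5)},
                          index=obs.index + timedelta(milliseconds=100))
    # direction='backward' picks the last observation whose timestamp is <= each wanted timestamp
    return pd.merge_asof(wanted, obs, left_index=True, right_index=True, direction='backward')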
def test_filter_between_days_times():
"""Runs a test for the filter by time of day and day of the week, on synthetically constructed data and then checks
that no data is outside those time windows
"""
from tcapy.analysis.tradeorderfilter import TradeOrderFilterTimeOfDayWeekMonth
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min')
df = pd.DataFrame(index=dt, columns=['Rand'], data=np.random.random(len(dt)))
df = df.tz_localize('utc')
trade_order_filter = TradeOrderFilterTimeOfDayWeekMonth(time_of_day={'start_time': '07:00:00',
'finish_time': '17:00:00'},
day_of_week='mon')
df = trade_order_filter.filter_trade_order(trade_order_df=df)
assert (df.index[0].hour >= 7 and df.index[-1].hour <= 17 and df.index[0].dayofweek == 0)
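# --- Illustrative sketch (assumption: plain pandas, not the TradeOrderFilterTimeOfDayWeekMonth
# internals): the same Monday, 07:00-17:00 window can be expressed with between_time and dayofweek.
def _time_filter_sketch():
    idx = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min', tz='utc')
    demo = pd.DataFrame(index=idx, columns=['Rand'], data=np.random.random(len(idx)))
    demo = demo.between_time('07:00', '17:00')  # keep only the intraday window
    return demo[demo.index.dayofweek == 0]      # 0 == Monday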
def test_remove_consecutive_duplicates():
"""Tests that consecutive duplicates are removed correctly in time series
"""
dt = | pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='30s') | pandas.date_range |
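# --- Illustrative sketch (assumption: not TimeSeriesOps' implementation): consecutive duplicates
# in a time series are typically dropped by comparing each value with its predecessor via shift()
# and keeping only the first element of every run.
def _dedupe_sketch():
    vals = pd.Series([1, 1, 2, 2, 2, 3, 1],
                     index=pd.date_range(start='01 Jan 2018', periods=7, freq='30s'))
    return vals[vals != vals.shift()]  # keeps the run starts: 1, 2, 3, 1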
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 16 23:11:56 2017
@author: Flamingo
"""
import pandas as pd
import numpy as np
import datetime
import copy
import sys
sys.path.append('../TOOLS')
from IJCAI2017_TOOL import *
#%% read in holiday data
HOLI = pd.read_csv('../additional/HOLI.csv')
HOLI = HOLI.set_index(['DATE'],drop = True)
HOLI_TAB = HOLI.transpose()
HOLI_TAB.columns = [str((datetime.datetime.strptime('20150626','%Y%m%d') + datetime.timedelta(days=x)).date()) for x in range( HOLI_TAB.shape[1])]
#%% read in user pay and view data
PAYNW = pd.read_csv('../data/user_pay_new.csv')
VIENW = pd.read_csv('../data/user_view_new.csv')
PAYNW_SHOP_DATE = PAYNW.groupby(['SHOP_ID','DATE'],as_index = False).sum()
PAYNW_SHOP_DATE = PAYNW_SHOP_DATE[['SHOP_ID','DATE','Num_post']]
#PAYNW_TAB_FIX = pd.read_csv('FillOctober.csv')
#PAYNW_TAB_FIX['DATE'] = [ (lambda x:str(datetime.datetime.strptime('2015/06/26','%Y/%m/%d').date() ) ) (x) for x in PAYNW_TAB_FIX['DATE']]
#
#PAYNW_SHOP_DATE = pd.concat([PAYNW_SHOP_DATE ,PAYNW_TAB_FIX],axis = 0)
#
#
#PAYNW_SHOP_DATE = PAYNW_SHOP_DATE.drop_duplicates(subset = ['SHOP_ID','DATE'], keep = 'last')
#PAYNW_SHOP_DATE = PAYNW_SHOP_DATE.sort_values(by = ['SHOP_ID','DATE'])
PAYNW_SHOP_DATE.reset_index(level=0)
PAYNW_TAB = pd.pivot_table(PAYNW_SHOP_DATE, values=['Num_post'], index=['SHOP_ID'],columns=['DATE'], aggfunc=np.sum)
#PAYNW_TAB = pd.pivot_table(PAYNW, values=['Num_post'], index=['SHOP_ID'],columns=['DATE'], aggfunc=np.sum)
PAYNW_TAB = pd.concat( [PAYNW_TAB[PAYNW_TAB.columns[0:169:1]], pd.DataFrame({'A':[np.nan],},index=np.arange(1,2001)),PAYNW_TAB[PAYNW_TAB.columns[169::1]] ], axis = 1)
PAYNW_TAB.columns = [str((datetime.datetime.strptime('20150626','%Y%m%d') + datetime.timedelta(days=x)).date()) for x in range( PAYNW_TAB.shape[1])]
PAYNW_TAB['2015-12-12'] = PAYNW_TAB['2015-12-13']
PAYNW_TAB_T = PAYNW_TAB.transpose()
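# --- Illustrative sketch (toy numbers, not the competition data): the reshaping above pivots long
# (SHOP_ID, DATE, Num_post) records into a shop-by-date matrix and then patches a missing date
# column from a neighbouring day, which is what the '2015-12-12' assignment does.
_long_demo = pd.DataFrame({'SHOP_ID': [1, 1, 2],
                           'DATE': ['2015-12-11', '2015-12-13', '2015-12-13'],
                           'Num_post': [10, 12, 7]})
_wide_demo = pd.pivot_table(_long_demo, values='Num_post', index=['SHOP_ID'], columns=['DATE'], aggfunc=np.sum)
_wide_demo['2015-12-12'] = _wide_demo['2015-12-13']  # copy a neighbouring day into the gap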
#%% shop_related_features
SHOP_INFO = pd.read_csv("../external/SHOP_FEATURES_0221.csv",low_memory=False)
SHOP_SC = ['SC00']
SHOP_SD = ['SD' + str(x).zfill(2) for x in range(5)]
SHOP_SE = ['SE' + str(x).zfill(2) for x in range(1)]
SHOP_SF = ['SF' + str(x).zfill(2) for x in range(1)]
SHOP_SG = ['SG' + str(x).zfill(2) for x in range(4)]
SHOP_SH = ['SH' + str(x).zfill(2) for x in range(2)]
SHOP_SI = ['SI' + str(x).zfill(2) for x in range(10)]
SHOP_SJ = ['SJ' + str(x).zfill(2) for x in range(15)]
SHOP_columns = SHOP_SC + SHOP_SD + SHOP_SE + SHOP_SF + SHOP_SG + SHOP_SH + SHOP_SI + SHOP_SJ
#%%
TRN_N = 21
TST_N = 14
TST_PAD_N = 14 + 4
end_date = datetime.datetime.strptime('2016-10-31','%Y-%m-%d')
day_N = 494
date_list = [str((end_date- datetime.timedelta(days=x)).date()) for x in range(day_N)]
date_list.reverse()
#%%
TRAIN = pd.DataFrame()
train_date_zip = list(zip(date_list[0:day_N-(TRN_N+TST_N)+1],date_list[TRN_N-1:day_N-TST_N+1],date_list[TRN_N:day_N-TST_N+2], date_list[TRN_N+TST_N-1:day_N]))
train_date_zip_df = | pd.DataFrame(train_date_zip) | pandas.DataFrame |
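# --- Illustrative sketch (hedged, with a short toy date list rather than the full 494-day range):
# each tuple produced by the zip above is (train_start, train_end, test_start, test_end), i.e. a
# 21-day training window immediately followed by a 14-day test window, stepped forward one day at a time.
_toy_dates = [str(d.date()) for d in pd.date_range('2016-01-01', periods=40)]
_n = len(_toy_dates)
_toy_windows = list(zip(_toy_dates[0:_n - (TRN_N + TST_N) + 1],
                        _toy_dates[TRN_N - 1:_n - TST_N + 1],
                        _toy_dates[TRN_N:_n - TST_N + 2],
                        _toy_dates[TRN_N + TST_N - 1:_n]))
_toy_windows_df = pd.DataFrame(_toy_windows, columns=['train_start', 'train_end', 'test_start', 'test_end'])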