# Common Python library imports
import difflib
from concurrent.futures import ThreadPoolExecutor as TPE
from multiprocessing import cpu_count
# Pip package imports
import pandas as pd
from loguru import logger
# Internal package imports
from miner.core import IHandler, Converter
from miner.footballdata.scrapper import FootballDataRequest
__all__ = ["FootballDataHandler", "get_default_converter"]
def get_default_converter():
try:
from miner.footballdata.converters import SqlConverter
logger.debug("Class 'SqlConverter' selected.")
return SqlConverter
except ImportError as err:
logger.warning(err)
try:
import pandas as pd
# TODO: Return with the fallback pandas converter
except ImportError as err:
logger.warning(err)
logger.debug("Class 'Converter' selected.")
logger.warning("No [db_conn, pandas] packages were found. Falling back to the default Converter. Please make sure this is the expected behaviour.")
return Converter
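# A minimal, hedged sketch of the optional-import fallback pattern used above
# ('fancy_package' and 'FancyBackend' are made-up names, not part of miner):
#
#     def pick_backend():
#         try:
#             from fancy_package import FancyBackend  # optional dependency
#             return FancyBackend
#         except ImportError:
#             return object  # always-available fallback, like Converter above
#
# get_default_converter() prefers SqlConverter and currently falls back to the plain
# Converter whenever that import fails (the pandas-based fallback is still a TODO).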
class FootballDataHandler(IHandler):
class SqlQuery(object):
def __init__(self, *args, **kwargs):
pass
def get_matches_where_odds_are_null(self, start_date, end_date):
try:
from db_conn.connection.postgresql import ConnectionPool
from db_conn.query.sc_soccer.select import get_matches_where_odds_are_null
except ImportError as err:
logger.error(err)
return pd.DataFrame()
else:
pool = ConnectionPool()
return pool.sql_query(get_matches_where_odds_are_null(start_date, end_date))
name = "Football-Data Scrapper"
slug = "football-data-scrapper"
version = "v0_1"
default_config = {
'years': ["19/20", "18/19", "17/18", "16/17", "15/16", "14/15", "13/14", "11/12", "10/11", "09/10", "08/09", "07/08", "06/07", "05/06"],
'alias': {
'Wolverhampton': 'Wolves',
'PSG': 'Paris SG',
'Bremen': '<NAME>',
'Fortuna': '<NAME>',
'1. FC Köln': 'FC Koln',
'Mainz 05': 'Mainz',
'Athletic': '<NAME>',
'Real Sociedad': 'Sociedad',
'ACR Messina': 'Messina',
'<NAME>': 'Siena',
'<NAME>.': '<NAME>',
'Deportivo La Coruña': 'La Coruna',
},
'multithreading': False,
'num_of_threads': cpu_count()
}
def __init__(self, *args, **kwargs):
kwargs['config'] = { **FootballDataHandler.default_config, **kwargs.get('config', {}) }
kwargs['converter'] = kwargs.get('converter', get_default_converter())
m_kwargs = {**{
'name': FootballDataHandler.name,
'slug': FootballDataHandler.slug,
'version': FootballDataHandler.version
}, **kwargs}
super(FootballDataHandler, self).__init__(*args, **m_kwargs)
self._query_executor = kwargs.get('query', FootballDataHandler.SqlQuery())
# Create the singleton Football-Data requester
self._req = FootballDataRequest()
def _get_close_match(self, name, fd_name_list):
return difflib.get_close_matches(name, fd_name_list, cutoff=0.8)
#if len(result) == 0:
#pass
#logger.error("Team name: %s not found in list: %s" % (name, fd_name_list))
#return result
def _match_name(self, grp_data, football_df, key_var, curr_date):
selected_match = pd.DataFrame()
fd_home = football_df['HomeTeam'].to_list()
fd_away = football_df['AwayTeam'].to_list()
name_h = grp_data[key_var % 'home']
if name_h not in self._get_config('alias').keys():
result_h = self._get_close_match(name_h, fd_home)
else:
result_h = [self._get_config('alias')[name_h]]
# 0 or more than 1 result.
if len(result_h) != 1:
# Match the away team names
name_a = grp_data[key_var % 'away']
if name_a not in self._get_config('alias').keys():
result_a = self._get_close_match(name_a, fd_away)
else:
result_a = [self._get_config('alias')[name_a]]
# If 0 result found, log error and continue
if len(result_h) == 0 and len(result_a) == 0:
logger.warning("At date: %s no matched name found for: '%s' with the possibilities: %s. All possibilities that day [Home]: %s | [Away]: %s" % (
curr_date, [name_h, name_a], (result_h + result_a), fd_home, fd_away))
elif len(result_a) == 1:
selected_match = football_df[(football_df['AwayTeam'] == result_a[0])]
else:
# Select rows where the home team is a partial match, but filter exactly on the away team.
selected_match = football_df[((football_df['AwayTeam'] == result_a[0]) & (
football_df['HomeTeam'].isin(result_h)))]
else:
# Filter with home team only
selected_match = football_df[football_df['HomeTeam'] == result_h[0]]
return selected_match
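# Hedged illustration of the 0.8 cutoff used in _get_close_match (candidate names here
# are taken from the 'alias' config above; real candidates come from football_df):
#
#     difflib.get_close_matches('Wolverhampton', ['Wolves', 'Paris SG'], cutoff=0.8)  # -> []
#     difflib.get_close_matches('Paris SG', ['Wolves', 'Paris SG'], cutoff=0.8)       # -> ['Paris SG']
#
# 'Wolverhampton' scores well below 0.8 against 'Wolves', which is why that pair has to
# be resolved through the alias mapping instead of fuzzy matching.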
def _team_name_matcher(self, data, football_df, q, curr_date):
for row_index, grp_data in data.iterrows():
try:
selected_match = self._match_name(grp_data, football_df, "%s_team_short", curr_date)
if len(selected_match) == 0:
selected_match = self._match_name(grp_data, football_df, "%s_team", curr_date)
if len(selected_match) == 0:
continue
q.update_match_statistic(grp_data['id'], selected_match)
q.update_match_odds(grp_data['id'], selected_match)
except Exception as err:
logger.error(err)
def _fetch_date(self, curr_date, *args, **kwargs):
pass
def _process(self, input_tuple):
q = self._converter()
tr, season, df = input_tuple
football_df = self._req.parse_odds(tr, season)
# Convert Date object
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
try:
football_df['Date'] = pd.to_datetime(football_df['Date'], format='%d/%m/%Y')
except Exception:
football_df['Date'] = pd.to_datetime(football_df['Date'], format='%d/%m/%y')
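# Hedged sketch of the two-step date parsing above: the first attempt assumes 4-digit
# years and the except branch retries with 2-digit years, e.g.
#     pd.to_datetime('17/08/2019', format='%d/%m/%Y')  # -> 2019-08-17
#     pd.to_datetime('17/08/19', format='%d/%m/%y')    # -> 2019-08-17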
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import Lasso
import pickle
import os
import warnings
currentpath = os.getcwd()
warnings.filterwarnings('ignore')
rating_path = 'analysisapp/data/ratings.csv'
my_rating_path = 'analysisapp/data/my_ratings_input.csv'
movie_path = 'analysisapp/data/movies.csv'
genre_path = 'analysisapp/data/genres.p'
ratings = pd.read_csv(rating_path)
from src.typeDefs.iexRtmRecord import IIexRtmRecord, ISection_1_1
import datetime as dt
from src.repos.metricsData.metricsDataRepo import MetricsDataRepo
import pandas as pd
def fetchIexRtmTableContext(appDbConnStr: str, startDt: dt.datetime, endDt: dt.datetime) -> IIexRtmRecord:
mRepo = MetricsDataRepo(appDbConnStr)
# get iex rtm data for the range between start date and end date
iexRtmMcvVals = mRepo.getIexRtmBlockWiseData('MCV (MW)', startDt, endDt)
iexRtmMcpVals = mRepo.getIexRtmBlockWiseData('MCP (Rs/MWh) ', startDt, endDt)
iexRtmMcvDf = pd.DataFrame(iexRtmMcvVals)
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.to_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.to_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
with pytest.raises(ValueError):
Timestamp(min_ts_us - one_us)
# One us more than the maximum is an error
with pytest.raises(ValueError):
Timestamp(max_ts_us + one_us)
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
Timestamp(dt64)
def test_min_valid(self):
# Ensure that Timestamp.min is a valid Timestamp
Timestamp(Timestamp.min)
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
class TestTimestamp(object):
def test_tz(self):
tstr = '2014-02-01 09:00'
ts = Timestamp(tstr)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local == Timestamp(tstr, tz='Asia/Tokyo')
conv = local.tz_convert('US/Eastern')
assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert('US/Eastern')
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (Timestamp(n).asm8.view('i8') ==
np.datetime64(n, 'ns').view('i8') == n)
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
def test_class_ops_pytz(self):
def compare(x, y):
assert (int(Timestamp(x).value / 1e9) ==
int(Timestamp(y).value / 1e9))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(timezone('UTC')))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_class_ops_dateutil(self):
def compare(x, y):
assert (int(np.round(Timestamp(x).value / 1e9)) ==
int(np.round(Timestamp(y).value / 1e9)))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
# GH 14415
val = np.iinfo(np.int64).min + 80000000000000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
def test_unit(self):
def check(val, unit=None, h=1, s=1, us=0):
stamp = Timestamp(val, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != 'D':
assert stamp.minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == 0
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val / long(1000), unit='us')
check(val / long(1000000), unit='ms')
check(val / long(1000000000), unit='s')
check(days, unit='D', h=0)
# using truediv, so these are like floats
if PY3:
check((val + 500000) / long(1000000000), unit='s', us=500)
check((val + 500000000) / long(1000000000), unit='s', us=500000)
check((val + 500000) / long(1000000), unit='ms', us=500)
# get chopped in py2
else:
check((val + 500000) / long(1000000000), unit='s')
check((val + 500000000) / long(1000000000), unit='s')
check((val + 500000) / long(1000000), unit='ms')
# ok
check((val + 500000) / long(1000), unit='us', us=500)
check((val + 500000000) / long(1000000), unit='ms', us=500000)
# floats
check(val / 1000.0 + 5, unit='us', us=5)
check(val / 1000.0 + 5000, unit='us', us=5000)
check(val / 1000000.0 + 0.5, unit='ms', us=500)
check(val / 1000000.0 + 0.005, unit='ms', us=5)
check(val / 1000000000.0 + 0.5, unit='s', us=500000)
check(days + 0.5, unit='D', h=12)
def test_roundtrip(self):
# test value to string and back conversions
# further test accessors
base = Timestamp('20140101 00:00:00')
result = Timestamp(base.value + Timedelta('5ms').value)
assert result == Timestamp(str(base) + ".005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta('5us').value)
assert result == Timestamp(str(base) + ".000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta('5ns').value)
assert result == Timestamp(str(base) + ".000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta('6ms 5us').value)
assert result == Timestamp(str(base) + ".006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta('200ms 5us').value)
assert result == Timestamp(str(base) + ".200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
class TestTimestampNsOperations(object):
def setup_method(self, method):
self.timestamp = Timestamp(datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
assert modified_value - value == expected_value
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'),
-123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(
1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'),
-123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
assert ts.value == expected_value - 9 * 3600 * 1000000000
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 19:28:58 2020
@author: hcb
"""
import pandas as pd
import numpy as np
import lightgbm as lgb
import os
from tqdm import tqdm
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score
from config import config
import warnings
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import geohash
warnings.filterwarnings("ignore")
trn_path = config.train_dir
test_path = config.test_dir
def mode_mean(x):
return x.mode().mean()
def get_data(path):
df_list = []
for file in tqdm(sorted(os.listdir(path))):
file_path = os.path.join(path, file)
df = pd.read_csv(file_path)
df['time_id'] = list(range(len(df)))
df_list.append(df)
df = pd.concat(df_list)
return df
def get_latlng(df, precision=7):
tmp_df = pd.DataFrame()
tmp_df['lng'] = df['lon']
tmp_df['lat'] = df['lat']
tmp_df['code'] = tmp_df[[
'lng', 'lat'
]].apply(lambda x: geohash.encode(x['lat'], x['lng'],
precision=precision),
axis=1)
code = tmp_df['code'].values
return code
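# Hedged usage sketch, assuming the `geohash` module imported above is python-geohash:
#     geohash.encode(57.64911, 10.40744, precision=7)  # -> 'u4pruyd'
# Nearby coordinates share a common prefix, so the precision-7 codes behave like
# discrete "location tokens" for the CountVectorizer features built further below.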
def transform_day(df):
df['day'] = df['time'].apply(lambda x: int(x[0:4]))
df['month'] = df['time'].apply(lambda x: int(x[0:2]))
df['hour'] = df['time'].apply(lambda x: int(x[5:7]))
df['minute'] = df['time'].apply(lambda x: int(x[8:10]))
df['seconds'] = df['time'].apply(lambda x: int(x[11:13]))
df['time_transform'] = (df['month'] * 31 + df['day']) * 24 + df[
'hour'
] + df['minute'] / 60 + df['seconds'] / 3600
return df
def get_feature(df2, train):
df = df2.copy()
df['new_id'] = (df['渔船ID'] + 1) * 10000 + df['time_id']
tmp_df = df[['渔船ID', 'lat', 'lon', 'time_transform', 'new_id']].copy()
tmp_df.columns = ['渔船ID', 'x_1', 'y_1', 'time_transform_1', 'new_id']
tmp_df['new_id'] = tmp_df['new_id'] + 1
df = df.merge(tmp_df, on=['渔船ID', 'new_id'], how='left')
df['dis_path'] = np.sqrt((df['x_1'] - df['lat']) ** 2 +
(df['y_1'] - df['lon']) ** 2)
df['slope'] = np.abs((df['y_1'] - df['lon']) /
(df['x_1'] - df['lat'] + 0.001))
df.dropna(inplace=True)
tmp_df = df.groupby('渔船ID')['dis_path'].agg({
'max', 'median', 'mean', 'sum'
}).reset_index()
tmp_df.columns = ['渔船ID', 'dis_path_max', 'dis_path_median',
'dis_path_mean', 'dis_path_sum']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df.groupby('渔船ID')['slope'].agg({
'max', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'slope_max', 'slope_median', 'slope_mean1']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['dis_path'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'dis_path_min2', 'dis_path_std2',
'dis_path_median2', 'dis_path_mean']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df.groupby('渔船ID')['slope'].agg({
'min', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'slope_min', 'slope_median2', 'slope_mean2']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['slope'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'slope_min3', 'slope_std3', 'slope_median3',
'slope_mean3']
train = train.merge(tmp_df, on='渔船ID', how='left')
df['time_delt'] = np.abs(df['time_transform_1'] - df['time_transform'])
df['dis/time'] = df['dis_path'] / df['time_delt']
tmp_df = df.groupby('渔船ID')['dis/time'].agg({
'mean', 'median'
}).reset_index()
tmp_df.columns = ['渔船ID', 'dis/time_mean', 'dis/time_median']
train = train.merge(tmp_df, on='渔船ID', how='left')
return train
def get_feature2(df2, train):
df = df2.copy()
df['new_id'] = (df['渔船ID'] + 1) * 10000 + df['time_id']
tmp_df = df[['渔船ID', '方向', '速度', 'new_id']].copy()
tmp_df.columns = ['渔船ID', '方向_1', '速度_1', 'new_id']
tmp_df['new_id'] = tmp_df['new_id'] + 1
df = df.merge(tmp_df, on=['渔船ID', 'new_id'], how='left')
df['方向_delt'] = np.abs(df['方向_1'] - df['方向'])
df['速度_delt'] = np.abs(df['速度_1'] - df['速度'])
df.dropna(inplace=True)
tmp_df = df.groupby('渔船ID')['方向_delt'].agg({
'max', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '方向_delt_mmax', '方向_delt_median', '方向_delt_mean']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = df.groupby('渔船ID')['方向_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '方向_delt_min2', '方向_delt_std2',
'方向_delt_median2', '方向_delt_mean2']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['方向_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '方向_delt_min3', '方向_delt_std3',
'方向_delt_median3', '方向_delt_mean3']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df.groupby('渔船ID')['速度_delt'].agg({
'max', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '速度_delt_max', '速度_delt_median', '速度_delt_mean']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = df.groupby('渔船ID')['速度_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '速度_delt_min2', '速度_delt_std2',
'速度_delt_median2', '速度_delt_mean2']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['速度_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '速度_delt_min3', '速度_delt_std3',
'速度_delt_median3', '速度_delt_mean3']
train = train.merge(tmp_df, on='渔船ID', how='left')
return train
df_train = get_data(trn_path)
train_ = df_train[['渔船ID', 'type']].drop_duplicates()
df_train = transform_day(df_train)
train_ = get_feature(df_train, train_)
train_ = get_feature2(df_train, train_)
train_.drop(['type', 'slope_mean1', 'slope_mean2'], axis=1, inplace=True)
df_test = get_data(test_path)
test = df_test[['渔船ID']].drop_duplicates()
df_test = transform_day(df_test)
test = get_feature(df_test, test)
test = get_feature2(df_test, test)
test.drop(['slope_mean1', 'slope_mean2'], axis=1, inplace=True)
print('begin tfidf')
data = pd.concat((df_train, df_test))
data['destination'] = data['lat'].map(str) + '_' + data['lon'].map(str)
enc_vec = TfidfVectorizer()
group_df = data.groupby(['渔船ID'])['destination'].agg({
lambda x: list(x)
}).reset_index()
group_df.columns = ['渔船ID', 'destination']
group_df['destination'] = group_df['destination'].apply(lambda x: ' '.join(x))
tfidf_vec = enc_vec.fit_transform(group_df['destination'])
svd_enc = TruncatedSVD(n_components=30, n_iter=20, random_state=1996)
vec_svd = svd_enc.fit_transform(tfidf_vec)
vec_svd = pd.DataFrame(vec_svd)
vec_svd.columns = ['svd_{}_{}'.format('destination', i) for i in range(30)]
group_df = pd.concat([group_df, vec_svd], axis=1)
train_ = train_.merge(group_df, on=['渔船ID'], how='left')
del train_['destination']
test = test.merge(group_df, on=['渔船ID'], how='left')
del test['destination']
data = pd.concat((df_train, df_test))
mode_df = data.groupby(['渔船ID', 'lat',
'lon'])['time'].agg({'count'}).reset_index()
mode_df = mode_df.rename(columns={'count': 'mode_count'})
mode_df['rank'] = mode_df.groupby('渔船ID')['mode_count'].rank(method='first',
ascending=False)
for i in range(1, 4):
tmp_df = mode_df[mode_df['rank'] == i]
del tmp_df['rank']
tmp_df.columns = ['渔船ID', 'rank{}_mode_lat'.format(i),
'rank{}_mode_lon'.format(i), 'rank{}_mode_cnt'.format(i)]
train_ = train_.merge(tmp_df, on='渔船ID', how='left')
test = test.merge(tmp_df, on='渔船ID', how='left')
def split_speed(speed):
if speed <= 4:
return 'low'
elif speed <= 10 and speed > 4:
return 'median-low'
elif speed <= 18 and speed > 10:
return 'median'
elif speed <= 50 and speed > 18:
return 'high'
else:
return 'very-high'
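# Quick hedged check of the binning above (values in the same units as the 速度 column):
#     split_speed(3)   # -> 'low'
#     split_speed(7)   # -> 'median-low'
#     split_speed(15)  # -> 'median'
#     split_speed(30)  # -> 'high'
#     split_speed(80)  # -> 'very-high'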
tmp_df = data.groupby('渔船ID')['渔船ID'].agg({'count'}).reset_index(
) # , '方向skew':'skew'
tmp_df = tmp_df.rename(columns={'count': 'id_count'})
train_ = train_.merge(tmp_df, on='渔船ID', how='left')
test = test.merge(tmp_df, on='渔船ID', how='left')
data['速度_type'] = data['速度'].apply(lambda x: split_speed(x))
group_df = data.groupby(['渔船ID', '速度_type']).size().unstack().fillna(0)
group_df.columns = ['速度_' + f + '_cnt' for f in group_df.columns]
group_df.reset_index(inplace=True)
train_ = train_.merge(group_df, on=['渔船ID'], how='left')
test = test.merge(group_df, on=['渔船ID'], how='left')
for col in group_df.columns:
if col not in ['渔船ID']:
train_[col.replace('cnt', 'ratio')] = train_[col] / train_['id_count']
test[col.replace('cnt', 'ratio')] = test[col] / test['id_count']
train_.drop('id_count', axis=1, inplace=True)
test.drop('id_count', axis=1, inplace=True)
countvec = CountVectorizer()
data = pd.concat((df_train, df_test))
code = get_latlng(data)
data['destination'] = code
group_df = data.groupby(['渔船ID'])['destination'].agg({
lambda x: list(x)
}).reset_index()
group_df.columns = ['渔船ID', 'destination']
group_df['destination'] = group_df['destination'].apply(lambda x: ' '.join(x))
count_vec_tmp = countvec.fit_transform(group_df['destination'])
svd_tmp = TruncatedSVD(n_components=30, n_iter=20, random_state=1996)
svd_tmp = svd_tmp.fit_transform(count_vec_tmp)
svd_tmp = pd.DataFrame(svd_tmp)
from collections import defaultdict
import pandas as pd
from ..sql.functions import Column, AggColumn, min as F_min, max as F_max, col, _SpecialSpandaColumn
from spanda.core.typing import *
from .utils import wrap_col_args, wrap_dataframe
class DataFrameWrapper:
"""
DataFrameWrapper takes in a Pandas Dataframe and transforms it to a Spanda dataframe,
which can be manipulated with (Spark inspired) Spanda functions.
Example:
sdf = DataFrameWrapper(df)
employee_ids = sdf.filter("is_employee").select("id")
"""
def __init__(self, df: pd.DataFrame):
self._df = df
@property
def columns(self):
return list(self._df.columns)
@staticmethod
def _tmp_col_name(cols):
i = 0
col_name = '__tmp'
while col_name in cols:
col_name = f'__tmp_{i}'
i += 1
return col_name
@wrap_dataframe
def intersect(self, other: 'DataFrameWrapper'):
"""
Return intersection of dataframes
"""
assert set(self.columns) == set(other.columns), "columns must be the same when intersecting dataframes"
# TODO: CHECK
return pd.merge(self._df, other._df, on=self.columns, how='inner').drop_duplicates()
@wrap_dataframe
def union(self, other: 'DataFrameWrapper'):
"""
Returns union of dataframes
"""
assert set(self.columns) == set(other.columns), "columns must be the same when unioning dataframes"
return pd.concat([self._df, other._df], axis='index').drop_duplicates()
def subtract(self, other: 'DataFrameWrapper'):
"""
Subtraction of dataframes (as sets of rows)
"""
assert set(self.columns) == set(other.columns), "columns must be the same when subtracting dataframes"
return self.join(other, on=self.columns, how='left_anti')
@wrap_dataframe
def withColumn(self, name: str, col: Column):
"""
Returns a new Spanda dataframe with a new column
"""
return self._df.assign(**{name: Column._apply(col, self._df)})
@wrap_dataframe
def distinct(self):
"""
Return new Spanda dataframe with no duplicate rows
"""
return self._df.drop_duplicates()
def drop(self, *cols: str):
"""
Returns Spanda dataframe without the mentioned columns
"""
return self.select(*filter(lambda c: c not in cols, self.columns))
def count(self) -> int:
"""
Returns the number of rows in dataframe
"""
return len(self._df)
@wrap_dataframe
@wrap_col_args
def filter(self, col: Column):
"""
Returns a Spanda dataframe with only the records for which `col` equals True.
"""
df = self._df
if isinstance(col, Column):
cond = Column._apply(col, df)
return df[cond]
elif isinstance(col, str):
return df.query(col)
else:
raise NotImplementedError
def agg(self, *agg_cols: AggColumn):
"""
Aggregate entire dataframe as one group
"""
grp_data = GroupedDataFrameWrapper(self._df, tuple(), {0: self._df.index})
return grp_data.agg(*agg_cols)
def min(self):
"""
Compute minimum for each column in the dataframe
"""
return self.agg(*[F_min(c) for c in self.columns])
def max(self):
"""
Compute maximum for each column in the dataframe
"""
return self.agg(*[F_max(c) for c in self.columns])
@wrap_dataframe
def sort(self, *cols: str, ascending: bool = True):
"""
order the rows by the columns named in cols.
if ascending is True, it will be ordered in ascending order; otherwise - in descending order
"""
return self._df.sort_values(list(cols), ascending=ascending)
def where(self, col: Column):
"""
Alias for `.filter()`
"""
return self.filter(col)
def withColumnRenamed(self, old_name: str, new_name: str):
return self.withColumn(new_name, col(old_name)).drop(old_name)
@wrap_dataframe
def head(self, n :int = 5):
"""
Return first n rows of dataframe
"""
return self._df.head(n)
@wrap_dataframe
@wrap_col_args
def select(self, *cols: Column):
"""
Returns a Spanda dataframe with only the selected columns.
"""
metadata = {}
df = self._df
col_names = []
special_cols = []
for col in cols:
if isinstance(col, _SpecialSpandaColumn):
metadata.update({col._name: _SpecialSpandaColumn._apply_special_preprocess(col, df)})
for col in cols:
if isinstance(col, Column):
df = df.assign(**{col._name: Column._apply(col, df)})
col_names.append(col._name)
elif isinstance(col, _SpecialSpandaColumn):
df = df.assign(**{col._name: _SpecialSpandaColumn._apply_special(col, df,
metadata=metadata[col._name])})
col_names.append(col._name)
special_cols.append((col._name, col._transformation_type))
else:
raise NotImplementedError
for (special_col_name, trans_type) in special_cols:
df = _SpecialSpandaColumn._apply_special_postprocess(df=df, col_name=special_col_name,
trans_type=trans_type,
metadata=metadata[special_col_name],
all_col_names=col_names)
return df[col_names]
@wrap_dataframe
def join(self, other: 'DataFrameWrapper', on: Union[str, List[str]], how: str = 'inner'):
"""
Joins with another Spanda dataframe.
`on` is a column name or a list of column names we join by.
`how` decides which type of join will be used ('inner', 'outer', 'left', 'right', 'cross', 'left_anti')
"""
assert isinstance(other, DataFrameWrapper), "can join only with spanda dataframes"
assert how in ['inner', 'outer', 'left', 'right', 'cross', 'leftanti', 'left_anti',
'right_anti', 'rightanti', 'left_semi', 'leftsemi'], \
"this join method ('how' parameter) is not supported"
if isinstance(on, str):
on = [on]
if how in ['left_semi', 'leftsemi']:
# TODO: be aware some duplicate columns not in 'on' may exist
return self.join(other, on=on, how='left').select(*self.columns).distinct()._df
elif how in ['rightanti', 'right_anti']:
return other.join(self, on=on, how='left_anti')._df
elif how in ['leftanti', 'left_anti']:
tmp_col = DataFrameWrapper._tmp_col_name(set(self.columns).union(other.columns))
tmp_df = pd.concat([self.select(*on).withColumn(tmp_col, 'A')._df.drop_duplicates(),
other.select(*on).withColumn(tmp_col, 'B')._df.drop_duplicates()], axis='index')
tmp_df = tmp_df.groupby(list(on)).agg({tmp_col: tuple})
tmp_df = tmp_df[tmp_df[tmp_col] == ('A',)]
return pd.merge(self._df, tmp_df, on=on, how='inner').drop(tmp_col, axis='columns')
else:
return pd.merge(self._df, other._df, on=on, how=how)
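# Hedged usage sketch of the join semantics above (column names are made up):
#     left = DataFrameWrapper(pd.DataFrame({'id': [1, 2], 'dept': ['a', 'b']}))
#     right = DataFrameWrapper(pd.DataFrame({'id': [1, 3], 'pay': [10, 20]}))
#     left.join(right, on='id', how='inner')      # only the id == 1 row survives
#     left.join(right, on='id', how='left_anti')  # rows of `left` with no match, i.e. id == 2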
import os
import pickle
import re
from pathlib import Path
from typing import Tuple, Dict
import pandas as pd
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from brFinance.scraper.cvm.financial_report import FinancialReport
from brFinance.scraper.cvm.search import SearchDFP, SearchITR
from brFinance.utils.browser import Browser
class Company:
"""
An instance of a Company can fetch useful information about Financial Reports, Social Capital and Registration Data
"""
def __init__(self, cvm_code: int):
"""
Parameters
----------
cvm_code : int
CVM company code
"""
self.cvm_code = cvm_code
def _save_files(self, driver, report_info) -> Dict:
document_number = re.search(r"(?<=\Documento=)(.*?)(?=&)", report_info['linkView']).group()
# Create folder and save reports locally
path_save_reports = f'{os.getcwd()}/reports'
report_file = f'{path_save_reports}/{document_number}.plk'
Path(path_save_reports).mkdir(exist_ok=True)
# Check if report is available locally, otherwise scrape it.
if Path(report_file).exists():
with open(report_file, 'rb') as load_report:
report_obj = pickle.load(load_report)
print("Carregado localmente!")
else:
report_obj = FinancialReport(link=report_info["linkView"], driver=driver).get_report()
with open(report_file, 'wb') as save_report:
pickle.dump(report_obj, save_report)
return report_obj
def get_social_capital_data(self) -> pd.DataFrame:
"""
Returns a dataframe including number of preference or ordinal shares for a company
Returns
-------
pandas.Dataframe
Dataframe with Social Capital Data
"""
url = f"http://bvmf.bmfbovespa.com.br/pt-br/mercados/acoes/empresas/ExecutaAcaoConsultaInfoEmp.asp?CodCVM={self.cvm_code}"
df = pd.DataFrame()
try:
html_content = requests.get(url).content.decode("utf8")
social_capital_data = BeautifulSoup(html_content, "lxml").find("div",
attrs={"id": "divComposicaoCapitalSocial"})
df = pd.read_html(str(social_capital_data), thousands='.')[0]
df.columns = ["Type", "Quantity"]
except Exception as exp:
print(exp)
return df
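# Hedged usage sketch (needs network access; CVM code 9512 is believed to be Petrobras
# and is used here purely as an illustration):
#     Company(cvm_code=9512).get_social_capital_data()
#     # -> DataFrame with columns ['Type', 'Quantity'], or an empty DataFrame on failure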
def get_registration_data(self) -> pd.DataFrame:
"""
Returns a dataframe including useful information about company's registration data
Returns
-------
pandas.Dataframe
Dataframe with Registration Data
"""
url = "http://dados.cvm.gov.br/dados/CIA_ABERTA/CAD/DADOS/cad_cia_aberta.csv"
company_registration_data = pd.DataFrame()
import os
import re
import datetime
import copy
import codecs
from lxml import etree
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects import postgresql as psql
from sqlalchemy import Column, Integer, String, DATE
from sqlalchemy.orm import sessionmaker
# EXTRACT
def remove_special_chars(string: str) -> str:
"""
Replaces special char & with its xml equivalent.
"""
output = copy.deepcopy(string)
output = re.sub('&', '&amp;', output)
# output = re.sub('<', '&lt;', output)
# output = re.sub('>', '&gt;', output)
output = re.sub("”", '&quot;', output)
output = re.sub("‘", '&apos;', output)
output = re.sub("'", '&apos;', output)
output = re.sub('"', '&quot;', output)
return output
def read_file(fn: str) -> str:
"""
Given a filename string, return the contents in string type with added data tags.
"""
data = '<data>'
with open(fn, 'rb') as f:
raw = f.read()
file_data = remove_special_chars(codecs.decode(raw, errors='replace'))
data += file_data
data += '</data>'
return data
def read_xml(fn: str) -> etree._Element:
"""
Given an XML filename in string type, parse and return an XML object.
"""
xml = etree.fromstring(
read_file(fn))
return xml
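# Hedged illustration of why read_file() wraps the contents in a synthetic <data> root:
# the source files contain several sibling top-level tags, which is not well-formed XML
# on its own but parses fine once wrapped, e.g.
#     etree.fromstring('<data><date>May 1, 2015</date><body>text</body></data>')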
def process_xml(dir: str) -> dict:
"""
Check errors and generates and error.txt
"""
fn = []
for _, _, fn in os.walk(dir):
fn = fn
data = {}
final = {}
logfn = open('errors.txt', 'w')
for file in fn:
try:
d = extract_data(read_xml(file))
tmp_fn = re.sub('.pdf.*$', '', file)
formatted_date = format_date(d['date']['text'])
d['date']['text'] = str(formatted_date)
data[tmp_fn] = d
except Exception as error:
logfn.write(str(file) + ": " + str(error) + "\n")
final = {i: {k: v.get('text')
for k, v in data[i].items()} for i in data.keys()}
for i in final.keys():
final[i]['image'] = data[i]['image']
# inefficient / bad fix for now
final[i]['images'] = final[i].pop('image')
final[i]['title'] = i
final[i]['raw_body'] = data[i]
if final[i]['body'] == None:
raise AttributeError('Body error for ' + final[i]['title'])
logfn.close()
return final
def check_tags(data: etree._Element) -> bool:
names = set([
'date',
'docnum',
'doctype',
'subject',
'body',
'image',
'sign',
'signtitle',
'missingtext'
])
elements = data.findall('.//')
tags = set([i.tag for i in elements])
diff = tags.difference(names)
if len(diff) != 0:
raise AttributeError(
str(diff) + " are mislabeled / do not follow protocols.")
return True
def extract_data(data: etree._Element) -> dict:
"""
Converts xml data to dict with text and line numbers
"""
d = {}
check_tags(data)
d['date'] = find_tag(data, 'date')
d['doctype'] = find_tag(data, 'doctype')
d['docnum'] = find_tag(data, 'docnum')
d['subject'] = find_tag(data, 'subject')
d['body'] = find_tag(data, 'body')
d['sign'] = find_tag(data, 'sign')
d['signtitle'] = find_tag(data, 'signtitle')
d['image'] = find_tag(data, 'image', all=True)
return d
def get_text(node):
result = node.text or ""
for child in node:
if child.tail is not None:
result += child.tail
return result
def find_tag(data: etree._Element, tag: str, all=False) -> dict:
"""
Handles missing attribute errors with default inputs
"""
d = {}
tag_search = './/' + tag # to enable recursive search
if all is False:
try:
tag_node = data.find(tag_search)
d['text'] = get_text(tag_node)
d['line_num'] = tag_node.sourceline
except AttributeError:
d['text'] = None
d['line_num'] = None
return d
else:
for i, j in enumerate(data.findall(tag_search)):
d[i] = {
'text': j.text,
'line_num': j.sourceline
}
return d
# TRANSFORM
def rawstr(s):
"""
Return the raw string literal representation (using r'') of the string
*s* if one is available. If any invalid characters are encountered (or the
string cannot be represented as a raw string), the default repr() result
is returned.
"""
if any(0 <= ord(ch) < 32 for ch in s):
return repr(s)
if (len(s) - len(s.rstrip("\\"))) % 2 == 1:
return repr(s)
pattern = "r'{0}'"
if '"' in s:
if "'" in s:
return repr(s)
elif "'" in s:
pattern = 'r"{0}"'
return pattern.format(s)
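# Hedged examples of rawstr() behaviour:
#     print(rawstr('C:\\temp\\new'))  # prints r'C:\temp\new'  (clean raw-string form)
#     print(rawstr('line1\nline2'))   # prints 'line1\nline2'  (control char -> repr() fallback)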
def format_date(s: str) -> str:
if s is None or s.isspace() or s.lower() == 'nd':
s = '01/01/2050'
formats = [
'%b %d, %Y',
'%B %d, %Y',
'%B %d, %Y',
'%B %d,, %Y',
'%B, %d, %Y',
'%B %d,%Y',
#
'%d/%m/%Y',
'%d/%m/%y',
'%m/%d/%Y',
'%m/%d/%y',
# '-%m/%d/%Y'
'%d %B,%Y',
'%d %B%Y',
'%B %d %Y',
' %b %d, %Y',
'%b %d, %Y',
'%d %B. %Y',
'%d %B, %Y',
'%b %d,%Y',
'%b %d %Y',
'%d %b %Y',
'%d %B %Y',
'%B,%Y',
'%B, %Y',
# '%d/%-m/%Y'
]
s = s.strip().capitalize()
tmp = ''
for f in formats:
if not isinstance(tmp, pd._libs.tslib.Timestamp):
tmp = pd.to_datetime(s, errors='ignore', format=f)
import os
import pandas as pd
from Utils import Truncate
from Cajero import Cajero
from Cliente import Cliente
from Evento import Inicializacion, FinSimulacion, LlegadaCliente, FinAtencion, FinEspera
class Controlador:
def __init__(self, cant_iteraciones, tiempo, mostrar_desde, media_llegada, media_fin):
self.cajero1 = Cajero("Libre", 1, 0, 0, None)
self.cajero2 = Cajero("Libre", 2, 0, 0, None)
self.cajero3 = Cajero("Libre", 3, 0, 0, None)
self.cajero4 = Cajero("Libre", 4, 0, 0, None)
self.mostrar_cantidad_iteraciones = cant_iteraciones
self.tiempo = tiempo
self.mostrar_desde_minuto = mostrar_desde
self.media_llegada = media_llegada
self.media_fin = media_fin
self.acum_tiempo_utilizacion = 0
self.contador_clientes = 1
self.reloj = 0
self.eventos = []
self.clientes = []
self.cola = []
self.array_fin_atencion = [0,0,0,0]
def inicializacion(self):
self.llegada_cliente = LlegadaCliente(self.reloj, self.media_llegada, self.contador_clientes)
self.fin_atencion = FinAtencion(None, 0, 0, 0)
self.eventos.append(self.llegada_cliente)
def llegadaCliente(self, evento_actual):
self.contador_clientes += 1
cliente = Cliente(None, "", evento_actual.id)
self.clientes.append(cliente)
proxima_llegada_cliente = LlegadaCliente(self.reloj, self.media_llegada, self.contador_clientes)
self.eventos.append(proxima_llegada_cliente)
fin_espera = self.reloj + 5
self.llegada_cliente = proxima_llegada_cliente
cajero = self.buscarCajeroLibre()
if cajero is not None:
fin_atencion = FinAtencion(cajero, self.reloj, self.media_fin, cliente.id)
self.eventos.append(fin_atencion)
self.fin_atencion = fin_atencion
self.array_fin_atencion[cajero.id - 1] = fin_atencion.hora
cajero.comenzarAtencion(cliente)
cliente.comenzarAtencion(cajero)
else:
self.cola.append(cliente)
cliente.comenzarEspera()
def finAtencion(self, cajero, duracion):
cliente_finalizado = cajero.cliente
cliente_finalizado.finalizarAtencion()
print("Cliente:", cliente_finalizado.id)
print("Cajero:", cliente_finalizado.cajero)
if len(self.cola) >= 1:
cajero = self.buscarCajeroLibre()
cliente = self.cola.pop()
fin_atencion = FinAtencion(cajero, self.reloj, self.media_fin, cliente.id)
self.eventos.append(fin_atencion)
self.fin_atencion = fin_atencion
self.array_fin_atencion[cajero.id - 1] = fin_atencion.hora
cliente.comenzarAtencion(cajero)
else:
self.array_fin_atencion[cajero.id - 1] = 0
def finEspera(self, cliente):
if self.cola.index(cliente) >= 2:
cliente.retirarse()
self.llegada_cliente = LlegadaCliente(self.reloj, self.media_llegada, cliente.id)
return
def buscarCajeroLibre(self):
'''
Returns the first cashier (cajero) found in the "Libre" (free) state.
'''
libres = list(filter(lambda c: c.estaLibre(),
[self.cajero1, self.cajero2, self.cajero3, self.cajero4]))
if len(libres) == 0:
return None
else:
return libres[0]
def crearVectorEstado(self, evento_actual):
lista = ["Evento actual: " + str(evento_actual.nombre),
"Reloj: " + str(self.reloj),
"Tiempo entre llegadas:" + str(self.llegada_cliente.duracion),
"Próxima llegada: " + str(self.llegada_cliente.hora),
"Tiempo de atención: " + str(self.fin_atencion.duracion),
"Fin de atención 1: " + str(self.array_fin_atencion[0]),
"Fin de atención 2: " + str(self.array_fin_atencion[1]),
"Fin de atención 3: " + str(self.array_fin_atencion[2]),
"Fin de atención 4: " + str(self.array_fin_atencion[3]),
"Cajero 1: " + str(self.cajero1.estado),
"Cajero 2: " + str(self.cajero2.estado),
"Cajero 3: " + str(self.cajero3.estado),
"Cajero 4: " + str(self.cajero4.estado),
"Cola: " + str(len(self.cola)),
"Tiempo utilizacion 1: " + str(self.cajero1.tiempo_utilizacion),
"ACUM tiempo utilizacion 1: " + str(self.cajero1.acum_t_utilizacion),
"Tiempo utilizacion 2: " + str(self.cajero2.tiempo_utilizacion),
"ACUM tiempo utilizacion 2: " + str(self.cajero2.acum_t_utilizacion),
"Tiempo utilizacion 3: " + str(self.cajero3.tiempo_utilizacion),
"ACUM tiempo utilizacion 3: " + str(self.cajero3.acum_t_utilizacion),
"Tiempo utilizacion 4: " + str(self.cajero4.tiempo_utilizacion),
"ACUM tiempo utilizacion 4: " + str(self.cajero4.acum_t_utilizacion),
]
for c in self.clientes:
lista.append("Cliente: " + str(c.id))
lista.append(c.estado)
cajero = c.cajero
if cajero is None:
cajero = "-"
else:
cajero = "Cajero: " + str(cajero.id)
lista.append(cajero)
return lista
def crearVectorEstadoParcial(self, evento_actual):
'''
Receives the current event of the iteration as a parameter and returns the corresponding state vector.
'''
inicio_comunes = [
str(evento_actual.nombre),
str(self.reloj),
str(self.llegada_cliente.duracion),
str(self.llegada_cliente.hora),
str(self.fin_atencion.duracion),
str(self.array_fin_atencion[0]),
str(self.array_fin_atencion[1]),
str(self.array_fin_atencion[2]),
str(self.array_fin_atencion[3]),
]
fin_comunes = [
str(self.cajero1.estado),
str(self.cajero2.estado),
str(self.cajero3.estado),
str(self.cajero4.estado),
str(len(self.cola)),
str(self.cajero1.tiempo_utilizacion),
str(self.cajero2.tiempo_utilizacion),
str(self.cajero3.tiempo_utilizacion),
str(self.cajero4.tiempo_utilizacion),
str(self.cajero1.acum_t_utilizacion),
str(self.cajero2.acum_t_utilizacion),
str(self.cajero3.acum_t_utilizacion),
str(self.cajero4.acum_t_utilizacion),
]
return inicio_comunes + fin_comunes
def crearColumnasParcialesDataFrame(self):
'''
Creates a list with the columns for the data that is always present in the dataframe.
'''
inicio_comunes = [
"Evento Actual",
"Reloj",
"Tiempo entre llegadas",
"Próxima llegada",
"Tiempo atención",
"Fin atención 1",
"Fin atención 2",
"Fin atención 3",
"Fin atención 4",
]
fin_comunes = [
"Cajero 1",
"Cajero 2",
"Cajero 3",
"Cajero 4",
"Cola",
"Tiempo utilización 1",
"Tiempo utilización 2",
"Tiempo utilización 3",
"Tiempo utilización 4",
"ACUM Tiempo utilización 1",
"ACUM Tiempo utilización 2",
"ACUMTiempo utilización 3",
"ACUM Tiempo utilización 4",
]
return inicio_comunes + fin_comunes
def agregarDatos(self, df_datos_fijos, df_clientes, evento_actual):
vector_estado_parcial = self.crearVectorEstadoParcial(evento_actual)
loc = len(df_datos_fijos)
df_datos_fijos.loc[loc] = vector_estado_parcial
for al in self.clientes:
df_clientes = al.agregarDF(df_clientes, loc)
return df_datos_fijos, df_clientes
def simular(self):
df_datos_fijos = pd.DataFrame(columns=self.crearColumnasParcialesDataFrame())
df_clientes = pd.DataFrame()
import os
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor
import celescope
import pysam
import numpy as np
import pandas as pd
import logging
from celescope.tools.utils import format_number, log, read_barcode_file
from celescope.tools.utils import format_stat
from celescope.tools.utils import read_one_col, gene_convert, glob_genomeDir
from celescope.tools.report import reporter
@log
def split_bam(bam, barcodes, outdir, sample, gene_id_name_dic, min_query_length):
'''
input:
bam: bam from featureCounts
barcodes: cell barcodes, set
gene_id_name_dic: id name dic
min_query_length: minimum query length
output:
bam_dict: assign reads to cell barcodes and UMI
count_dict: UMI counts per cell
index: assign index(1-based) to cells
'''
# init
count_dict = defaultdict(dict)
bam_dict = defaultdict(dict)
index_dict = defaultdict(dict)
cells_dir = f'{outdir}/cells/'
# read bam and split
split_bam.logger.info('reading bam...')
samfile = pysam.AlignmentFile(bam, "rb")
header = samfile.header
barcodes = list(barcodes)
barcodes.sort()
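# sorting gives each barcode a stable 1-based index, used below for the CELL tag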
for read in samfile:
attr = read.query_name.split('_')
barcode = attr[0]
umi = attr[1]
if not read.has_tag('XT'):
continue
gene = read.get_tag('XT')
query_length = read.infer_query_length()
if (barcode in barcodes) and (gene in gene_id_name_dic) and (query_length >= min_query_length):
gene_name = gene_id_name_dic[gene]
read.set_tag(tag='GN', value=gene_name, value_type='Z')
index = barcodes.index(barcode) + 1
read.set_tag(tag='CL', value=f'CELL{index}', value_type='Z')
# keep one read for each UMI
if umi not in bam_dict[barcode]:
bam_dict[barcode][umi] = read
# count
if gene_name not in count_dict[barcode]:
count_dict[barcode][gene_name] = {}
if umi in count_dict[barcode][gene_name]:
count_dict[barcode][gene_name][umi] += 1
else:
count_dict[barcode][gene_name][umi] = 1
split_bam.logger.info('writing cell bam...')
# write new bam
index = 0
for barcode in barcodes:
# init
index += 1
index_dict[index]['barcode'] = barcode
index_dict[index]['valid'] = False
# out bam
if barcode in bam_dict:
cell_dir = f'{cells_dir}/cell{index}'
cell_bam_file = f'{cell_dir}/cell{index}.bam'
if not os.path.exists(cell_dir):
os.makedirs(cell_dir)
index_dict[index]['valid'] = True
cell_bam = pysam.AlignmentFile(
f'{cell_bam_file}', "wb", header=header)
for umi in bam_dict[barcode]:
read = bam_dict[barcode][umi]
cell_bam.write(read)
cell_bam.close()
# out df_index
df_index = pd.DataFrame(index_dict).T
df_index.index.name = 'cell_index'
index_file = f'{outdir}/{sample}_cell_index.tsv'
df_index.to_csv(index_file, sep='\t')
# out count_dict
df_count = pd.DataFrame(columns=['barcode', 'gene', 'UMI', 'read_count'])
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import os
import platform
import string
import unittest
from copy import deepcopy
from itertools import product
import numpy as np
import pandas as pd
from numba.core.errors import TypingError
from sdc.hiframes.rolling import supported_rolling_funcs
from sdc.tests.test_base import TestCase
from sdc.tests.test_series import gen_frand_array
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
skip_numba_jit, skip_sdc_jit,
test_global_input_data_float64)
LONG_TEST = (int(os.environ['SDC_LONG_ROLLING_TEST']) != 0
if 'SDC_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
def rolling_std_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).std(ddof)
def rolling_var_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).var(ddof)
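# Example of what these usecases compute in plain pandas (hypothetical values):
#   rolling_std_usecase(pd.Series([1., 2., 3., 4., 5.]), window=3, min_periods=1, ddof=1)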
class TestRolling(TestCase):
@skip_numba_jit
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = self.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@skip_numba_jit
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed2(self):
# test sequentially with generated dfs
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_apply1(self):
# test sequentially with manually created dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed_apply2(self):
# test sequentially with generated dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_parallel1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).sum()
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_fixed_parallel_apply1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).apply(lambda a: a.sum())
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_variable1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
# XXX: skipping min/max for this test since the behavior of Pandas
# is inconsistent: it assigns NaN to last output instead of 4!
if func_name not in ('min', 'max'):
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
sizes = (1, 2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
df = pd.DataFrame({'B': np.arange(n), 'time': time})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
def test_variable_apply1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable_apply2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# TODO: this crashes on Travis (3 process config) with size 1
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
df = pd.DataFrame({'B': np.arange(n), 'time': time})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
@unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
def test_variable_parallel1(self):
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# XXX: Pandas returns time = [np.nan] for size==1 for some reason
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(n):\n"
func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
func_text += " res = df.rolling('{}', on='time').{}()\n".format(w, func_name)
func_text += " return res.B.sum()\n"
loc_vars = {}
exec(func_text, {'pd': pd, 'np': np}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
@unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
def test_variable_apply_parallel1(self):
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# XXX: Pandas returns time = [np.nan] for size==1 for some reason
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w in wins:
func_text = "def test_impl(n):\n"
func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
func_text += " res = df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
func_text += " return res.B.sum()\n"
loc_vars = {}
exec(func_text, {'pd': pd, 'np': np}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_series_fixed1(self):
# test series rolling functions
# all functions except apply
S1 = pd.Series([0, 1, 2, np.nan, 4])
import io
import time
import json
from datetime import datetime
import pandas as pd
from pathlib import Path
import requests
drop_cols = [
'3-day average of daily number of positive tests (may count people more than once)',
'daily total tests completed (may count people more than once)',
'3-day average of new people who tested positive (counts first positive lab per person)',
'3-day average of currently hospitalized',
'daily number of vaccine doses administered beyond the primary series '
]
def save_file(df, file_path, current_date):
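'''
Write df to file_path; if the file already exists, append only when current_date
is newer than the most recent date already saved in the file.
'''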
# save/update file
if not Path(file_path).exists():
df.to_csv(file_path, index=False)
else:
# get prior file date
prior = pd.read_csv(file_path, parse_dates=['date'])
prior_date = pd.to_datetime(prior['date'].max()).date()
if current_date > prior_date:
df.to_csv(file_path, mode='a', header=False, index=False)
return
def scrape_sheet(sheet_id):
# load previous raw_data and get prior date
raw_general = './data/raw/ri-covid-19.csv'
df = pd.read_csv(raw_general, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
# wait till 5:05 pm, then poll for the update
target = datetime.now().replace(hour=17).replace(minute=5)
while datetime.now() < target:
print(f"[status] waiting for 5pm", end='\r')
time.sleep(60)
# load data from RI - DOH spreadsheet
gen_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}264100583'
df = pd.read_csv(gen_url).dropna(axis=1, how='all')
date = list(df)[1].strip()
date = pd.to_datetime(date).tz_localize('EST').date()
if df.shape[0] != 27:
print('[ERROR: summary page format changed]')
while not prior_date < date:
print(f"[status] waiting for update...{time.strftime('%H:%M')}", end='\r')
time.sleep(5 * 60)
df = pd.read_csv(gen_url)
date = list(df)[1].strip()
date = pd.to_datetime(date).tz_localize('EST').date()
else:
print('[status] found new update pausing for 2 mins')
time.sleep(2 * 60)
## transform general sheet
df['date'] = date
df.columns = ['metric', 'count', 'date']
save_file(df, raw_general, date)
## scrape geographic sheet
geo_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}901548302'
geo_df = pd.read_csv(geo_url)
# get grographic date & fix cols
geo_date = geo_df.iloc[-1][1]
geo_date = pd.to_datetime(geo_date)
geo_df['date'] = geo_date
cols = [x for x in list(geo_df) if 'Rate' not in x]
geo_df = geo_df[cols]
geo_df = geo_df.dropna(axis=0)
geo_df.columns = ['city_town', 'count', 'hospitalized', 'deaths', 'fully_vaccinated', 'date']
# save file
raw_geo = './data/raw/geo-ri-covid-19.csv'
save_file(geo_df, raw_geo, geo_date)
## scrape demographics sheet
dem_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}31350783'
dem_df = pd.read_csv(dem_url)
# make sure no columns were added/removed
if not dem_df.shape == (31, 9):
print('[error] demographics format changed')
return
else:
# get demographics updated date
dem_date = dem_df.iloc[-1][1]
dem_date = pd.to_datetime(dem_date).tz_localize('EST').date()
# drop percentage columns & rename
dem_df = dem_df.drop(dem_df.columns[[1, 2, 4, 6, 8]], axis=1)
dem_df.columns = ['metric', 'case_count', 'hospitalized', 'deaths']
# get data
sex = dem_df[1:4]
age = dem_df[5:17]
race = dem_df[18:24]
dem_df = pd.concat([sex, age, race])
dem_df['date'] = dem_date
raw_dem = './data/raw/demographics-covid-19.csv'
save_file(dem_df, raw_dem, dem_date)
def scrape_revised(sheet_id):
# load previous revised_data and get prior date
raw_revised = './data/raw/revised-data.csv'
df = pd.read_csv(raw_revised, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
# load revised sheet & fix column names
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}1592746937'
df = pd.read_csv(url, parse_dates=['Date'])
df.columns = [x.lower() for x in list(df)]
# test to try and make sure columns dont change
if df.shape[1] != 36 or list(df)[6] != 'daily total tests completed (may count people more than once)':
print('[error] revised sheet columns changed')
return
# check if updated
if df['date'].max() > prior_date:
df = df.drop(columns=drop_cols)
# re order columns
move_cols = (list(df)[6:11] + list(df)[22:31])
cols = [x for x in list(df) if x not in move_cols]
cols.extend(move_cols)
df = df[cols]
df['date_scraped'] = datetime.strftime(datetime.now(), '%m/%d/%Y')
save_file(df, raw_revised, df['date'].max())
def scrape_nursing_homes(sheet_id):
# load prior date
raw_facility = './data/raw/nurse-homes-covid-19.csv'
df = pd.read_csv(raw_facility, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}500394186'
df = pd.read_csv(url)
# get date of last update
date = df.iloc[0,0].split(' ')[-1]
date = pd.to_datetime(date).tz_localize('EST').date()
if not date > prior_date:
print('\n[status] nursing homes:\tno update')
return
else:
# fix headers
df.columns = df.iloc[1]
# drop past 14 days column
df = df.drop(columns='New Resident Cases (in past 14 days)')
df['Facility Name'] = df['Facility Name'].str.replace(u'\xa0', ' ') # random unicode appeared
# fix dataframe shape
assisted = df[df['Facility Name'] == 'Assisted Living Facilities'].index[0]
nursing_homes = df[3:assisted].copy()
assisted_living = df[assisted+1:-1].copy()
# add facility type & recombine
nursing_homes['type'] = 'nursing home'
assisted_living['type'] = 'assisted living'
df = pd.concat([nursing_homes, assisted_living]).reset_index(drop=True)
# add date
df['date'] = date
save_file(df, raw_facility, date)
print('[status] nursing homes:\tupdated')
def scrape_zip_codes(sheet_id):
# load prior date
raw_zip = './data/raw/zip-codes-covid-19.csv'
df = pd.read_csv(raw_zip, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}365656702'
df = pd.read_csv(url)
# check if updated
date = df.iloc[-1][1].strip()
date = pd.to_datetime(date)
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import inspect
import numpy as np
import pandas as pd
import pyspark
import databricks.koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.indexes import MissingPandasLikeIndex, MissingPandasLikeMultiIndex
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class IndexesTest(ReusedSQLTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ks.from_pandas(self.pdf)
def test_index(self):
for pdf in [
pd.DataFrame(np.random.randn(10, 5), index=list("abcdefghij")),
pd.DataFrame(
np.random.randn(10, 5), index=pd.date_range("2011-01-01", freq="D", periods=10)
),
pd.DataFrame(np.random.randn(10, 5), columns=list("abcde")).set_index(["a", "b"]),
]:
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
def test_index_getattr(self):
kidx = self.kdf.index
item = "databricks"
expected_error_message = "'Index' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_multi_index_getattr(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
kidx = kdf.index
item = "databricks"
expected_error_message = "'MultiIndex' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_to_series(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
# With name
pidx.name = "Koalas"
kidx.name = "Koalas"
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name=("x", "a"))), repr(pidx.to_series(name=("x", "a"))))
# With tupled name
pidx.name = ("x", "a")
kidx.name = ("x", "a")
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series())
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name="a"), pidx.to_series(name="a"))
def test_to_frame(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
pidx.name = "a"
kidx.name = "a"
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name="x")), repr(pidx.to_frame(name="x")))
self.assert_eq(
repr(kidx.to_frame(index=False, name="x")),
repr(pidx.to_frame(index=False, name="x")),
)
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(
repr(kidx.to_frame(name=["x", "y"])), repr(pidx.to_frame(name=["x", "y"]))
)
self.assert_eq(
repr(kidx.to_frame(index=False, name=["x", "y"])),
repr(pidx.to_frame(index=False, name=["x", "y"])),
)
def test_index_names(self):
kdf = self.kdf
self.assertIsNone(kdf.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
pdf = pd.DataFrame(np.random.randn(10, 5), index=idx, columns=list("abcde"))
kdf = ks.from_pandas(pdf)
pser = pdf.a
kser = kdf.a
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.name = "renamed"
kidx.name = "renamed"
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
pidx.name = None
kidx.name = None
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
with self.assertRaisesRegex(ValueError, "Names must be a list-like"):
kidx.names = "hi"
expected_error_message = "Length of new names must be {}, got {}".format(
len(kdf._internal.index_map), len(["0", "1"])
)
with self.assertRaisesRegex(ValueError, expected_error_message):
kidx.names = ["0", "1"]
def test_multi_index_names(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.names = ["renamed_number", "renamed_color"]
kidx.names = ["renamed_number", "renamed_color"]
self.assertEqual(kidx.names, pidx.names)
pidx.names = ["renamed_number", None]
kidx.names = ["renamed_number", None]
self.assertEqual(kidx.names, pidx.names)
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx, pidx)
else:
self.assert_eq(kidx, pidx)
with self.assertRaises(PandasNotImplementedError):
kidx.name
with self.assertRaises(PandasNotImplementedError):
kidx.name = "renamed"
def test_index_rename(self):
pdf = pd.DataFrame(
np.random.randn(10, 5), index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
)
kdf = ks.from_pandas(pdf)
pidx = pdf.index
kidx = kdf.index
self.assert_eq(kidx.rename("y"), pidx.rename("y"))
self.assert_eq(kdf.index.names, pdf.index.names)
kidx.rename("z", inplace=True)
pidx.rename("z", inplace=True)
self.assert_eq(kidx, pidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kidx.rename(None), pidx.rename(None))
self.assert_eq(kdf.index.names, pdf.index.names)
def test_multi_index_rename(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 09:25:41 2019
@author: michaelek
"""
import os
import pandas as pd
from pdsql import mssql
from matplotlib.pyplot import show
pd.options.display.max_columns = 10
date_col = 'Date_Time_Readings'
output_path = r'C:\ecan\git\water-use-advice\2020-08-17'
csv1 = 'L37-0812-m1.csv'
csv2 = 'L37-0812-m2.csv'
csv1_out = 'L37-0812-m1_fix.csv'
csv2_out = 'L37-0812-m2_fix.csv'
########################################
### Read and plot data
## Wap 1
df1 = pd.read_csv(os.path.join(output_path, csv1))
df1[date_col] = pd.to_datetime(df1['Date'] + ' ' + df1['Time'], dayfirst=True)
df2 = df1.drop(['Date', 'Time', 'GLOBAL.Flow1', 'GLOBAL.waterlevel'], axis=1)
df2 = df2.drop_duplicates(date_col).sort_values(date_col)
df2.rename(columns={'GLOBAL.Volume1': 'Acc'}, inplace=True)
df3 = df2.copy()
df3['sec_diff'] = df3[date_col].diff().dt.seconds
df3['vol_diff'] = df3['Acc'].diff()
print(any(df3['vol_diff'] < 0))
df2.set_index(date_col, inplace=True)
df2.plot()
show()
df2.to_csv(os.path.join(output_path, csv1_out))
## Wap 2
df1 = pd.read_csv(os.path.join(output_path, csv2))
df1[date_col] = pd.to_datetime(df1['Date'] + ' ' + df1['Time'], dayfirst=True)
import pandas as pd
import numpy as np
from openpyxl import load_workbook
from matplotlib import pyplot as plt
from matplotlib import rcParams
import matplotlib.ticker as ticker
from collections import namedtuple
import inspect
import os
from lcmod.core import make_shape, get_forecast
def spend_mult(sav_rate, k1=None, k2=None, ratio=None):
'''Returns the multiplier to apply to baseline spend
to reflect a change in threshold (k1->k2), given the savings (drugs substitution)
as a proportion r of the original costs
k1 = (C1 - s)/dQ : k is cost/QALY, c is cost of drug, s is savings
s = rC1, r=s/C1 : r is savings as proportion of initial costs
k1 = (1-r)C1 : assume dQ is 1 as effects are all relative
C1 = k1/(1-r)
k2 = C2 -rC1; C2 = k2 + rC1 = k2 + rk1/(1-r)
C2/C1 = (k2 + rk1/(1-r)) / (k1/(1-r)) = (1-r)(k2/k1) + r
'''
r = sav_rate
if ratio is None:
if (k1 is None) or (k2 is None):
print('need a ratio or k1 and k2')
return
else: ratio = k2/k1
return ((1-r)*(ratio)) + r
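# Example (hypothetical numbers): moving from a threshold of 20,000 to 30,000 per QALY
# with a 20% savings rate scales baseline spend by (1-0.2)*(30000/20000) + 0.2:
#   spend_mult(0.2, k1=20000, k2=30000)   # -> ~1.4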
##_________________________________________________________________________##
def dump_to_xls(res_df, in_dict, outfile, append=False, prefix="", shapes=None, log=True,
npv_rs=None, npv_yrs_lim=None, _debug=False):
'''Dump a results DataFrame to an xls, with option to make shapes from a
dict of scenarios or from a scenario alone, and/or passing shapes
PARAMETERS
res_df : the dataframe of results
in_dict : the scenarios dictionary used to generate the dataframe of results
outfile : the target file. NOTE NEEDS TO END xlsx - will be coerced
append : if True will add sheets to the outfile target
prefix : helpful if adding sheets - will be prepended to sheet names
shapes : if a df of individual lifecycle shapes already exists, it can
be passed in this parameter
log : if True will append any log info to the parameters header
npv_rs : pass a list of discount rates to get NPVs for all scenarios vs baseline
npv_yrs_lim : if NPVs are being calculated, this optionally limits the number of years.
Otherwise will be calculated over whole projection period
'''
if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
# coerce if not xls.x
if outfile.split('.')[-1] != 'xlsx':
print('coercing outfile to .xlsx')
outfile = outfile.split('.')[0] + '.xlsx'
print('new name is ', outfile)
# initialise vars
params_header = None
shapes_body = None
# first get the params header and shapes, depending on input
# if a policy is passed
params_header = make_params_table(in_dict, log=log, _debug=_debug)
shapes_body = make_shapes(in_dict, flat=True, multi_index=True)
if _debug: print('\ngot header and shapes from in_dict:\n')
if _debug: print("params_header\n", params_header, end="\n\n")
if _debug: print("shapes_body\n", shapes_body.head(), end="\n")
# if shapes are passed
if shapes is not None:
if _debug: print('preparing to overwrite shapes')
shapes_body = shapes
if _debug: print("new shapes_body\n", shapes_body.head(), end="\n")
# assemble outputs
shapes_out = params_header.append(shapes_body)
main_out = params_header.append(res_df)
annual_out = params_header.append(res_df.groupby(res_df.index.year).sum())
scen_sums_out = res_df.groupby(level=0, axis=1).sum()
scen_sums_ann_out = scen_sums_out.groupby(res_df.index.year).sum()
scen_sums_ann_diff_out = scen_sums_ann_out.subtract(scen_sums_ann_out['baseline'],
axis=0).drop('baseline', axis=1)
if npv_rs is not None:
# get limit of yrs to plot - full df unless limited by npv_yrs
if npv_yrs_lim is None:
npv_yrs_lim = len(scen_sums_ann_out)
npv_df = pd.DataFrame(index=npv_rs, columns = scen_sums_ann_diff_out.columns)
for npv_r in npv_rs:
npv_df.loc[npv_r,:] = scen_sums_ann_diff_out.iloc[:npv_yrs_lim,:].multiply((1 - npv_r) ** np.arange(npv_yrs_lim), axis=0).sum()
npv_df.index.name = 'disc rate'
# write out - either appending or making a new one, or not appending
if append:
if os.path.isfile(outfile):
book = load_workbook(outfile)
writer = pd.ExcelWriter(outfile, engine="openpyxl")
writer.book = book
else:
print("could not find file to append to. will make a new one")
writer = pd.ExcelWriter(outfile)
# not appending
else: writer = pd.ExcelWriter(outfile)
shapes_out.to_excel(writer, prefix + 'shapes')
main_out.to_excel(writer, prefix + 'main')
annual_out.to_excel(writer, prefix + 'annual')
scen_sums_out.to_excel(writer, prefix + 'scen_sums')
scen_sums_ann_out.to_excel(writer, prefix + 'scen_sums_ann')
scen_sums_ann_diff_out.to_excel(writer, prefix + 'scen_sums_ann_diff')
if npv_rs is not None:
npv_df.to_excel(writer, prefix + 'NPVs')
writer.save()
if _debug: print("\LEAVING: ", inspect.stack()[0][3])
##_________________________________________________________________________##
def dump_shapes(scens):
'''for dictionary of scenarios, dumps a single df with all the shapes
- constructed from the Sheds etc, by make_shapes()
TODO make this cope with dicts or list, with whatever depth
'''
all_shapes = pd.DataFrame()
for s in scens:
for l in scens[s]:
all_shapes[s,l] = make_shapes([scens[s][l]])
all_shapes.columns = pd.MultiIndex.from_tuples(all_shapes.columns)
all_shapes.columns.names = ['scenario', 'spendline']
all_shapes.index.name = 'period'
return all_shapes
##_________________________________________________________________________##
def first_elem(in_struct):
'''Returns the type first element of the input, be it a list or a dict
'''
if isinstance(in_struct, list):
return in_struct[0]
elif isinstance(in_struct, dict):
return in_struct[list(in_struct.keys())[0]]
##_________________________________________________________________________##
def flatten(struct, _debug=False):
'''Return a flat dict of spendlines, keys are tuples of the hierarchical name
'''
out_dict = {}
def _dig(in_struct, name=None):
# if _debug: print('entering dig with', in_struct)
if name is None: name = []
if isinstance(first_elem(in_struct), dict):
if _debug: print('in a dict')
for s in in_struct:
if _debug: print('digging to ', s)
_dig(in_struct[s], name + [s])
elif isinstance(first_elem(in_struct), list):
if _debug: print('in a list')
for s in in_struct:
_dig(s, name + ['list'])
else: # can't get it to use isinstance to id spendline here so have to do by default :/
if _debug: print('in a spendline I ASSUME - type ', type(first_elem(in_struct)))
for l in in_struct:
if _debug: print('element is ', l)
out_dict[tuple(name + [l])] = in_struct[l]
_dig(struct)
return out_dict
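# Example (hypothetical scenario dict): a dict of dicts of spendlines such as
#   {'scen1': {'lineA': sl_a, 'lineB': sl_b}}
# is flattened to {('scen1', 'lineA'): sl_a, ('scen1', 'lineB'): sl_b}.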
###_________________________________________________________________________###
def make_log(in_dict, _debug=False):
'''Return a df with log contents of an input dict containing spendlines
'''
pad = 25
if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
# first get a flat dict
flat = flatten(in_dict)
# instantiate the dataframe with the keys
df = pd.DataFrame(columns=flat.keys())
# now go through each spend line, and then each log entry
for line in flat:
if _debug: print('now in spend line'.ljust(pad), line)
if _debug: print('log is'.ljust(pad), flat[line].log)
for entry in flat[line].log:
if _debug: print('now in log entry'.ljust(pad), entry)
df.loc[entry, line] = flat[line].log[entry]
df.columns = pd.MultiIndex.from_tuples(df.columns)
if _debug: print('leaving ', inspect.stack()[0][3])
return df
##_________________________________________________________________________##
def make_params_table(pol_dict, index=None, log=False, _debug=False):
'''Constructs a dataframe with parameters for spendlines in an input dict.
There's a default set of row names - pass your own index if reqd
TODO auto pick up index
'''
if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
if index is None:
index = """peak_spend_pa peak_spend icer sav_rate
uptake_dur plat_dur plat_gr_pa plat_gr_pm gen_mult launch_delay launch_stop
term_gr_pa term_gr_pm coh_gr_pa coh_gr_pm""".split()
df = pd.DataFrame(index=index)
flat_dict = flatten(pol_dict)
for q in flat_dict:
params = [flat_dict[q].peak_spend*12*12, # double annualised
flat_dict[q].peak_spend,
flat_dict[q].icer,
flat_dict[q].sav_rate,
int(flat_dict[q].shed.uptake_dur),
int(flat_dict[q].shed.plat_dur),
flat_dict[q].shed.plat_gr*12,
flat_dict[q].shed.plat_gr,
flat_dict[q].shed.gen_mult,
flat_dict[q].launch_delay,
flat_dict[q].launch_stop,
flat_dict[q].term_gr*12,
flat_dict[q].term_gr,
flat_dict[q].coh_gr*12,
flat_dict[q].coh_gr]
df[q] = params
if log: df = df.append(make_log(pol_dict, _debug=_debug))
df.columns = pd.MultiIndex.from_tuples(df.columns)
if _debug: print('leaving ', inspect.stack()[0][3])
return df
#_________________________________________________________________________##
def make_shapes(scens, term_dur=1, start_m=None, flat=True, synch_start=False,
net_spend=True, multi_index=True, _debug=False):
'''For an input dict of scenarios (scens), return a df of shapes,
ensuring differential launch times are handled.
term_dur : minimum number of terminal periods to plot
start_m : optional start month, otherwise range index
synch_start : option to move index start according to negative launch delays
'''
pad = 25
if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
if flat: scens = flatten(scens, _debug=False)
pads = [25] + [8]*4
out = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 17 18:15:38 2021
@author: johan
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tifffile as tf
from scipy import ndimage
from skimage.measure import regionprops, label
import napari
# Use GPU for processing
import pyclesperanto_prototype as cle
cle.select_device()
def fractional_value(MapA, MapB):
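"""Return the fraction of (binary) mask MapB that is covered by MapA; both arrays must share the same shape."""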
return np.sum(np.multiply(MapA, MapB))/np.sum(MapB)
def distribution_moments(Image, Mask, **kwargs):
"Returns histogram descriptors for Pixel values in an Image masked with a Mask (bool)"
assert Mask.dtype == bool
Prefix = kwargs.get('prefix', '')
nbins = kwargs.get('nbins', 100)
Results = dict()
values = Image[Mask == True].flatten()
Results[Prefix + "_Median"] = np.median(values)
Results[Prefix + "_Mean"] = values.mean()
Results[Prefix + "_std"] = values.std()
Results[Prefix + "_V10"] = np.quantile(values, 0.1)
Results[Prefix + "_V25"] = np.quantile(values, 0.25)
Results[Prefix + "_V75"] = np.quantile(values, 0.75)
Results[Prefix + "_V95"] = np.quantile(values, 0.95)
Histogram = dict()
Histogram['hist'], Histogram['edges'] = np.histogram(values, nbins)
return Results, Histogram
def make_IF_mask(IF):
labels = dict()
labels['Background'] = 1
labels['Hypoxia'] = 4
labels['CD31'] = 3
labels['Perfusion'] = 5
# Hypoxia
Hypoxia = np.zeros_like(IF, dtype=bool)
Hypoxia[IF == labels['Hypoxia']] = True
# CD31
CD31 = np.zeros_like(IF, dtype=bool)
CD31[IF == labels['CD31']] = True
# Perfusion
Perfusion = np.zeros_like(IF, dtype=bool)
Perfusion[IF == labels['Perfusion']] = True
return Hypoxia, CD31, Perfusion
def hist_to_file(hist, edges, file):
df = pd.DataFrame()
df['Frequency'] = hist
df['Edges'] = edges[:-1] + np.diff(edges).mean()/2.0 # save centers of histogram bars instead of edges
df.to_csv(file)
return 1
def make_tumor_mask(HE):
"Generates a binary mask of tumor areas (everything included)"
print('\t---> Generating masks')
labels = dict()
labels['Background'] = 0
labels['Necrosis'] = 1
labels['Vital'] = 2
labels['SMA'] = 3
# Make Tumor Mask
Tumor = np.zeros_like(HE, dtype=bool)
Tumor[HE != labels['Background']] = True # set foreground to True
Tumor[HE == labels['SMA']] = False # remove stroma tissue
Tumor = ndimage.binary_fill_holes(Tumor)
# Make Vital Mask
Vital = np.zeros_like(HE, dtype=bool)
Vital[HE == labels['Vital']] = True
# Make SMA Mask
SMA = np.zeros_like(HE, dtype=bool)
SMA[HE == labels['SMA']] = True
return Tumor, Vital, SMA
def measure_vessel_sizes(label_img, **kwargs):
pxsize = kwargs.get('pxsize', 0.44)
label_img = label(label_img)
regions = regionprops(label_img)
Vessel_minor_radii = [prop.minor_axis_length * pxsize for prop in regions]
# Vessel_major_radii = [prop.major_axis_length * pxsize for prop in regions]
Vessel_area = [prop.area * pxsize**2 for prop in regions]  # pixel count -> physical area (pxsize squared)
Vessel_area = np.histogram(Vessel_area)
Vessel_radii = np.histogram(Vessel_minor_radii)
return Vessel_area, Vessel_radii
def pretty_picture(R, G, B, label):
"Generate a prettyfied image for visualization"
image = np.vstack([R[None, :, :],
G[None, :, :],
B[None, :, :]]).astype(int)
image = image.transpose((1,2,0))
plt.imshow(image)
plt.imshow(label, cmap='gray', alpha=0.5)
def measure(segmented_HE, segmented_IF, **kwargs):
"Measured defined features from provided input images HE and IF"
pxsize = kwargs.get('pxsize', 0.44)
HE_image = kwargs.get('HE_root', '')
IF_image = kwargs.get('IF_root', '')
directory = kwargs.get('dir', '')
res_dir = os.path.dirname(segmented_HE)
HE = tf.imread(segmented_HE)
IF = tf.imread(segmented_IF)
Tumor, Vital, SMA = make_tumor_mask(HE)
Hypoxia, CD31, Perfusion = make_IF_mask(np.multiply(Vital, IF))
# Measure HE-related params
meas = dict()
meas['dir'] = directory
meas['HE_input'] = HE_image
meas['IF_input'] = IF_image
meas['Necrotic_fraction'] = 1 -fractional_value(Vital, Tumor)
meas['SMA_fraction'] = fractional_value(SMA, Tumor)
# Measure IF-related params
meas['Hypoxic_fraction'] = fractional_value(Hypoxia, Vital)
meas['Vascular_fraction'] = fractional_value(CD31, Vital)
# Vessel sizes
Vessel_area, Vessel_radii = measure_vessel_sizes(CD31)
hist_to_file(Vessel_area[0], Vessel_area[1], os.path.join(res_dir, 'Vessel_area_hist.csv'))
hist_to_file(Vessel_radii[0], Vessel_radii[1], os.path.join(res_dir, 'Vessel_radii_hist.csv'))
# Distance related features: Vessels
EDT = ndimage.morphology.distance_transform_edt(np.invert(CD31)) * pxsize # get EDT
EDT_fts, EDT_hist = distribution_moments(EDT, Vital, prefix='EDT_CD31')
meas.update(EDT_fts)
# Hypoxic-distance (HyDi) related features
HyDi_fts, HyDi_hist = distribution_moments(np.multiply(EDT, Hypoxia), Vital, prefix='EDT_Hypoxia')
meas.update(HyDi_fts)
# Save features
df = pd.DataFrame(meas, index=[0])
import pandas as pd
import urllib3 as urllib
import urllib.request as urllib2
import json
import glob
import IPython.display
import re
pd.options.display.max_columns = None
http = urllib.PoolManager()
# Load Facility Name to CMS ID json file
fac2CMS_file = 'IL_FacilityName_to_CMS_ID.json'
with open(fac2CMS_file) as f:
ltc_name2cms_id = json.load(f)
def getResponse(url):
operUrl = http.request('GET', url)
if(operUrl.status==200):
data = operUrl.data
jsonData = json.loads(data.decode('utf-8'))
else:
print("Error receiving data", operUrl.getcode())
return jsonData
def facility2CMSNum (facilityName):
regex = re.compile('\(\d\)')
facilityName = regex.split(facilityName)[0].strip()
if facilityName in ltc_name2cms_id:
return ltc_name2cms_id[facilityName]
else:
return "No Match"
# df_facilities.reset_index(inplace=True) # Needed because used group by to get facility level data ToDo: COnsider moving this code up
# df_facilities['county-facName']= df_facilities['County'].str.upper() + '-' + df_facilities['FacilityName'].str.upper()
# df_facilities['CMS_ProvNum'] = df_facilities['county-facName'].apply(lambda x: facility2CMSNum(x))
def pull_IL_json_from_file(file):
'''
- Get IL data from a previously saved JSON file
Return: Reporting Date: str, Outbreak data: dict
'''
# Load IL data from the saved JSON file passed in as `file`
with open(file) as f:
ltc_data = json.load(f)
# Get Reporting Date
reporting_date = '%d-%02d-%02d' % (ltc_data['LastUpdateDate']['year'], ltc_data['LastUpdateDate']['month'], ltc_data['LastUpdateDate']['day'])
return reporting_date, ltc_data
def pull_IL_json_from_web():
'''
- Get IL data from JSON
- Store IL data in Source Data w/Date Stamp
Return: Reporting Date: str, Outbreak data: dict
'''
#Get IL data from JSON
ltc_data = getResponse('https://idph.illinois.gov/DPHPublicInformation/api/covid/getltcdata')
ltc_data_json = json.dumps(ltc_data)
# Extract Reporting Data
reporting_date = '%d-%02d-%02d' %(ltc_data['LastUpdateDate']['year'], ltc_data['LastUpdateDate']['month'], ltc_data['LastUpdateDate']['day'])
# Saving a copy of source data
file = "Source_data/IL_" + reporting_date + "_LTC_data_Source.json"
with open(file, "w") as f:
f.write(ltc_data_json)
return reporting_date, ltc_data
def outbreak_df_from_file(outbreak_data, ltc_name2cms_id):
""" From Json file:
1) return DataFrame augmented and save to file
2) return Summary data"""
ltc_data = outbreak_data # TODO Refactor NAME
# Extract Reporting Data
reporting_date = '%d-%02d-%02d' %(ltc_data['LastUpdateDate']['year'], ltc_data['LastUpdateDate']['month'], ltc_data['LastUpdateDate']['day'])
# Build DataFrame
df = pd.DataFrame(ltc_data['FacilityValues'])
"""
Helpers for metrics
"""
import altair as alt
import numpy as np
import pandas as pd
import streamlit as st
from sklearn import metrics
from xai_fairness.toolkit_perf import (
cumulative_gain_curve, binary_ks_curve)
def confusion_matrix_chart(source, title="Confusion matrix"):
"""Confusion matrix."""
base = alt.Chart(source).encode(
x="predicted:O",
y="actual:O",
).properties(
title=title,
)
rects = base.mark_rect().encode(
color="value:Q",
)
text = base.mark_text(
align="center",
baseline="middle",
color="black",
size=12,
dx=0,
).encode(
text="value:Q",
)
return rects + text
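# Example input (hypothetical counts) in long form, one row per (actual, predicted) cell:
#   source = pd.DataFrame({"predicted": [0, 1, 0, 1], "actual": [0, 0, 1, 1], "value": [50, 5, 3, 42]})
#   confusion_matrix_chart(source)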
def roc_chart(fpr, tpr, title="ROC curve"):
"""ROC curve."""
source = pd.DataFrame({"FPR": fpr, "TPR": tpr})
base = alt.Chart(source).properties(title=title)
line = base.mark_line(color="red").encode(
alt.X("FPR", title="False positive rate"),
alt.Y("TPR", title="True positive rate"),
alt.Tooltip(["FPR", "TPR"]),
)
area = base.mark_area(fillOpacity=0.5, fill="red").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
_df = pd.DataFrame({"x": [0, 1], "y": [0, 1]})
baseline = alt.Chart(_df).mark_line(strokeDash=[20, 5], color="black").encode(
alt.X("x"),
alt.Y("y"),
)
return line + area + baseline
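# Example usage (assuming 1-D arrays of labels and scores):
#   fpr, tpr, _ = metrics.roc_curve(y_true, y_score)
#   roc_chart(fpr, tpr)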
def line_chart(source, xtitle, ytitle, title):
"""General line chart."""
base = alt.Chart(source).properties(title=title)
line = base.mark_line().encode(
alt.X("x:Q", title=xtitle),
alt.Y("y:Q", title=ytitle),
tooltip=[alt.Tooltip("x", title=xtitle), alt.Tooltip("y", title=ytitle)],
)
return line
def precision_recall_chart(precision, recall, title="Precision-Recall curve"):
"""PR curve."""
source = pd.DataFrame({"x": recall, "y": precision})
return line_chart(source, "Recall", "Precision", title=title)
def cumulative_gain_chart(percentages, gains, title="Cumulative gain curve"):
"""Cumulative gain curve."""
source = pd.DataFrame({"x": percentages, "y": gains})
return line_chart(source, "Percentage of samples selected",
"Percentage of positive labels", title=title)
def cumulative_lift_chart(percentages, gains, title="Cumulative lift curve"):
"""Cumulative lift curve."""
source = pd.DataFrame({"x": percentages, "y": gains})
return line_chart(source, "Percentage of samples selected", "Lift", title=title)
def recall_k_chart(percentages, recall, title="Recall@K"):
"""Recall@k curve."""
source = pd.DataFrame({"x": percentages, "y": recall})
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/8/20 18:02
Desc: Eastmoney - Data Center - Featured Data - Equity Pledge
Eastmoney - Data Center - Featured Data - Equity Pledge - Equity pledge market overview: http://data.eastmoney.com/gpzy/marketProfile.aspx
Eastmoney - Data Center - Featured Data - Equity Pledge - Pledge ratio of listed companies: http://data.eastmoney.com/gpzy/pledgeRatio.aspx
Eastmoney - Data Center - Featured Data - Equity Pledge - Pledge details of major shareholders: http://data.eastmoney.com/gpzy/pledgeDetail.aspx
Eastmoney - Data Center - Featured Data - Equity Pledge - Distribution statistics of pledgee institutions - securities companies: http://data.eastmoney.com/gpzy/distributeStatistics.aspx
Eastmoney - Data Center - Featured Data - Equity Pledge - Distribution statistics of pledgee institutions - banks: http://data.eastmoney.com/gpzy/distributeStatistics.aspx
Eastmoney - Data Center - Featured Data - Equity Pledge - Industry data: http://data.eastmoney.com/gpzy/industryData.aspx
"""
import math
from akshare.utils import demjson
import pandas as pd
import requests
from tqdm import tqdm
def stock_em_gpzy_profile() -> pd.DataFrame:
"""
Eastmoney - Data Center - Featured Data - Equity Pledge - Equity pledge market overview
http://data.eastmoney.com/gpzy/marketProfile.aspx
:return: equity pledge market overview
:rtype: pandas.DataFrame
"""
url = "http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get"
params = {
"type": "ZD_SUM",
"token": "<PASSWORD>459a279469fe49eca5",
"cmd": "",
"st": "tdate",
"sr": "-1",
"p": "1",
"ps": "5000",
"js": "var zvxnZOnT={pages:(tp),data:(x),font:(font)}",
"rt": "52583914",
}
temp_df = pd.DataFrame()
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
map_dict = dict(
zip(
pd.DataFrame(data_json["font"]["FontMapping"])["code"],
pd.DataFrame(data_json["font"]["FontMapping"])["value"],
)
)
for key, value in map_dict.items():
data_text = data_text.replace(key, str(value))
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
temp_df.columns = [
"交易日期",
"sc_zsz",
"平均质押比例(%)",
"涨跌幅",
"A股质押总比例(%)",
"质押公司数量",
"质押笔数",
"质押总股数(股)",
"质押总市值(元)",
"沪深300指数",
]
temp_df = temp_df[
[
"交易日期",
"平均质押比例(%)",
"涨跌幅",
"A股质押总比例(%)",
"质押公司数量",
"质押笔数",
"质押总股数(股)",
"质押总市值(元)",
"沪深300指数",
]
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"])
return temp_df
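# Example usage (requires network access to the Eastmoney endpoint):
#   stock_em_gpzy_profile().head()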
def stock_em_gpzy_pledge_ratio(trade_date: str = "2020-08-07") -> pd.DataFrame:
"""
Eastmoney - Data Center - Featured Data - Equity Pledge - Pledge ratio of listed companies
http://data.eastmoney.com/gpzy/pledgeRatio.aspx
:param trade_date: the trade date to query; visit http://data.eastmoney.com/gpzy/pledgeRatio.aspx to check available dates
:type trade_date: str
:return: pledge ratio of listed companies
:rtype: pandas.DataFrame
"""
url = "http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get"
temp_df = pd.DataFrame()
params = {
"type": "ZD_QL_LB",
"token": "70<PASSWORD>f4f09<PASSWORD>59a279469fe49eca5",
"cmd": "",
"st": "amtshareratio",
"sr": "-1",
"p": "1",
"ps": "5000",
"js": "var rlJqyOhv={pages:(tp),data:(x),font:(font)}",
"filter": f"(tdate='{trade_date}')",
"rt": "52584436",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
map_dict = dict(
zip(
pd.DataFrame(data_json["font"]["FontMapping"])["code"],
pd.DataFrame(data_json["font"]["FontMapping"])["value"],
)
)
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import requests
import json
from rasa_core_sdk import Action
import pandas as pd
from duckling import DucklingWrapper
# from rasa_core.events import SlotSet
from rasa_core_sdk.events import SlotSet
from rasa_core_sdk.events import AllSlotsReset
from rasa_core_sdk.events import Restarted
logger = logging.getLogger(__name__)
d = DucklingWrapper()
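# d.parse_time(text) returns a list of match dicts with 'text' and 'value' fields
# (the value includes a 'grain' such as u'year'); the actions below filter on these.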
class ActionJoke(Action):
def name(self):
# define the name of the action which can then be included in training stories
return "action_joke"
def run(self, dispatcher, tracker, domain):
# what your action should do
request = requests.get('http://api.icndb.com/jokes/random').json() # make an API call
joke = request['value']['joke'] #extract a joke from returned json response
dispatcher.utter_message(joke) #send the message back to the user
return []
# class ActionAskModelYear(Action):
# def name(self):
# # define the name of the action which can then be included in training stories
# return "action_ask_model_year"
# def run(self, dispatcher, tracker, domain):
# # what your action should do
# user_message = tracker.latest_message['text']
# dt = d.parse_time(user_message)
# model_year = dt[0][:4] if dt is not None else None
# return [SlotSet("model_year", model_year)]
class ActionGetModelYear(Action):
def name(self):
# define the name of the action which can then be included in training stories
return "action_get_model_year"
def run(self, dispatcher, tracker, domain):
# what your action should do
user_message = tracker.latest_message['text']
dt = d.parse_time(user_message)
print(user_message)
print(dt)
years = [v['text'] for v in dt if v['value']['grain'] == u'year']
model_year = None
if len(years) == 1:
# dispatcher.utter_template("utter_ask_dob", tracker, silent_fail=True)
model_year = years[0]
# else:
# dispatcher.utter_template("utter_ask_model_year", tracker, silent_fail=True)
return [SlotSet("model_year", model_year)]
class ActionVerifyDob(Action):
def name(self):
# define the name of the action which can then be included in training stories
return "action_verify_dob"
def run(self, dispatcher, tracker, domain):
# what your action should do
# user_message = tracker.latest_message['text']
# print(tracker.latest_message)
dt = tracker.get_slot('time')
age = None
print(dt)
if dt is not None:
# assumed completion: approximate age in whole years from the parsed date of birth
age = int((pd.datetime.now() - pd.to_datetime(dt[0][:10])).days / 365)
# https://imaddabbura.github.io/post/kmeans_clustering/
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
from PIL import Image
def rgb_to_hex(rgb):
return '#%02x%02x%02x' % (int(rgb[0]), int(rgb[1]), int(rgb[2]))
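# Example: rgb_to_hex((255, 0, 0)) -> '#ff0000'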
# Convert PIL image to array
img = Image.open("dogs.jpeg")
np_array = np.array(img)
print("Array shape")
print(np_array.shape)
print("\nDimensions of the Image")
print("Height : " + str(img.height))
print("Width : " + str(img.width))
# Reshape the array in 2 dimensions
flat_array = np_array.ravel()
new_array = flat_array.reshape(np_array.shape[0] * np_array.shape[1], np_array.shape[2])
# Initializing the k means
kmeans = KMeans(n_clusters=3)
kmeans.fit(new_array)
print("\nCluster centroids")
print(kmeans.cluster_centers_)
# count elements of each cluster
print("Elements of Each cluster ")
unique, counts = np.unique(kmeans.labels_, return_counts=True)
print(dict(zip(unique, counts)))
df = pd.DataFrame(new_array, columns=["col1", "col2", "col3"])
import argparse
import itertools
import multiprocessing as mp
import os
from inspect import signature
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Timer import Timer, timer
import qpputils as dp
try:
from crossval import InterTopicCrossValidation, IntraTopicCrossValidation
from queries_pre_process import add_topic_to_qdf
except ModuleNotFoundError:
import sys
from pathlib import Path
script_dir = sys.path[0]
# Adding the parent directory to the path
sys.path.append(str(Path(script_dir).parent))
from crossval import InterTopicCrossValidation, IntraTopicCrossValidation
from queries_pre_process import add_topic_to_qdf
PREDICTORS = ['clarity', 'wig', 'nqc', 'smv', 'rsd', 'qf', 'uef/clarity', 'uef/wig', 'uef/nqc', 'uef/smv', 'uef/qf']
# PREDICTORS = ['clarity', 'wig', 'nqc', 'uef/clarity', 'uef/wig', 'uef/nqc']
SIMILARITY_MEASURES = ['Jac_coefficient', 'RBO_EXT_100', 'Top_10_Docs_overlap', 'RBO_FUSED_EXT_100']
parser = argparse.ArgumentParser(description='PageRank UQV Evaluation', usage='python3.7 pr_eval.py -c CORPUS')
parser.add_argument('-c', '--corpus', default='ROBUST', type=str, help='corpus (index) to work with',
choices=['ROBUST', 'ClueWeb12B'])
def calc_best_worst(full_df: pd.DataFrame, ap_df: pd.DataFrame, metric_direction):
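    # Descriptive note: for every prediction column, compute per topic the fraction of query
    # variants whose predicted score is strictly below ('best') / strictly above ('worst')
    # the score of the variant(s) that achieve the best/worst AP in that topic.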
bw_param = max if metric_direction == 'best' else min
_ap_vars = ap_df.loc[ap_df.groupby('topic')['ap'].transform(bw_param) == ap_df['ap']].set_index('topic')
_results = []
for col in full_df.set_index(['topic', 'qid']).columns:
pr_df = full_df.loc[:, ['topic', 'qid', col]]
_result = {}
for topic, _df in pr_df.groupby('topic'):
_var_ap = _ap_vars.loc[topic].qid
if type(_var_ap) is str:
_pr_val = _df.loc[_df['qid'] == _var_ap, col].values[0]
else:
_pr_val = np.mean(_df.loc[_df['qid'].isin(_var_ap), col].values)
if metric_direction == 'best':
_var_score = np.count_nonzero(_df[col] < _pr_val) / len(_df)
else:
_var_score = np.count_nonzero(_df[col] > _pr_val) / len(_df)
_result[topic] = {col: _var_score}
_results.append(pd.DataFrame.from_dict(_result, orient='index'))
df = pd.concat(_results, axis=1)
return df
def set_basic_paths(corpus):
res_dir, data_dir = dp.set_environment_paths()
cv_folds = dp.ensure_file(f'{res_dir}/{corpus}/test/2_folds_30_repetitions.json')
ap_file = dp.ensure_file(f'{res_dir}/{corpus}/test/raw/QLmap1000')
pkl_dir = dp.ensure_dir(f'{res_dir}/{corpus}/test/pageRank/pkl_files')
return {'res_dir': res_dir, 'data_dir': data_dir, 'pkl_dir': pkl_dir, 'cv_folds': cv_folds, 'ap_file': ap_file}
def init_eval(corpus, similarity, predictor):
pth_dict = set_basic_paths(corpus)
predictor_pkl_dir = dp.ensure_dir(f"{pth_dict['pkl_dir']}/{predictor}")
predictions_dir = dp.ensure_dir(
f'{pth_dict["res_dir"]}/{corpus}/uqvPredictions/referenceLists/pageRank/raw/{similarity}/{predictor}/predictions')
ap_obj = dp.ResultsReader(pth_dict['ap_file'], 'ap')
ap_df = add_topic_to_qdf(ap_obj.data_df)
cv_obj = InterTopicCrossValidation(predictions_dir=predictions_dir, folds_map_file=pth_dict['cv_folds'])
full_results_df = add_topic_to_qdf(cv_obj.full_set)
return {'predictor_pkl_dir': predictor_pkl_dir, 'ap_obj': ap_obj, 'ap_df': ap_df,
'full_results_df': full_results_df, 'cv_obj': cv_obj}
@timer
def best_worst_metric(corpus, similarity, predictor, metric, load=False):
assert metric == 'best' or metric == 'worst', f'The function expects a known metric. {metric} was passed'
pkl_dir, ap_obj, ap_df, full_results_df, cv_obj = init_eval(corpus, similarity, predictor).values()
_file = f'{pkl_dir}/{similarity}_{metric}_results.pkl'
if load:
_df = load_exec(_file, calc_best_worst, (full_results_df, ap_df, metric))
else:
_df = calc_best_worst(full_results_df, ap_df, metric)
_df.to_pickle(_file)
return calc_s(cv_obj, _df)
def calc_s(cv_obj: InterTopicCrossValidation, full_scores_df: pd.DataFrame):
if hasattr(cv_obj, 'corr_df'):
cv_obj.__delattr__('corr_df')
cv_obj.full_set = full_scores_df
score = cv_obj.calc_test_results()
return float(score)
def load_exec(file_to_load, function_to_exec, args=None):
"""
    The function tries to load a pandas DataFrame from a pickle file; if the file doesn't exist,
    it executes the function that was passed as an argument to generate a new DataFrame and
    caches the result to that pickle file.
    The assumptions are that the pickle file (if it exists) contains a pandas DataFrame and
    that the passed function returns a pandas DataFrame.
    :param file_to_load: path to the file that should be loaded
    :param function_to_exec: the function that will be executed in case the file doesn't exist
    :param args: a single argument or a list of arguments to pass to the exec function
    :return: pd.DataFrame
"""
try:
_df = pd.read_pickle(dp.ensure_file(file_to_load))
except AssertionError:
# Checking the signature of the function
sig = signature(function_to_exec)
if bool(sig.parameters):
if len(sig.parameters) > 1:
_df = function_to_exec(*args)
else:
_df = function_to_exec(args)
else:
_df = function_to_exec()
_df.to_pickle(file_to_load)
return _df
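# A minimal usage sketch for load_exec (hypothetical file name and callable, not from the
# original code): cache an expensive DataFrame computation behind a pickle file.
#
#     def build_df(n):
#         return pd.DataFrame({'x': range(n)})
#
#     df = load_exec('cached_df.pkl', build_df, 100)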
def minmax_ap_metric(corpus, similarity, predictor, minmax):
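    # Descriptive note: for each prediction column, pick per topic the query variant with the
    # maximal ('max') or minimal ('min') predicted score, look up the real AP of that variant,
    # and evaluate the resulting MAP via the cross-validation object (the frame and the score
    # are negated for 'min' so that calc_s can be reused).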
pkl_dir, ap_obj, raw_ap_df, full_pr_df, cv_obj = init_eval(corpus, similarity, predictor).values()
_list = []
for col in full_pr_df.set_index(['topic', 'qid']).columns:
grpby = full_pr_df.loc[:, ['topic', 'qid', col]].set_index('qid').groupby('topic')[col]
_qids = grpby.idxmax() if minmax == 'max' else grpby.idxmin()
_df = raw_ap_df.loc[raw_ap_df.qid.isin(_qids)].set_index('topic')['ap']
_list.append(_df.rename(col))
full_ap_df = pd.concat(_list, axis=1)
return calc_s(cv_obj, full_ap_df) if minmax == 'max' else -calc_s(cv_obj, -full_ap_df)
def run_intra_topic_eval(corpus, similarity, predictor):
spam_time = Timer(f'working on {corpus, similarity, predictor}')
pth_dict = set_basic_paths(corpus)
# {'res_dir': res_dir, 'data_dir': data_dir, 'pkl_dir': pkl_dir, 'cv_folds': cv_folds, 'ap_file': ap_file}
predictions_dir = dp.ensure_dir(os.path.join(pth_dict['res_dir'],
f'{corpus}/uqvPredictions/referenceLists/'
f'pageRank/raw/{similarity}/{predictor}/predictions'))
cv_obj = IntraTopicCrossValidation(predictions_dir=predictions_dir, folds_map_file=pth_dict['cv_folds'],
ap_file=pth_dict['ap_file'], save_calculations=True, test='kendall')
result = cv_obj.calc_test_results()
spam_time.stop()
return {(corpus, similarity, predictor): result}
def interactive_parameters():
# TODO: cover all options for the interactive choice with retries instead of fixed values
corpus = input('What corpus should be used for evaluation?\n')
while corpus != 'ROBUST' and corpus != 'ClueWeb12B':
print(f'Unknown corpus: {corpus}\ntry ROBUST or ClueWeb12B instead')
corpus = input('What corpus should be used for evaluation?\n')
predictor = input('What predictor should be used for evaluation?\n')
while predictor not in PREDICTORS:
print(f'Unknown predictor: {predictor}\n try one of the available predictors instead, e.g.\n{PREDICTORS}')
predictor = input('What predictor should be used for evaluation?\n')
similarity = input('What similarity should be used for evaluation?\n')
while similarity not in SIMILARITY_MEASURES:
print(
f'Unknown similarity: {similarity}\n try one of the available similarities instead, e.g.\n{SIMILARITY_MEASURES}')
similarity = input('What similarity should be used for evaluation?\n')
return corpus, similarity, predictor
def run_all(metric_func):
with mp.Pool(processes=40) as pool:
results = pool.starmap(metric_func,
itertools.product({'ROBUST', 'ClueWeb12B'}, SIMILARITY_MEASURES, PREDICTORS))
df = pd.DataFrame(results).T
df.index = pd.MultiIndex.from_tuples(df.index, names=['corpus', 'similarity', 'predictor'])
return df
def load_single_per_topic_df(corpus, similarity, predictor):
pth_dict = set_basic_paths(corpus)
predictions_dir = dp.ensure_dir(os.path.join(pth_dict['res_dir'],
f'{corpus}/uqvPredictions/referenceLists/'
f'pageRank/raw/{similarity}/{predictor}/predictions'))
cv_obj = IntraTopicCrossValidation(predictions_dir=predictions_dir, folds_map_file=pth_dict['cv_folds'],
ap_file=pth_dict['ap_file'], save_calculations=True)
df = cv_obj.load_per_topic_df()
mean_val_per_topic = df.loc[:, df.columns != 'weight'].mean(1)
return mean_val_per_topic, df.weight
def load_full_per_topic_df(corpus):
_list = []
# weight = None
for similarity in SIMILARITY_MEASURES:
for predictor in PREDICTORS:
_sr, weight = load_single_per_topic_df(corpus, similarity, predictor)
_sr.name = similarity + ' ' + predictor
_list.append(_sr)
# weight.name = 'weight'
# df = pd.concat(_list + [weight / weight.max()], axis=1)
df = pd.concat(_list, axis=1)
df.to_pickle(f'{corpus}_full_results_df.pkl')
# df.boxplot(rot=45)
# plt.show()
def main(args, load_results=False, interact=False):
corpus = args.corpus
pths_dict = set_basic_paths(corpus)
if interact:
print('\n\n\n------------!!!!!!!---------- Interactive Mode ------------!!!!!!!----------\n\n\n')
while True:
corpus, similarity, predictor = interactive_parameters()
max_ap_score = minmax_ap_metric(corpus, similarity, predictor, 'max')
min_ap_score = minmax_ap_metric(corpus, similarity, predictor, 'min')
print(f'The MAP score using {predictor} for max PR queries: {max_ap_score}\n'
f'The MAP score using {predictor} for min PR queries: {min_ap_score}')
best_score = best_worst_metric(corpus, similarity, predictor, metric='best', load=True)
worst_score = best_worst_metric(corpus, similarity, predictor, metric='worst', load=True)
print(f'Predicting using {predictor} best var: {best_score}\n'
f'Predicting using {predictor} worst var {worst_score}')
elif load_results:
res_df = load_exec(os.path.join(pths_dict['pkl_dir'], f'{corpus}_PageRank_results_table.pkl'), fun, corpus)
print(
res_df.to_latex(header=True, multirow=True, multicolumn=False, index=True, escape=False, index_names=True))
else:
results_df = run_all(run_intra_topic_eval)
print("\multicolumn{3}{c}{ROBUST} \\")
print(results_df.astype(float).sum(1).loc['ROBUST'].to_latex(header=True, multirow=True, multicolumn=False,
index=True, escape=False,
index_names=True).replace('_', '-'))
print("\multicolumn{3}{c}{ClueWeb12B} \\")
print(results_df.astype(float).sum(1).loc['ClueWeb12B'].to_latex(header=True, multirow=True, multicolumn=False,
index=True, escape=False,
index_names=True).replace('_', '-'))
results_df.to_pickle(os.path.join(pths_dict['pkl_dir'], f'AvgKendall_PageRank_results_table.pkl'))
def fun(corpus):
res_dir, data_dir = dp.set_environment_paths()
raw_ap_df = dp.add_topic_to_qdf(
dp.ResultsReader(dp.ensure_file(f'{res_dir}/{corpus}/test/raw/QLmap1000'), 'ap').data_df)
grp_obj = raw_ap_df.groupby('topic')['ap']
avg_ap_df = grp_obj.mean()
max_ap_df = grp_obj.max()
min_ap_df = grp_obj.min()
print(f'Average real MAP: {avg_ap_df.mean():.4f}')
print(f'Maximum real MAP: {max_ap_df.mean():.4f}')
print(f'Min real MAP: {min_ap_df.mean():.4f}')
result = {}
for similarity in SIMILARITY_MEASURES:
print(f'Similarity {similarity}:')
for predictor in PREDICTORS:
max_ap_score = minmax_ap_metric(corpus, similarity, predictor, 'max')
min_ap_score = minmax_ap_metric(corpus, similarity, predictor, 'min')
print(f'The MAP score using {predictor} for max PR queries: {max_ap_score}\n'
f'The MAP score using {predictor} for min PR queries: {min_ap_score}')
best_score = best_worst_metric(corpus, similarity, predictor, metric='best', load=True)
worst_score = best_worst_metric(corpus, similarity, predictor, metric='worst', load=True)
print(f'Predicting using {predictor} best var: {best_score}\n'
f'Predicting using {predictor} worst var {worst_score}')
result[similarity, predictor] = {'Max MAP': max_ap_score, 'Min MAP': min_ap_score,
'Best Score': best_score,
'Worst Score': worst_score}
df = | pd.DataFrame.from_dict(result, orient='index') | pandas.DataFrame.from_dict |
"""
# install the package
pip install deepctr
# tutorial
https://deepctr-doc.readthedocs.io/en/latest/Quick-Start.html#getting-started-4-steps-to-deepctr
# github
https://github.com/shenweichen/DeepCTR
However, this only supports binary targets, so it cannot be applied here as-is.
To force a binary formulation, non-click (negative) data would have to be generated somehow.
# ---- next idea ----
# github
https://github.com/ChenglongChen/tensorflow-DeepFM
"""
import tensorflow as tf
import os
import pickle
import pandas as pd
import numpy as np
import copy
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_absolute_error
from src.module.tensorflow_DeepFM.DeepFM import DeepFM as DeepFM_
# Interface wrapper
class DeepFM:
def __init__(self, set_train_test_users, set_train_test_items, dict_genre=None, first_half_fit_only_fm=False, ctr_prediction=True):
"""
import pandas as pd
DIR_DATA = 'src/module/knowledge_graph_attention_network/Data/ml'
df_train = pd.read_csv(os.path.join(DIR_DATA, 'train_rating.csv'))
df_test = pd.read_csv(os.path.join(DIR_DATA, 'test_rating.csv'))
set_train_test_users = set(np.concatenate([df_train['UserID'], df_test['UserID']]))
set_train_test_items = set(np.concatenate([df_train['MovieID'], df_test['MovieID']]))
dict_genre = pickle.load(open(os.path.join(DIR_DATA, 'genre.pickle'), 'rb'))
self = DeepFM(set_train_test_users, set_train_test_items, dict_genre)
self.dfm_params['epoch'] = 10
self.dfm_params['batch_size'] = 64
users = df_train['UserID'].values
        items = df_train['MovieID'].values
ratings = df_train['Rating'].values
self.fit(users, items, ratings)
        predicted = self.predict(df_test['UserID'].values, df_test['MovieID'].values)
# MAE of test-set
print( np.mean(np.abs(predicted - df_test['Rating'])) )
# MAE of mean-prediction
print( np.mean(np.abs(df_test['Rating'].mean() - df_test['Rating'])) )
        ## As long as this end-to-end test passes, the wrapper is considered OK.
"""
"""
        For reference, the hyperparameters validated on the MovieLens 1M data are as follows
Deep Matrix Factorization Approach for
Collaborative Filtering Recommender Systems
k(hidden-factor) = 8, γ(learning-rate) = 0.01, λ(regularization) = 0.045
K = [9, 3, 3]; Γ= [0.01, 0.01, 0.01]; Λ = [0.1, 0.01, 0.1]
"""
self.set_train_test_users = set(set_train_test_users)
self.set_train_test_items = set(set_train_test_items)
self.dict_genre = dict_genre
self.first_half_fit_only_fm = first_half_fit_only_fm
self.data_manager = Data_manager(set_train_test_users, set_train_test_items, dict_genre)
feature_size, field_size = self.data_manager.get_feature_size_field_size()
self.dfm_params = {
"feature_size" : feature_size,
"field_size" : field_size,
"loss_type" : "mse", # "logloss" なら {0,1} の判別問題。 "mse" なら regression。
"use_fm": True, # fm-layer を使用
"use_deep": True, # deep-layer を使用
"embedding_size": 8,
"dropout_fm": [1.0, 1.0],
"deep_layers": [32, 32],
"dropout_deep": [0.5, 0.5, 0.5],
"deep_layers_activation": tf.nn.relu,
"epoch": 30,
"batch_size": 64,
"learning_rate": 0.001,
"optimizer_type": "adam",
"batch_norm": 1,
"batch_norm_decay": 0.995,
"l2_reg": 0.0001,
"l2_reg_embedding": 0.0001,
"l2_reg_bias": 0.0001,
"verbose": True,
"eval_metric": mean_absolute_error,
"greater_is_better": False, # 学習における損失スコアが大きい方が良いかどうか
"random_seed": 2017,
}
self.ctr_prediction = ctr_prediction
if self.ctr_prediction:
self.dfm_params["loss_type"] = "logloss"
def fit(self, users, items, ratings, test_users=[], test_items=[], test_ratings=[], **kargs):
"""
users = [0,0,1]
items = [0,3,3]
ratings = [3.,4.,5.]
"""
global_mean_bias_init = np.float32(np.mean(ratings))
        global_mean_bias_init = 0.01  # overridden: initializing the bias near 0 worked better in practice (see notes at the bottom of this file)
self.model = DeepFM_(**self.dfm_params, global_mean_bias_init=global_mean_bias_init, first_half_fit_only_fm=self.first_half_fit_only_fm)
        # For CTR prediction, randomly generate negative (y=0) samples.
if self.ctr_prediction:
users = list(users) + list(np.random.choice(list(set(users)), size=len(users)))
items = list(items) + list(np.random.choice(list(set(items)), size=len(items)))
ratings = list((np.array(ratings)>0).astype(int)) + [0]*len(ratings)
test_ratings = list((np.array(test_ratings)>0).astype(int))
Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)
if len(test_users)>0:
test_Xi, test_Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(test_users, test_items)
self.model.fit(Xi, Xv, ratings, test_Xi, test_Xv, test_ratings, early_stopping=True)
else:
self.model.fit(Xi, Xv, ratings, early_stopping=True, **kargs)
        # bookkeeping: remember the trained users/items and the global mean prediction
self.trained_users = list(set(users))
self.trained_items = list(set(items))
self.global_mean = self.model.predict(Xi, Xv).mean()
def predict(self, users, items, *args, **kargs):
Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)
predicted = self.model.predict(Xi, Xv)
return predicted
# prepare training and validation data in the required format
class Data_manager:
def __init__(self, users, items, dict_genre=None):
"""
        users [array like object]:
            user_id values contained in the train/test sets
        items [array like object]:
            item_id values contained in the train/test sets
        dict_genre [dictionary]:
            ex) {item_id: [genre_id1, genre_id2]}

        The data format was worked out by reading the code under tensorflow_DeepFM/example,
        in particular DataReader.py. In short, the item, user and genre IDs should be converted as follows:
        1) user  = {0,1,2} → {0,1,2}  *unchanged
        2) item  = {0,1}   → {3,4}    *IDs incremented after the users
        3) genre = {0,1}   → {5,6}    *IDs incremented after the items
        4) an interaction sample [u,i,g] = [0,1,0] → [0,4,5]
        5) Xi_train (X-index trainset) = [converted [u,i,g] 1, converted [u,i,g] 2, ...]
        6) Xv_train (X-value trainset) = [[1.,1.,1.], [1.,1.,1.], ...]
           user, item and genre are categorical variables, so every value is 1.
        7) y_train = [rating-score1, rating-score2, ...]  *no conversion needed
EXAMPLE
-------------
import pandas as pd
df_rating = pd.read_csv(os.path.join(DIR_DATA, 'train_rating.csv'))
dict_genre = pickle.load(open(os.path.join(DIR_DATA, 'genre.pickle'), 'rb'))
users = df_rating['UserID']
items = df_rating['MovieID']
self = Data_manager(users, items, dict_genre=dict_genre)
"""
self.dict_genre = dict_genre
        # Create self.inclement_index, the object that assigns incremental indexes.
if dict_genre:
dict_genre = {i:gs for i,gs in dict_genre.items() if i in items}
n_genre = max([max(gs) for i,gs in dict_genre.items() if gs]) + 1
genres = list(range(n_genre))
else:
dict_genre = {}
n_genre = 0
genres = []
self.inclement_index = inclement_index(users, items, genres)
        # Re-key dict_genre with the incremental item IDs
dict_genre = {self.inclement_index.transform([i], field='item')[0]:gs for i,gs in dict_genre.items()}
        # user and item each get their own field; genres are split into a separate field per genre label.
self.re_dict_genre = {}
for i,gs in dict_genre.items():
            # re_dict_genre maps item_id to its genre one-hot vector.
genre_one_hot_vec = [0] * n_genre
for g in gs:
                genre_one_hot_vec[g] = 1  # categorical variables are always set to the integer 1
self.re_dict_genre[i] = genre_one_hot_vec
self.genre_indexes = self.inclement_index.transform(genres, field='genre')
self.feature_size = self.inclement_index.get_feature_size()
self.field_size = 2 + n_genre
def get_feature_size_field_size(self):
return self.feature_size, self.field_size
def transform_users_and_items_to_Xi_Xv(self, users, items):
"""
users = [0,0,1]
items = [1,5,5]
"""
Xi, Xv = [], []
users = self.inclement_index.transform(users, field='user')
items = self.inclement_index.transform(items, field='item')
for u,i in zip(users, items):
if self.dict_genre:
Xi.append([u, i] + self.genre_indexes)
Xv.append([1, 1] + self.re_dict_genre[i])
else:
Xi.append([u, i])
Xv.append([1, 1])
return Xi, Xv
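# Worked example for Data_manager (a sketch; the exact incremental IDs depend on the iteration
# order of the user/item/genre sets):
#
#     dm = Data_manager(users=[0, 1, 2], items=[10, 11], dict_genre={10: [0], 11: [0, 1]})
#     Xi, Xv = dm.transform_users_and_items_to_Xi_Xv([0, 2], [10, 11])
#     # each Xi row is [user_index, item_index, genre_index_0, genre_index_1]
#     # each Xv row is [1, 1, genre_one_hot_0, genre_one_hot_1]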
class inclement_index:
def __init__(self, users, items, genres=[]):
"""
users = ['u0','u1',3]
items = ['i0', 3]
genres = ['pop', 'sf']
self = inclement_index(users, items, genres)
self.transform(['u0', 'u1', 3], field='user', inverse=False)
self.transform(['i0', 3], field='item', inverse=False)
self.transform(['pop', 'sf'], field='genre', inverse=False)
transformed = self.transform(['u0', 'u1', 3], field='user', inverse=False)
self.transform(transformed, field='user', inverse=True)
"""
users = set(users)
items = set(items)
genres = set(genres)
self.increment_cnt = 0
self.user_dict = {u:self.get_incremate_index() for u in users}
self.user_inverse_dict = {v:k for k,v in self.user_dict.items()}
self.item_dict = {i:self.get_incremate_index() for i in items}
self.item_inverse_dict = {v:k for k,v in self.item_dict.items()}
self.genre_dict = {g:self.get_incremate_index() for g in genres}
self.genre_inverse_dict = {v:k for k,v in self.genre_dict.items()}
def transform(self, xs, field='user', inverse=False):
"""
xs = [0,2]
        self.transform(xs, field='user')
"""
if inverse:
if field == 'user':
_dict = self.user_inverse_dict
elif field == 'item':
_dict = self.item_inverse_dict
elif field == 'genre':
_dict = self.genre_inverse_dict
else:
if field == 'user':
_dict = self.user_dict
elif field == 'item':
_dict = self.item_dict
elif field == 'genre':
_dict = self.genre_dict
return [_dict[x] for x in xs]
def get_incremate_index(self):
now_index = copy.deepcopy(self.increment_cnt)
self.increment_cnt += 1
return now_index
def get_feature_size(self):
return self.increment_cnt
if __name__ == 'how to use it.':  # intentionally never true: the demo blocks below are kept for reference only
###########################
    # --- A very simple test ---
sample_size = 1000
users = np.random.choice(range(100), size=sample_size)
items = np.random.choice(range(100), size=sample_size)
genre_dict = None
ratings = users - items
self = DeepFM(set(users), set(items))
self.dfm_params['batch_size'] = 64
self.dfm_params['epoch'] = 100
self.fit(users, items, ratings)
    self.predict([10, 5, 10], [10, 10, 2])  # the correct answers are [0, -5, 8]
    # Using a sufficiently small batch_size is very important.
    # This can be verified by watching the loss decrease during the training test.
###########################
    # --- Simple test 1 ---
sample_size = 1000
n_user = 500
n_item = 20
users = np.random.choice(range(n_user), size=sample_size)
items = np.random.choice(range(n_item), size=sample_size)
user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
def rating(u, i):
return 10*sum(user_embedding[u] * item_embedding[i]) + 3
ratings = [rating(u, i) for u,i in zip(users, items)]
self = DeepFM(list(range(n_user)), list(range(n_item)))
self.dfm_params['epoch'] = 100
self.dfm_params['embedding_size'] = 200
self.dfm_params['l2_reg'] = 0.0045
self.fit(users, items, ratings)
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
    test_ratings = [rating(u, i) for u, i in zip(test_users, test_items)]
predicted = self.predict(test_users, test_items)
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
    # Does introducing a scaler improve things? → no particular improvement.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit([[r] for r in ratings])
s_ratings = scaler.transform([[r] for r in ratings])[:,0]
self.fit(users, items, s_ratings)
predicted = self.predict(test_users, test_items)
predicted = scaler.inverse_transform(predicted[:,None])
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
###########################
    # --- Simple test 2: with bias and embedding ---
sample_size = 1000
n_user = 500
n_item = 20
users = np.random.choice(range(n_user), size=sample_size)
items = np.random.choice(range(n_item), size=sample_size)
user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
    user_bias = {u:u/10 for u in range(n_user)}  # the bias simply grows with the id
    item_bias = {i:i for i in range(n_item)}  # the bias simply grows with the id
def rating(u, i):
return 10*sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i]
ratings = [rating(u, i) for u,i in zip(users, items)]
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
    test_ratings = [rating(u, i) for u, i in zip(test_users, test_items)]
self = DeepFM(list(range(n_user)), list(range(n_item)))
self.dfm_params['epoch'] = 100
self.dfm_params['embedding_size'] = 200
self.fit(users, items, ratings, test_users, test_items, test_ratings)
    # Comparison with mean prediction
predicted = self.predict(test_users, test_items)
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
    # Comparison with the oracle
predicted = self.predict([200]*n_item, list(range(n_item)))
answer = [rating(200,i) for i in range(n_item)]
print(predicted)
print(answer)
print(predicted - answer)
    ## Inspect the internal embeddings.
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
###########################
    # --- Simple test 3: head / tail / new IDs ---
sample_size = 1000
n_user = 200
n_item = 50
    ## The larger the id, the lower its frequency in the training set.
p_user = 1/np.array(range(1, n_user+1)); p_user /= p_user.sum()
p_item = 1/np.array(range(1, n_item+1)); p_item /= p_item.sum()
users = np.random.choice(range(n_user), size=sample_size, p=p_user)
items = np.random.choice(range(n_item), size=sample_size, p=p_item)
user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
    user_bias = {u:u/10 for u in range(n_user)}  # the bias simply grows with the id
    item_bias = {i:i for i in range(n_item)}  # the bias simply grows with the id
def rating(u, i):
return 10*sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i]
ratings = [rating(u, i) for u,i in zip(users, items)]
    ## one extra user id (=n_user) and one extra item id (=n_item) are reserved as new, unseen IDs
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
    test_ratings = [rating(u, i) for u, i in zip(test_users, test_items)]
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 300
self.dfm_params['embedding_size'] = 4
self.fit(users, items, ratings, test_users, test_items, test_ratings)
    # Comparison with mean prediction
predicted = self.predict(test_users, test_items)
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
    ## Inspect the internal embeddings.
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
    ## Visualize (the first n_user rows are users; the rest are items)
import pandas as pd
    # [OK] Some embeddings change linearly as the id increases; they partially learn the bias effect.
pd.DataFrame(feature_embeddings).plot()
    # [Success] Initializing the DeepFM biases near 0 brought the user biases close to the oracle.
    # [?] The item biases decrease as the id increases, opposite to the oracle → probably because the embeddings absorbed the bias?
pd.DataFrame(feature_bias).plot()
    # Check the new IDs → roughly at their initial values, near 0?
    ## New user
feature_embeddings[200]
feature_bias[200]
    ## New item
feature_embeddings[-1]
feature_bias[-1]
##############################################
    # --- Train with random biases that are unrelated to the id ---
sample_size = 1000
n_user = 200
n_item = 50
    ## The larger the id, the lower its frequency in the training set.
p_user = 1/np.array(range(1, n_user+1)); p_user /= p_user.sum()
p_item = 1/np.array(range(1, n_item+1)); p_item /= p_item.sum()
users = np.random.choice(range(n_user), size=sample_size, p=p_user)
items = np.random.choice(range(n_item), size=sample_size, p=p_item)
user_bias = {u:np.random.rand() for u in range(n_user)}
item_bias = {i:np.random.rand() for i in range(n_item)}
user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
def rating(u, i):
return 3*(sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i])
ratings = [rating(u, i) for u,i in zip(users, items)]
    ## one extra user id (=n_user) and one extra item id (=n_item) are reserved as new, unseen IDs
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
    test_ratings = [rating(u, i) for u, i in zip(test_users, test_items)]
# ------------------------------
##############################################
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 100
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.001
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
""" デバック
self.predict([1]*n_item, range(n_item))
self.predict([0]*n_item, range(n_item))
[rating(1, i) for i in range(n_item)]
"""
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
"""
    This test produced the expected result, so it can be considered a success.
    The success comes from the following changes:
    [1] The initial values of each id's embedding and bias were changed to values near 0.
    [2] embedding and bias were added to the l2_reg targets. (This presumably suppresses the weights of rare IDs, though the details are unclear.)
"""
    # --- Check the influence of each parameter ---
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 10
self.dfm_params['embedding_size'] = 4
self.dfm_params['use_deep'] = False
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
| pd.DataFrame(feature_embeddings) | pandas.DataFrame |
import asyncio
from collections import defaultdict, namedtuple
from dataclasses import dataclass, fields as dataclass_fields
from datetime import date, datetime, timedelta, timezone
from enum import Enum
from itertools import chain, repeat
import logging
import pickle
from typing import Collection, Dict, Generator, Iterable, Iterator, KeysView, List, \
Mapping, Optional, Sequence, Set, Tuple, Union
import aiomcache
import numpy as np
import pandas as pd
from pandas.core.common import flatten
from sqlalchemy import sql
from sqlalchemy.orm import aliased
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql.elements import BinaryExpression
from athenian.api import metadata
from athenian.api.async_utils import gather, read_sql_query
from athenian.api.cache import cached, CancelCache, short_term_exptime
from athenian.api.controllers.logical_repos import coerce_logical_repos
from athenian.api.controllers.miners.filters import JIRAFilter, LabelFilter
from athenian.api.controllers.miners.github.commit import BRANCH_FETCH_COMMITS_COLUMNS, \
DAG, fetch_precomputed_commit_history_dags, fetch_repository_commits_no_branch_dates
from athenian.api.controllers.miners.github.dag_accelerated import searchsorted_inrange
from athenian.api.controllers.miners.github.label import fetch_labels_to_filter
from athenian.api.controllers.miners.github.logical import split_logical_repositories
from athenian.api.controllers.miners.github.precomputed_prs import \
discover_inactive_merged_unreleased_prs, MergedPRFactsLoader, OpenPRFactsLoader, \
update_unreleased_prs
from athenian.api.controllers.miners.github.release_load import ReleaseLoader
from athenian.api.controllers.miners.github.release_match import PullRequestToReleaseMapper, \
ReleaseToPullRequestMapper
from athenian.api.controllers.miners.github.released_pr import matched_by_column
from athenian.api.controllers.miners.jira.issue import generate_jira_prs_query
from athenian.api.controllers.miners.types import DeploymentConclusion, MinedPullRequest, \
nonemax, nonemin, PRParticipants, PRParticipationKind, PullRequestFacts, PullRequestFactsMap
from athenian.api.controllers.prefixer import Prefixer
from athenian.api.controllers.settings import LogicalRepositorySettings, ReleaseMatch, \
ReleaseSettings
from athenian.api.db import add_pdb_misses, Database, DatabaseLike
from athenian.api.defer import AllEvents, defer
from athenian.api.int_to_str import int_to_str
from athenian.api.models.metadata.github import Base, NodePullRequestJiraIssues, \
PullRequest, PullRequestComment, PullRequestCommit, PullRequestLabel, PullRequestReview, \
PullRequestReviewComment, PullRequestReviewRequest, PushCommit, Release
from athenian.api.models.metadata.jira import Component, Issue
from athenian.api.models.persistentdata.models import DeploymentNotification
from athenian.api.models.precomputed.models import GitHubPullRequestDeployment
from athenian.api.tracing import sentry_span
@dataclass
class PRDataFrames(Mapping[str, pd.DataFrame]):
"""Set of dataframes with all the PR data we can reach."""
prs: pd.DataFrame
commits: pd.DataFrame
releases: pd.DataFrame
jiras: pd.DataFrame
reviews: pd.DataFrame
review_comments: pd.DataFrame
review_requests: pd.DataFrame
comments: pd.DataFrame
labels: pd.DataFrame
deployments: pd.DataFrame
def __iter__(self) -> Iterator[str]:
"""Implement iter() - return an iterator over the field names."""
return iter((f.name for f in dataclass_fields(self)))
def __getitem__(self, key: str) -> pd.DataFrame:
"""Implement self[key]."""
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def __setitem__(self, key: str, value: pd.DataFrame) -> None:
"""Implement self[key] = value."""
for f in dataclass_fields(self):
if key == f.name:
break
else:
raise KeyError(key)
setattr(self, key, value)
def __len__(self) -> int:
"""Implement len()."""
return len(dataclass_fields(self))
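# Usage note (sketch): PRDataFrames acts as a mutable mapping over its dataframes, so generic
# code can iterate them by field name, e.g. (with a hypothetical `selected_rows` index array):
#
#     for name in dfs:
#         dfs[name] = dfs[name].take(selected_rows)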
class PullRequestMiner:
"""Load all the information related to Pull Requests from the metadata DB. Iterate over it \
to access individual PR objects."""
CACHE_TTL = short_term_exptime
log = logging.getLogger("%s.PullRequestMiner" % metadata.__package__)
ReleaseMappers = namedtuple("ReleaseMappers", [
"map_releases_to_prs", "map_prs_to_releases", "load_releases"])
mappers = ReleaseMappers(
map_releases_to_prs=ReleaseToPullRequestMapper.map_releases_to_prs,
map_prs_to_releases=PullRequestToReleaseMapper.map_prs_to_releases,
load_releases=ReleaseLoader.load_releases,
)
def __init__(self, dfs: PRDataFrames):
"""Initialize a new instance of `PullRequestMiner`."""
self._dfs = dfs
@property
def dfs(self) -> PRDataFrames:
"""Return the bound dataframes with PR information."""
return self._dfs
def __len__(self) -> int:
"""Return the number of loaded pull requests."""
return len(self._dfs.prs)
def __iter__(self) -> Generator[MinedPullRequest, None, None]:
"""Iterate over the individual pull requests."""
assert self._dfs.prs.index.nlevels == 2
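        # Descriptive note: the loop below is effectively a streaming merge-join. Each auxiliary
        # dataframe is grouped by PR node id (plus repository where needed) once, and the
        # per-group iterators are advanced in lockstep with the ascending-sorted prs index,
        # so each MinedPullRequest is assembled without per-PR dataframe lookups.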
df_fields = [f.name for f in dataclass_fields(MinedPullRequest) if f.name != "pr"]
dfs = []
grouped_df_iters = []
index_backup = []
for k in df_fields:
plural = k.endswith("s")
df = getattr(self._dfs, k if plural else (k + "s")) # type: pd.DataFrame
dfs.append(df)
# our very own groupby() allows us to call take() with reduced overhead
node_ids = df.index.get_level_values(0).values.astype(int, copy=False)
with_repos = k == "release"
if df.index.nlevels > 1:
# the second level adds determinism to the iteration order
second_level = df.index.get_level_values(1).values
node_ids_bytes = int_to_str(node_ids)
if second_level.dtype == int:
order_keys = np.char.add(node_ids_bytes, int_to_str(second_level))
else:
order_keys = np.char.add(node_ids_bytes,
second_level.astype("S", copy=False))
else:
order_keys = node_ids
df_order = np.argsort(order_keys)
if not with_repos:
unique_node_ids, node_ids_unique_counts = np.unique(node_ids, return_counts=True)
offsets = np.zeros(len(node_ids_unique_counts) + 1, dtype=int)
np.cumsum(node_ids_unique_counts, out=offsets[1:])
groups = self._iter_by_split(df_order, offsets)
grouped_df_iters.append(iter(zip(unique_node_ids, repeat(None), groups)))
else:
_, unique_counts = np.unique(order_keys, return_counts=True)
node_ids = node_ids[df_order]
repos = df.index.get_level_values(1).values[df_order].astype("U")
offsets = np.zeros(len(unique_counts) + 1, dtype=int)
np.cumsum(unique_counts, out=offsets[1:])
groups = self._iter_by_split(df_order, offsets)
grouped_df_iters.append(iter(zip(
node_ids[offsets[:-1]], repos[offsets[:-1]], groups)))
if plural:
index_backup.append(df.index)
df.index = df.index.droplevel(0)
else:
index_backup.append(None)
try:
grouped_df_states = []
for i in grouped_df_iters:
try:
grouped_df_states.append(next(i))
except StopIteration:
grouped_df_states.append((None, None, None))
empty_df_cache = {}
pr_columns = [PullRequest.node_id.name, PullRequest.repository_full_name.name]
pr_columns.extend(self._dfs.prs.columns)
if not self._dfs.prs.index.is_monotonic_increasing:
raise IndexError("PRs index must be pre-sorted ascending: "
"prs.sort_index(inplace=True)")
for pr_tuple in self._dfs.prs.itertuples():
(pr_node_id, repo), *pr_tuple = pr_tuple
items = {"pr": dict(zip(pr_columns, [pr_node_id, repo] + pr_tuple))}
for i, (k, (state_pr_node_id, state_repo, gdf), git, df) in enumerate(zip(
df_fields, grouped_df_states, grouped_df_iters, dfs)):
while state_pr_node_id is not None and (
state_pr_node_id < pr_node_id
or (state_pr_node_id == pr_node_id
and state_repo is not None
and state_repo < repo)):
try:
state_pr_node_id, state_repo, gdf = next(git)
except StopIteration:
state_pr_node_id, state_repo, gdf = None, None, None
grouped_df_states[i] = state_pr_node_id, state_repo, gdf
if state_pr_node_id == pr_node_id and \
(state_repo is None or state_repo == repo):
if not k.endswith("s"):
# much faster than items.iloc[gdf[0]]
gdf = {c: v for c, v in zip(df.columns, df._data.fast_xs(gdf[0]))}
else:
gdf = df.take(gdf)
items[k] = gdf
else:
try:
items[k] = empty_df_cache[k]
except KeyError:
if k.endswith("s"):
empty_val = df.iloc[:0].copy()
else:
empty_val = {c: None for c in df.columns}
items[k] = empty_df_cache[k] = empty_val
yield MinedPullRequest(**items)
finally:
for df, index in zip(dfs, index_backup):
if index is not None:
df.index = index
def drop(self, node_ids: Collection[int]) -> pd.Index:
"""
Remove PRs from the given collection of PR node IDs in-place.
Node IDs don't have to be all present.
:return: Actually removed node IDs.
"""
removed = self._dfs.prs.index.get_level_values(0).intersection(node_ids)
if removed.empty:
return removed
self._dfs.prs.drop(removed, inplace=True)
for df in self._dfs.values():
df.drop(removed, inplace=True, errors="ignore",
level=0 if isinstance(df.index, pd.MultiIndex) else None)
return removed
def _deserialize_mine_cache(buffer: bytes) -> Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
Dict[str, ReleaseMatch],
asyncio.Event]:
stuff = pickle.loads(buffer)
event = asyncio.Event()
event.set()
return (*stuff, event)
@sentry_span
def _postprocess_cached_prs(
result: Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
bool,
Dict[str, ReleaseMatch],
asyncio.Event],
date_to: date,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]],
truncate: bool,
**_) -> Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
bool,
Dict[str, ReleaseMatch],
asyncio.Event]:
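        # Descriptive note: a cached result may have been computed with broader filters; this
        # postprocess narrows it down to the requested repositories, participants, labels, JIRA
        # filter and PR blacklist, and raises CancelCache when the cached filters cannot cover
        # the request (or when a JIRA mapping is required but was not cached).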
dfs, _, cached_repositories, cached_participants, cached_labels, cached_jira, \
cached_with_jira_map, _, _ = result
if with_jira_map and not cached_with_jira_map:
raise CancelCache()
cls = PullRequestMiner
if (repositories - cached_repositories or
not cls._check_participants_compatibility(cached_participants, participants) or
not cached_labels.compatible_with(labels) or
not cached_jira.compatible_with(jira)):
raise CancelCache()
to_remove = set()
if pr_blacklist is not None:
to_remove.update(pr_blacklist[0])
if no_logical_repos := (coerce_logical_repos(repositories).keys() == repositories):
to_remove.update(dfs.prs.index.get_level_values(0).values[
np.in1d(dfs.prs.index.get_level_values(1).values,
list(repositories), assume_unique=True, invert=True),
])
time_to = None if truncate else pd.Timestamp(date_to, tzinfo=timezone.utc)
to_remove.update(cls._find_drop_by_participants(dfs, participants, time_to))
to_remove.update(cls._find_drop_by_labels(dfs, labels))
to_remove.update(cls._find_drop_by_jira(dfs, jira))
cls._drop(dfs, to_remove)
if not no_logical_repos:
dfs.prs = dfs.prs.take(np.flatnonzero(
np.in1d(dfs.prs.index.get_level_values(1).values,
list(repositories), assume_unique=True),
))
return result
@classmethod
@sentry_span
@cached(
exptime=lambda cls, **_: cls.CACHE_TTL,
serialize=lambda r: pickle.dumps(r[:-1]),
deserialize=_deserialize_mine_cache,
key=lambda date_from, date_to, exclude_inactive, release_settings, logical_settings, updated_min, updated_max, pr_blacklist, truncate, **_: ( # noqa
date_from.toordinal(), date_to.toordinal(), exclude_inactive,
release_settings, logical_settings,
updated_min.timestamp() if updated_min is not None else None,
updated_max.timestamp() if updated_max is not None else None,
",".join(map(str, sorted(pr_blacklist[0]) if pr_blacklist is not None else [])),
truncate,
),
postprocess=_postprocess_cached_prs,
)
async def _mine(cls,
date_from: date,
date_to: date,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
branches: pd.DataFrame,
default_branches: Dict[str, str],
exclude_inactive: bool,
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
updated_min: Optional[datetime],
updated_max: Optional[datetime],
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]],
truncate: bool,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
) -> Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
bool,
Dict[str, ReleaseMatch],
asyncio.Event]:
assert isinstance(date_from, date) and not isinstance(date_from, datetime)
assert isinstance(date_to, date) and not isinstance(date_to, datetime)
assert isinstance(repositories, set)
assert isinstance(mdb, Database)
assert isinstance(pdb, Database)
assert isinstance(rdb, Database)
assert (updated_min is None) == (updated_max is None)
time_from, time_to = (pd.Timestamp(t, tzinfo=timezone.utc) for t in (date_from, date_to))
pr_blacklist_expr = ambiguous = None
if pr_blacklist is not None:
pr_blacklist, ambiguous = pr_blacklist
if len(pr_blacklist) > 0:
pr_blacklist_expr = PullRequest.node_id.notin_any_values(pr_blacklist)
if logical_settings.has_logical_prs():
physical_repos = coerce_logical_repos(repositories).keys()
else:
physical_repos = repositories
pdags = await fetch_precomputed_commit_history_dags(physical_repos, account, pdb, cache)
fetch_branch_dags_task = asyncio.create_task(
cls._fetch_branch_dags(
physical_repos, pdags, branches, account, meta_ids, mdb, pdb, cache),
name="_fetch_branch_dags",
)
# the heaviest task should always go first
tasks = [
cls.mappers.map_releases_to_prs(
repositories, branches, default_branches, time_from, time_to,
participants.get(PRParticipationKind.AUTHOR, []),
participants.get(PRParticipationKind.MERGER, []),
jira, release_settings, logical_settings, updated_min, updated_max, pdags,
prefixer, account, meta_ids, mdb, pdb, rdb, cache, pr_blacklist_expr, None,
truncate=truncate),
cls.fetch_prs(
time_from, time_to, physical_repos, participants, labels, jira,
exclude_inactive, pr_blacklist_expr, None, branches, pdags, account, meta_ids,
mdb, pdb, cache, updated_min=updated_min, updated_max=updated_max,
fetch_branch_dags_task=fetch_branch_dags_task),
cls.map_deployments_to_prs(
physical_repos, time_from, time_to, participants,
labels, jira, updated_min, updated_max, prefixer, branches, pdags,
account, meta_ids, mdb, pdb, cache, pr_blacklist,
fetch_branch_dags_task=fetch_branch_dags_task),
]
# the following is a very rough approximation regarding updated_min/max:
        # we load all or none of the inactive merged PRs
# see also: load_precomputed_done_candidates() which generates `ambiguous`
if not exclude_inactive and (updated_min is None or updated_min <= time_from):
tasks.append(cls._fetch_inactive_merged_unreleased_prs(
time_from, time_to, repositories, participants, labels, jira, default_branches,
release_settings, logical_settings.has_logical_prs(),
prefixer, account, meta_ids, mdb, pdb, cache))
# we don't load inactive released undeployed PRs because nobody needs them
(
(released_prs, releases, release_settings, matched_bys,
release_dags, precomputed_observed),
(prs, branch_dags, _),
deployed_prs,
*unreleased,
) = await gather(*tasks)
del pr_blacklist_expr
deployed_releases_task = None
if not deployed_prs.empty:
covered_prs = prs.index.union(released_prs.index)
if unreleased:
covered_prs = covered_prs.union(unreleased[0].index)
new_prs = deployed_prs.index.difference(covered_prs)
if not new_prs.empty:
new_prs = deployed_prs[[
PullRequest.merged_at.name, PullRequest.repository_full_name.name,
]].loc[new_prs]
min_deployed_merged = new_prs[PullRequest.merged_at.name].min()
if min_deployed_merged < time_from:
deployed_releases_task = asyncio.create_task(
cls.mappers.load_releases(
new_prs[PullRequest.repository_full_name.name].unique(),
branches, default_branches, min_deployed_merged, time_from,
release_settings, logical_settings, prefixer, account, meta_ids,
mdb, pdb, rdb, cache),
name="PullRequestMiner.mine/deployed_releases",
)
concatenated = [prs, released_prs, deployed_prs, *unreleased]
missed_prs = cls._extract_missed_prs(ambiguous, pr_blacklist, deployed_prs, matched_bys)
if missed_prs:
add_pdb_misses(pdb, "PullRequestMiner.mine/blacklist",
sum(len(v) for v in missed_prs.values()))
# these PRs are released by branch and not by tag, and we require by tag.
# we have not fetched them yet because they are in pr_blacklist
# and they are in pr_blacklist because we have previously loaded them in
# load_precomputed_done_candidates();
# now fetch only these `missed_prs`, respecting the filters.
pr_whitelist = PullRequest.node_id.in_(
list(chain.from_iterable(missed_prs.values())))
tasks = [
cls.mappers.map_releases_to_prs(
missed_prs, branches, default_branches, time_from, time_to,
participants.get(PRParticipationKind.AUTHOR, []),
participants.get(PRParticipationKind.MERGER, []),
jira, release_settings, logical_settings, updated_min, updated_max, pdags,
prefixer, account, meta_ids, mdb, pdb, rdb, cache, None, pr_whitelist,
truncate, precomputed_observed=precomputed_observed),
cls.fetch_prs(
time_from, time_to, missed_prs.keys(), participants, labels, jira,
exclude_inactive, None, pr_whitelist, branches, branch_dags, account, meta_ids,
mdb, pdb, cache, updated_min=updated_min, updated_max=updated_max,
fetch_branch_dags_task=fetch_branch_dags_task),
]
missed_released_prs, (missed_prs, *_) = await gather(*tasks)
concatenated.extend([missed_released_prs, missed_prs])
fetch_branch_dags_task.cancel() # 99.999% that it was awaited, but still
prs = pd.concat(concatenated, copy=False)
prs.reset_index(inplace=True)
prs.drop_duplicates([PullRequest.node_id.name, PullRequest.repository_full_name.name],
inplace=True)
prs.set_index(PullRequest.node_id.name, inplace=True)
prs.sort_index(inplace=True)
if unreleased:
unreleased = np.array([
unreleased[0].index.values,
unreleased[0][PullRequest.repository_full_name.name].values,
], dtype=object).T
tasks = [
# bypass the useless inner caching by calling _mine_by_ids directly
cls._mine_by_ids(
prs, unreleased, repositories, time_to, releases, matched_bys,
branches, default_branches, release_dags, release_settings, logical_settings,
prefixer, account, meta_ids, mdb, pdb, rdb, cache,
truncate=truncate, with_jira=with_jira_map,
extra_releases_task=deployed_releases_task,
physical_repositories=physical_repos),
OpenPRFactsLoader.load_open_pull_request_facts(prs, repositories, account, pdb),
]
(dfs, unreleased_facts, unreleased_prs_event), open_facts = await gather(
*tasks, op="PullRequestMiner.mine/external_data")
to_drop = cls._find_drop_by_participants(dfs, participants, None if truncate else time_to)
to_drop |= cls._find_drop_by_labels(dfs, labels)
if exclude_inactive:
to_drop |= cls._find_drop_by_inactive(dfs, time_from, time_to)
cls._drop(dfs, to_drop)
facts = open_facts
for k, v in unreleased_facts.items(): # merged unreleased PR precomputed facts
if v is not None: # it can be None because the pdb table is filled in two steps
facts[k] = v
dfs.prs = split_logical_repositories(
dfs.prs, dfs.labels, repositories, logical_settings)
return dfs, facts, repositories, participants, labels, jira, with_jira_map, matched_bys, \
unreleased_prs_event
_deserialize_mine_cache = staticmethod(_deserialize_mine_cache)
_postprocess_cached_prs = staticmethod(_postprocess_cached_prs)
def _deserialize_mine_by_ids_cache(
buffer: bytes) -> Tuple[PRDataFrames,
PullRequestFactsMap,
asyncio.Event]:
dfs, facts = pickle.loads(buffer)
event = asyncio.Event()
event.set()
return dfs, facts, event
@classmethod
@cached(
exptime=lambda cls, **_: cls.CACHE_TTL,
serialize=lambda r: pickle.dumps(r[:-1]),
deserialize=_deserialize_mine_by_ids_cache,
key=lambda prs, unreleased, releases, time_to, logical_settings, truncate=True, with_jira=True, **_: ( # noqa
",".join(map(str, prs.index.values)),
",".join(map(str, unreleased)),
",".join(map(str, releases[Release.node_id.name].values)),
time_to.timestamp(),
logical_settings,
truncate,
with_jira,
),
)
async def mine_by_ids(cls,
prs: pd.DataFrame,
unreleased: Collection[Tuple[int, str]],
logical_repositories: Union[Set[str], KeysView[str]],
time_to: datetime,
releases: pd.DataFrame,
matched_bys: Dict[str, ReleaseMatch],
branches: pd.DataFrame,
default_branches: Dict[str, str],
dags: Dict[str, DAG],
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
truncate: bool = True,
with_jira: bool = True,
physical_repositories: Optional[Union[Set[str], KeysView[str]]] = None,
) -> Tuple[PRDataFrames,
PullRequestFactsMap,
asyncio.Event]:
"""
Fetch PR metadata for certain PRs.
:param prs: pandas DataFrame with fetched PullRequest-s. Only the details about those PRs \
will be loaded from the DB.
:param truncate: Do not load anything after `time_to`.
:param with_jira: Value indicating whether to load the mapped JIRA issues.
:return: 1. List of mined DataFrame-s. \
2. mapping to PullRequestFacts of unreleased merged PRs. \
3. Synchronization for updating the pdb table with merged unreleased PRs.
"""
return await cls._mine_by_ids(
prs, unreleased, logical_repositories, time_to, releases, matched_bys,
branches, default_branches, dags, release_settings, logical_settings, prefixer,
account, meta_ids, mdb, pdb, rdb, cache, truncate=truncate, with_jira=with_jira,
physical_repositories=physical_repositories)
_deserialize_mine_by_ids_cache = staticmethod(_deserialize_mine_by_ids_cache)
@classmethod
@sentry_span
async def _mine_by_ids(cls,
prs: pd.DataFrame,
unreleased: Collection[Tuple[int, str]],
logical_repositories: Union[Set[str], KeysView[str]],
time_to: datetime,
releases: pd.DataFrame,
matched_bys: Dict[str, ReleaseMatch],
branches: pd.DataFrame,
default_branches: Dict[str, str],
dags: Dict[str, DAG],
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
truncate: bool = True,
with_jira: bool = True,
extra_releases_task: Optional[asyncio.Task] = None,
physical_repositories: Optional[Union[Set[str], KeysView[str]]] = None,
) -> Tuple[PRDataFrames,
PullRequestFactsMap,
asyncio.Event]:
assert prs.index.nlevels == 1
node_ids = prs.index if len(prs) > 0 else set()
facts = {} # precomputed PullRequestFacts about merged unreleased PRs
unreleased_prs_event: asyncio.Event = None
merged_unreleased_indexes = []
@sentry_span
async def fetch_reviews():
return await cls._read_filtered_models(
PullRequestReview, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestReview.submitted_at, PullRequestReview.state,
PullRequestReview.user_login, PullRequestReview.user_node_id],
created_at=truncate)
@sentry_span
async def fetch_review_comments():
return await cls._read_filtered_models(
PullRequestReviewComment, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestReviewComment.created_at, PullRequestReviewComment.user_login,
PullRequestReviewComment.user_node_id],
created_at=truncate)
@sentry_span
async def fetch_review_requests():
return await cls._read_filtered_models(
PullRequestReviewRequest, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestReviewRequest.created_at],
created_at=truncate)
@sentry_span
async def fetch_comments():
return await cls._read_filtered_models(
PullRequestComment, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestComment.created_at, PullRequestComment.user_login,
PullRequestComment.user_node_id],
created_at=truncate)
@sentry_span
async def fetch_commits():
return await cls._read_filtered_models(
PullRequestCommit, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestCommit.authored_date, PullRequestCommit.committed_date,
PullRequestCommit.author_login, PullRequestCommit.committer_login,
PullRequestCommit.author_user_id, PullRequestCommit.committer_user_id],
created_at=truncate)
@sentry_span
async def fetch_labels():
return await cls._read_filtered_models(
PullRequestLabel, node_ids, time_to, meta_ids, mdb,
columns=[sql.func.lower(PullRequestLabel.name).label(PullRequestLabel.name.name),
PullRequestLabel.description,
PullRequestLabel.color],
created_at=False)
fetch_labels_task = asyncio.create_task(
fetch_labels(), name="PullRequestMiner.mine_by_ids/fetch_labels")
@sentry_span
async def map_releases():
anyhow_merged_mask = prs[PullRequest.merged_at.name].notnull().values
if truncate:
merged_mask = (prs[PullRequest.merged_at.name] < time_to).values
nonlocal merged_unreleased_indexes
merged_unreleased_indexes = np.flatnonzero(anyhow_merged_mask & ~merged_mask)
else:
merged_mask = anyhow_merged_mask
if len(unreleased):
prs_index = np.char.add(
int_to_str(prs.index.values),
(prs_repos := prs[PullRequest.repository_full_name.name].values.astype("S")),
)
if isinstance(unreleased, np.ndarray):
unreleased_index = np.char.add(
int_to_str(unreleased[:, 0].astype(int)),
unreleased[:, 1].astype(prs_repos.dtype),
)
else:
unreleased_index = np.char.add(
int_to_str(np.fromiter((p[0] for p in unreleased), int, len(unreleased))),
np.array([p[1] for p in unreleased], dtype=prs_repos.dtype),
)
merged_mask &= np.in1d(prs_index, unreleased_index, invert=True)
merged_prs = prs.take(np.flatnonzero(merged_mask))
nonlocal releases
if extra_releases_task is not None:
await extra_releases_task
extra_releases, _ = extra_releases_task.result()
releases = releases.append(extra_releases, ignore_index=True)
labels = None
if logical_settings.has_logical_prs():
nonlocal physical_repositories
if physical_repositories is None:
physical_repositories = coerce_logical_repos(logical_repositories).keys()
if logical_settings.has_prs_by_label(physical_repositories):
await fetch_labels_task
labels = fetch_labels_task.result()
merged_prs = split_logical_repositories(
merged_prs, labels, logical_repositories, logical_settings)
else:
merged_prs = split_logical_repositories(merged_prs, None, set(), logical_settings)
df_facts, other_facts = await gather(
cls.mappers.map_prs_to_releases(
merged_prs, releases, matched_bys, branches, default_branches, time_to,
dags, release_settings, prefixer, account, meta_ids, mdb, pdb, cache,
labels=labels),
MergedPRFactsLoader.load_merged_unreleased_pull_request_facts(
prs.take(np.flatnonzero(anyhow_merged_mask & ~merged_mask)),
nonemax(releases[Release.published_at.name].nonemax(), time_to),
LabelFilter.empty(), matched_bys, default_branches, release_settings,
prefixer, account, pdb),
)
nonlocal facts
nonlocal unreleased_prs_event
df, facts, unreleased_prs_event = df_facts
facts.update(other_facts)
return df
async def _fetch_labels():
await fetch_labels_task
return fetch_labels_task.result()
@sentry_span
async def fetch_jira():
_map = aliased(NodePullRequestJiraIssues, name="m")
_issue = aliased(Issue, name="i")
_issue_epic = aliased(Issue, name="e")
selected = [
PullRequest.node_id, _issue.key, _issue.title, _issue.type, _issue.status,
_issue.created, _issue.updated, _issue.resolved, _issue.labels, _issue.components,
_issue.acc_id, _issue_epic.key.label("epic"),
]
if not with_jira:
df = pd.DataFrame(columns=[col.name for col in selected
if col not in (_issue.acc_id, _issue.components)])
df[PullRequest.node_id.name] = df[PullRequest.node_id.name].astype(int)
return df.set_index([PullRequest.node_id.name, _issue.key.name])
df = await read_sql_query(
sql.select(selected).select_from(sql.join(
PullRequest, sql.join(
_map, sql.join(_issue, _issue_epic, sql.and_(
_issue.epic_id == _issue_epic.id,
_issue.acc_id == _issue_epic.acc_id), isouter=True),
sql.and_(_map.jira_id == _issue.id,
_map.jira_acc == _issue.acc_id)),
sql.and_(PullRequest.node_id == _map.node_id,
PullRequest.acc_id == _map.node_acc),
)).where(sql.and_(PullRequest.node_id.in_(node_ids),
PullRequest.acc_id.in_(meta_ids),
_issue.is_deleted.is_(False))),
mdb, columns=selected, index=[PullRequest.node_id.name, _issue.key.name])
if df.empty:
df.drop([Issue.acc_id.name, Issue.components.name], inplace=True, axis=1)
return df
components = df[[Issue.acc_id.name, Issue.components.name]] \
.groupby(Issue.acc_id.name, sort=False).aggregate(lambda s: set(flatten(s)))
rows = await mdb.fetch_all(
sql.select([Component.acc_id, Component.id, Component.name])
.where(sql.or_(*(sql.and_(Component.id.in_(vals),
Component.acc_id == int(acc))
for acc, vals in zip(components.index.values,
components[Issue.components.name].values)))))
cmap = {}
for r in rows:
cmap.setdefault(r[0], {})[r[1]] = r[2].lower()
df[Issue.labels.name] = (
df[Issue.labels.name].apply(lambda i: [s.lower() for s in (i or [])])
+
df[[Issue.acc_id.name, Issue.components.name]]
.apply(lambda row: ([cmap[row[Issue.acc_id.name]][c]
for c in row[Issue.components.name]]
if row[Issue.components.name] is not None else []),
axis=1)
)
df.drop([Issue.acc_id.name, Issue.components.name], inplace=True, axis=1)
return df
# the order is important: it provides the best performance
# we launch coroutines from the heaviest to the lightest
dfs = await gather(
fetch_commits(),
map_releases(),
fetch_jira(),
fetch_reviews(),
fetch_review_comments(),
fetch_review_requests(),
fetch_comments(),
_fetch_labels(),
cls.fetch_pr_deployments(node_ids, prefixer, account, pdb, rdb),
)
dfs = PRDataFrames(prs, *dfs)
if len(merged_unreleased_indexes):
# if we truncate and there are PRs merged after `time_to`
merged_unreleased_prs = prs.take(merged_unreleased_indexes)
label_matches = np.flatnonzero(np.in1d(
dfs.labels.index.get_level_values(0).values,
merged_unreleased_prs.index.values))
labels = {}
for k, v in zip(dfs.labels.index.get_level_values(0).values[label_matches],
dfs.labels[PullRequestLabel.name.name].values[label_matches]):
try:
labels[k].append(v)
except KeyError:
labels[k] = [v]
other_unreleased_prs_event = asyncio.Event()
unreleased_prs_event = AllEvents(unreleased_prs_event, other_unreleased_prs_event)
merged_unreleased_prs = split_logical_repositories(
merged_unreleased_prs, dfs.labels, logical_repositories, logical_settings)
await defer(update_unreleased_prs(
merged_unreleased_prs, pd.DataFrame(), time_to, labels, matched_bys,
default_branches, release_settings, account, pdb, other_unreleased_prs_event),
"update_unreleased_prs/truncate(%d)" % len(merged_unreleased_indexes))
return dfs, facts, unreleased_prs_event
@classmethod
@sentry_span
async def mine(cls,
date_from: date,
date_to: date,
time_from: datetime,
time_to: datetime,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
branches: pd.DataFrame,
default_branches: Dict[str, str],
exclude_inactive: bool,
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]] = None,
truncate: bool = True,
) -> Tuple["PullRequestMiner",
PullRequestFactsMap,
Dict[str, ReleaseMatch],
asyncio.Event]:
"""
Mine metadata about pull requests according to the numerous filters.
:param account: State DB account ID.
:param meta_ids: Metadata (GitHub) account IDs.
:param date_from: Fetch PRs created starting from this date, inclusive.
:param date_to: Fetch PRs created ending with this date, inclusive.
:param time_from: Precise timestamp of since when PR events are allowed to happen.
:param time_to: Precise timestamp of until when PR events are allowed to happen.
:param repositories: PRs must belong to these repositories (prefix excluded).
:param participants: PRs must have these user IDs in the specified participation roles \
(OR aggregation). An empty dict means everybody.
:param labels: PRs must be labeled according to this filter's include & exclude sets.
:param jira: JIRA filters for those PRs that are matched with JIRA issues.
:param with_jira_map: Value indicating whether we must load JIRA issues mapped to PRs. \
This is independent of filtering PRs by `jira`.
:param branches: Preloaded DataFrame with branches in the specified repositories.
:param default_branches: Mapping from repository names to their default branch names.
:param exclude_inactive: PRs must have at least one event in the given time frame.
:param release_settings: Release match settings of the account.
:param logical_settings: Logical repository settings of the account.
:param updated_min: PRs must have the last update timestamp not older than it.
:param updated_max: PRs must have the last update timestamp not newer than it.
:param mdb: Metadata db instance.
:param pdb: Precomputed db instance.
:param rdb: Persistentdata db instance.
:param cache: memcached client to cache the collected data.
:param pr_blacklist: completely ignore the existence of these PR node IDs. \
The second tuple element is the ambiguous PRs: released by branch \
while there were no tag releases and the strategy is `tag_or_branch`.
:param truncate: activate the "time machine" and erase everything after `time_to`.
:return: 1. New `PullRequestMiner` with the PRs satisfying the specified filters. \
2. Precomputed facts about unreleased pull requests. \
This is an optimization which breaks the abstraction a bit. \
3. `matched_bys` - release matches for each repository. \
4. Synchronization for updating the pdb table with merged unreleased PRs. \
Another abstraction leakage that we have to deal with.
"""
date_from_with_time = datetime.combine(date_from, datetime.min.time(), tzinfo=timezone.utc)
date_to_with_time = datetime.combine(date_to, datetime.min.time(), tzinfo=timezone.utc)
assert time_from >= date_from_with_time
assert time_to <= date_to_with_time
dfs, facts, _, _, _, _, _, matched_bys, event = await cls._mine(
date_from, date_to, repositories, participants, labels, jira, with_jira_map, branches,
default_branches, exclude_inactive, release_settings, logical_settings,
updated_min, updated_max, pr_blacklist, truncate, prefixer, account, meta_ids,
mdb, pdb, rdb, cache)
cls._truncate_prs(dfs, time_from, time_to)
return cls(dfs), facts, matched_bys, event
@classmethod
@sentry_span
async def fetch_prs(cls,
time_from: Optional[datetime],
time_to: datetime,
repositories: Union[Set[str], KeysView[str]],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
exclude_inactive: bool,
pr_blacklist: Optional[BinaryExpression],
pr_whitelist: Optional[BinaryExpression],
branches: pd.DataFrame,
dags: Optional[Dict[str, DAG]],
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest,
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
fetch_branch_dags_task: Optional[asyncio.Task] = None,
with_labels: bool = False,
) -> Tuple[pd.DataFrame, Dict[str, DAG], Optional[pd.DataFrame]]:
"""
Query pull requests from mdb that satisfy the given filters.
Note: we cannot filter by regular PR labels here due to the DB schema limitations,
so the caller is responsible for fetching PR labels and filtering by them afterward.
Besides, we cannot filter by participation roles different from AUTHOR and MERGER.
Note: we cannot load PRs that closed before time_from but released between
`time_from` and `time_to`. Hence the caller should map_releases_to_prs separately.
There can be duplicates: PR closed between `time_from` and `time_to` and released
between `time_from` and `time_to`.
Note: we cannot load PRs that closed before time_from but deployed between
`time_from` and `time_to`. Hence the caller should map_deployments_to_prs separately.
There can be duplicates: PR closed between `time_from` and `time_to` and deployed
between `time_from` and `time_to`.
We have to resolve the merge commits of rebased PRs so that they do not appear
force-push-dropped.
:return: pandas DataFrame with the PRs indexed by node_id; \
commit DAGs that contain the branch heads; \
(if was required) DataFrame with PR labels.
"""
assert isinstance(mdb, Database)
assert isinstance(pdb, Database)
pr_list_coro = cls._fetch_prs_by_filters(
time_from, time_to, repositories, participants, labels, jira, exclude_inactive,
pr_blacklist, pr_whitelist, meta_ids, mdb, cache, columns=columns,
updated_min=updated_min, updated_max=updated_max,
)
if columns is not PullRequest and PullRequest.merge_commit_id not in columns and \
PullRequest.merge_commit_sha not in columns:
prs, labels = await pr_list_coro
return prs, dags, labels if with_labels else None
if fetch_branch_dags_task is None:
fetch_branch_dags_task = cls._fetch_branch_dags(
repositories, dags, branches, account, meta_ids, mdb, pdb, cache)
dags, (prs, labels) = await gather(fetch_branch_dags_task, pr_list_coro)
async def load_labels():
if not with_labels:
return None
if labels is not None:
return labels
return await fetch_labels_to_filter(prs.index.values, meta_ids, mdb)
prs, labels = await gather(
cls.mark_dead_prs(prs, branches, dags, meta_ids, mdb, columns),
load_labels(),
)
return prs, dags, labels
@classmethod
async def mark_dead_prs(cls,
prs: pd.DataFrame,
branches: pd.DataFrame,
dags: Dict[str, DAG],
meta_ids: Tuple[int, ...],
mdb: Database,
columns=PullRequest,
) -> pd.DataFrame:
"""
Add and fill "dead" column in the `prs` DataFrame.
A PR is considered dead (force-push-dropped) if it does not exist in the commit DAG and \
we cannot detect its rebased clone.
"""
prs["dead"] = False
if branches.empty:
return prs
merged_prs = prs.take(np.nonzero((
prs[PullRequest.merged_at.name] <= datetime.now(timezone.utc) - timedelta(hours=1)
).values)[0])
# timedelta(hours=1) must match the `exptime` of `fetch_repository_commits()`
# commit DAGs are cached and may not be fully up to date, so without this grace period
# some PRs could wrongly appear force-push-dropped; see also: DEV-554
if merged_prs.empty:
return prs
pr_numbers = merged_prs[PullRequest.number.name].values
assert merged_prs.index.nlevels == 1
pr_node_ids = merged_prs.index.values
pr_repos = merged_prs[PullRequest.repository_full_name.name].values
repo_order = np.argsort(pr_repos)
unique_pr_repos, pr_repo_counts = np.unique(pr_repos, return_counts=True)
pr_merge_hashes = \
merged_prs[PullRequest.merge_commit_sha.name].values.astype("S40")[repo_order]
pos = 0
queries = []
dead = []
acc_id_cond = PushCommit.acc_id.in_(meta_ids)
min_commit_date = merged_prs[PullRequest.merged_at.name].min()
committed_date_cond = PushCommit.committed_date >= min_commit_date
substr = sql.func.substr(PushCommit.message, 1, 32)
sqlite = mdb.url.dialect == "sqlite"
for repo, n_prs in zip(unique_pr_repos, pr_repo_counts):
begin_pos = pos
end_pos = pos + n_prs
pos += n_prs
repo_pr_merge_hashes = pr_merge_hashes[begin_pos:end_pos]
dag_hashes = dags[repo][0]
if len(dag_hashes) == 0:
# no branches found in `fetch_repository_commits()`
continue
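# membership test against the sorted DAG hash array: searchsorted_inrange() yields the
# would-be insertion position of each merge hash; if the hash stored there differs, the
# merge commit is absent from the DAG and the PR is a force-push-drop candidate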
not_found = dag_hashes[
searchsorted_inrange(dag_hashes, repo_pr_merge_hashes)
] != repo_pr_merge_hashes
indexes = repo_order[begin_pos:end_pos][not_found]
dead.extend(dead_node_ids := pr_node_ids[indexes])
repo_cond = PushCommit.repository_full_name == repo
for pr_node_id, n in zip(dead_node_ids, pr_numbers[indexes]):
if sqlite:
# SQLite does not support parameter recycling
acc_id_cond = PushCommit.acc_id.in_(meta_ids)
committed_date_cond = PushCommit.committed_date >= min_commit_date
substr = sql.func.substr(PushCommit.message, 1, 32)
repo_cond = PushCommit.repository_full_name == repo
queries.append(
sql.select([PushCommit.node_id.label("commit_node_id"),
PushCommit.sha.label("sha"),
sql.literal_column("'" + repo + "'").label("repo"),
sql.literal_column(str(pr_node_id)).label("pr_node_id"),
PushCommit.committed_date,
PushCommit.pushed_date])
.where(sql.and_(acc_id_cond,
repo_cond,
committed_date_cond,
substr.like("Merge pull request #%d from %%" % n))))
if not queries:
return prs
prs.loc[dead, "dead"] = True
# we may have MANY queries here, and Postgres would respond with StatementTooComplexError,
# so split them into 100-sized batches to stay below the resource limits
batch_size = 100
tasks = []
for batch_index in range(0, len(queries), batch_size):
batch = queries[batch_index:batch_index + batch_size]
if len(batch) == 1:
query = batch[0]
else:
query = sql.union_all(*batch)
tasks.append(read_sql_query(query, mdb, [
"commit_node_id", "sha", "repo", "pr_node_id",
PushCommit.committed_date, PushCommit.pushed_date,
]))
resolveds = await gather(*tasks, op="mark_dead_prs commit SQL UNION ALL-s")
resolved = pd.concat(resolveds)
# look up the candidates in the DAGs
pr_repos = resolved["repo"].values
repo_order = np.argsort(pr_repos)
unique_pr_repos, pr_repo_counts = np.unique(pr_repos, return_counts=True)
pr_merge_hashes = resolved["sha"].values.astype("S")[repo_order]
pos = 0
alive_indexes = []
for repo, n_prs in zip(unique_pr_repos, pr_repo_counts):
begin_pos = pos
end_pos = pos + n_prs
pos += n_prs
repo_pr_merge_hashes = pr_merge_hashes[begin_pos:end_pos]
dag_hashes = dags[repo][0]
found = dag_hashes[
searchsorted_inrange(dag_hashes, repo_pr_merge_hashes)
] == repo_pr_merge_hashes
alive_indexes.extend(repo_order[begin_pos:end_pos][found])
if (resolved := resolved.take(alive_indexes)).empty:
return prs
# take the commit that was committed the latest; if there are multiple, prefer the one
# with pushed_date = null
resolved.sort_values([PushCommit.committed_date.name, PushCommit.pushed_date.name],
ascending=False, inplace=True, na_position="first")
resolved.drop_duplicates("pr_node_id", inplace=True)
# patch the commit IDs and the hashes
alive_node_ids = resolved["pr_node_id"].values
if columns is PullRequest or PullRequest.merge_commit_id in columns:
prs.loc[alive_node_ids, PullRequest.merge_commit_id.name] = \
resolved["commit_node_id"].values
if columns is PullRequest or PullRequest.merge_commit_sha in columns:
prs.loc[alive_node_ids, PullRequest.merge_commit_sha.name] = resolved["sha"].values
prs.loc[alive_node_ids, "dead"] = False
return prs
@classmethod
async def _fetch_branch_dags(cls,
repositories: Iterable[str],
dags: Optional[Dict[str, DAG]],
branches: pd.DataFrame,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
) -> Dict[str, DAG]:
if dags is None:
dags = await fetch_precomputed_commit_history_dags(
repositories, account, pdb, cache)
return await fetch_repository_commits_no_branch_dates(
dags, branches, BRANCH_FETCH_COMMITS_COLUMNS, True, account, meta_ids,
mdb, pdb, cache)
@classmethod
@sentry_span
async def _fetch_prs_by_filters(cls,
time_from: Optional[datetime],
time_to: datetime,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
exclude_inactive: bool,
pr_blacklist: Optional[BinaryExpression],
pr_whitelist: Optional[BinaryExpression],
meta_ids: Tuple[int, ...],
mdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest,
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
assert (updated_min is None) == (updated_max is None)
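# NB: open PRs have closed_at = NULL; the CASE below substitutes a far-future sentinel
# ('3000-01-01') so that a single ">= time_from" range predicate covers both open PRs
# and PRs closed after time_from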
filters = [
(sql.case(
[(PullRequest.closed, PullRequest.closed_at)],
else_=sql.text("'3000-01-01'"), # backed up with a DB index
) >= time_from) if time_from is not None else sql.true(),
PullRequest.created_at < time_to,
PullRequest.acc_id.in_(meta_ids),
PullRequest.hidden.is_(False),
PullRequest.repository_full_name.in_(repositories),
]
if exclude_inactive and updated_min is None:
# this does not provide a 100% guarantee because the update can happen after time_to;
# we need to filter properly later
filters.append(PullRequest.updated_at >= time_from)
if updated_min is not None:
filters.append(PullRequest.updated_at.between(updated_min, updated_max))
if pr_blacklist is not None:
filters.append(pr_blacklist)
if pr_whitelist is not None:
filters.append(pr_whitelist)
if len(participants) == 1:
if PRParticipationKind.AUTHOR in participants:
filters.append(PullRequest.user_login.in_(
participants[PRParticipationKind.AUTHOR]))
elif PRParticipationKind.MERGER in participants:
filters.append(
PullRequest.merged_by_login.in_(participants[PRParticipationKind.MERGER]))
elif len(participants) == 2 and PRParticipationKind.AUTHOR in participants and \
PRParticipationKind.MERGER in participants:
filters.append(sql.or_(
PullRequest.user_login.in_(participants[PRParticipationKind.AUTHOR]),
PullRequest.merged_by_login.in_(participants[PRParticipationKind.MERGER]),
))
if columns is PullRequest:
selected_columns = [PullRequest]
remove_acc_id = False
else:
selected_columns = columns = list(columns)
if remove_acc_id := (PullRequest.acc_id not in selected_columns):
selected_columns.append(PullRequest.acc_id)
if PullRequest.merge_commit_id in columns or PullRequest.merge_commit_sha in columns:
# needed to resolve rebased merge commits
if PullRequest.number not in selected_columns:
selected_columns.append(PullRequest.number)
if labels:
singles, multiples = LabelFilter.split(labels.include)
embedded_labels_query = not multiples
if all_in_labels := (set(singles + list(chain.from_iterable(multiples)))):
filters.append(
sql.exists().where(sql.and_(
PullRequestLabel.acc_id == PullRequest.acc_id,
PullRequestLabel.pull_request_node_id == PullRequest.node_id,
sql.func.lower(PullRequestLabel.name).in_(all_in_labels),
)))
if labels.exclude:
filters.append(
sql.not_(sql.exists().where(sql.and_(
PullRequestLabel.acc_id == PullRequest.acc_id,
PullRequestLabel.pull_request_node_id == PullRequest.node_id,
sql.func.lower(PullRequestLabel.name).in_(labels.exclude),
))))
if not jira:
query = sql.select(selected_columns).where(sql.and_(*filters))
else:
query = await generate_jira_prs_query(
filters, jira, None, mdb, cache, columns=selected_columns)
prs = await read_sql_query(query, mdb, columns, index=PullRequest.node_id.name)
if remove_acc_id:
del prs[PullRequest.acc_id.name]
if PullRequest.closed.name in prs:
cls.adjust_pr_closed_merged_timestamps(prs)
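# drop duplicate PR node IDs (e.g. the JIRA join may yield several rows per PR),
# keeping the first occurrence of each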
_, first_encounters = np.unique(prs.index.values, return_index=True)
if len(first_encounters) < len(prs):
prs = prs.take(first_encounters)
if not labels or embedded_labels_query:
return prs, None
df_labels = await fetch_labels_to_filter(prs.index, meta_ids, mdb)
left = cls.find_left_by_labels(
prs.index, df_labels.index, df_labels[PullRequestLabel.name.name].values, labels)
prs = prs.take(np.flatnonzero(prs.index.isin(left)))
return prs, df_labels
@staticmethod
def adjust_pr_closed_merged_timestamps(prs_df: pd.DataFrame) -> None:
"""Force set `closed_at` and `merged_at` to NULL if not `closed`. Remove `closed`."""
not_closed = ~prs_df[PullRequest.closed.name].values
prs_df.loc[not_closed, PullRequest.closed_at.name] = pd.NaT
prs_df.loc[not_closed, PullRequest.merged_at.name] = pd.NaT
prs_df.drop(columns=PullRequest.closed.name, inplace=True)
@classmethod
@sentry_span
async def _fetch_inactive_merged_unreleased_prs(
cls,
time_from: datetime,
time_to: datetime,
repos: Union[Set[str], KeysView[str]],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
default_branches: Dict[str, str],
release_settings: ReleaseSettings,
has_logical_repos: bool,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client]) -> pd.DataFrame:
node_id_map = await discover_inactive_merged_unreleased_prs(
time_from, time_to, repos, participants, labels, default_branches, release_settings,
prefixer, account, pdb, cache)
if not jira:
return await read_sql_query(sql.select([PullRequest])
.where(PullRequest.node_id.in_(node_id_map)),
mdb, PullRequest, index=PullRequest.node_id.name)
df = await cls.filter_jira(node_id_map, jira, meta_ids, mdb, cache)
if not has_logical_repos:
return df
append = defaultdict(list)
node_ids = df.index.values
repository_full_names = df[PullRequest.repository_full_name.name].values
for i, (pr_node_id, physical_repo) in enumerate(zip(node_ids, repository_full_names)):
logical_repos = node_id_map[pr_node_id]
if physical_repo != (first_logical_repo := logical_repos[0]):
repository_full_names[i] = first_logical_repo
for logical_repo in logical_repos[1:]:
append[logical_repo].append(i)
if append:
chunks = []
for logical_repo, indexes in append.items():
subdf = df.take(indexes)
subdf[PullRequest.repository_full_name.name] = logical_repo
chunks.append(subdf)
df = pd.concat([df] + chunks)
return df
@classmethod
@sentry_span
async def filter_jira(cls,
pr_node_ids: Collection[int],
jira: JIRAFilter,
meta_ids: Tuple[int, ...],
mdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest) -> pd.DataFrame:
"""Filter PRs by JIRA properties."""
assert jira
filters = [PullRequest.node_id.in_(pr_node_ids)]
query = await generate_jira_prs_query(filters, jira, meta_ids, mdb, cache, columns=columns)
query = query.with_statement_hint(f"Rows(pr repo #{len(pr_node_ids)})")
return await read_sql_query(
query, mdb, columns, index=PullRequest.node_id.name)
@classmethod
@sentry_span
async def fetch_pr_deployments(cls,
pr_node_ids: Iterable[int],
prefixer: Prefixer,
account: int,
pdb: Database,
rdb: Database,
) -> pd.DataFrame:
"""Load the deployments for each PR node ID."""
ghprd = GitHubPullRequestDeployment
cols = [ghprd.pull_request_id, ghprd.deployment_name, ghprd.repository_id]
df = await read_sql_query(
sql.select(cols)
.where(sql.and_(ghprd.acc_id == account,
ghprd.pull_request_id.in_any_values(pr_node_ids))),
con=pdb, columns=cols, index=ghprd.deployment_name.name)
cols = [DeploymentNotification.name,
DeploymentNotification.environment,
DeploymentNotification.conclusion,
DeploymentNotification.finished_at]
details = await read_sql_query(
sql.select(cols)
.where(sql.and_(DeploymentNotification.account_id == account,
DeploymentNotification.name.in_(df.index.values))),
con=rdb, columns=cols, index=DeploymentNotification.name.name,
)
details.index.name = ghprd.deployment_name.name
df = df.join(details)
df.reset_index(inplace=True)
df.set_index([ghprd.pull_request_id.name, ghprd.deployment_name.name], inplace=True)
repo_node_to_name = prefixer.repo_node_to_name.get
df[PullRequest.repository_full_name.name] = \
[repo_node_to_name(r) for r in df[ghprd.repository_id.name].values]
return df
@staticmethod
def _check_participants_compatibility(cached_participants: PRParticipants,
participants: PRParticipants) -> bool:
if not cached_participants:
return True
if not participants:
return False
for k, v in participants.items():
if v - cached_participants.get(k, set()):
return False
return True
@classmethod
@sentry_span
def _remove_spurious_prs(cls, time_from: datetime, dfs: PRDataFrames) -> None:
old_releases = np.where(dfs.releases[Release.published_at.name] < time_from)[0]
if len(old_releases) == 0:
return
cls._drop(dfs, dfs.releases.index[old_releases])
@classmethod
def _drop(cls, dfs: PRDataFrames, pr_ids: Collection[int]) -> None:
if len(pr_ids) == 0:
return
for df in dfs.values():
df.drop(pr_ids,
level=0 if isinstance(df.index, pd.MultiIndex) else None,
inplace=True,
errors="ignore")
@classmethod
@sentry_span
def _find_drop_by_participants(cls,
dfs: PRDataFrames,
participants: PRParticipants,
time_to: Optional[datetime],
) -> pd.Index:
if not participants:
return pd.Index([])
if time_to is not None:
for df_name, col in (("commits", PullRequestCommit.committed_date),
("reviews", PullRequestReview.created_at),
("review_comments", PullRequestReviewComment.created_at),
("review_requests", PullRequestReviewRequest.created_at),
("comments", PullRequestComment.created_at)):
df = getattr(dfs, df_name)
setattr(dfs, df_name, df.take(np.where(df[col.name] < time_to)[0]))
passed = []
dict_iter = (
(dfs.prs, PullRequest.user_login, None, PRParticipationKind.AUTHOR),
(dfs.prs, PullRequest.merged_by_login, PullRequest.merged_at, PRParticipationKind.MERGER), # noqa
(dfs.releases, Release.author, Release.published_at, PRParticipationKind.RELEASER),
)
for df, part_col, date_col, pk in dict_iter:
col_parts = participants.get(pk)
if not col_parts:
continue
mask = df[part_col.name].isin(col_parts)
if time_to is not None and date_col is not None:
mask &= df[date_col.name] < time_to
passed.append(df.index.get_level_values(0).take(np.flatnonzero(mask)))
reviewers = participants.get(PRParticipationKind.REVIEWER)
if reviewers:
ulkr = PullRequestReview.user_login.name
ulkp = PullRequest.user_login.name
user_logins = pd.merge(dfs.reviews[ulkr].droplevel(1), dfs.prs[ulkp],
left_index=True, right_index=True, how="left", copy=False)
ulkr += "_x"
ulkp += "_y"
passed.append(user_logins.index.take(np.where(
(user_logins[ulkr] != user_logins[ulkp]) & user_logins[ulkr].isin(reviewers),
)[0]).unique())
for df, col, pk in (
(dfs.comments, PullRequestComment.user_login, PRParticipationKind.COMMENTER),
(dfs.commits, PullRequestCommit.author_login, PRParticipationKind.COMMIT_AUTHOR),
(dfs.commits, PullRequestCommit.committer_login, PRParticipationKind.COMMIT_COMMITTER)): # noqa
col_parts = participants.get(pk)
if not col_parts:
continue
passed.append(df.index.get_level_values(0).take(np.flatnonzero(
df[col.name].isin(col_parts))).unique())
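# reduce the collected index sets with pairwise (tournament-style) unions; the result is
# the union of all of them while each individual union stays small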
while len(passed) > 1:
new_passed = []
for i in range(0, len(passed), 2):
if i + 1 < len(passed):
new_passed.append(passed[i].union(passed[i + 1]))
else:
new_passed.append(passed[i])
passed = new_passed
return dfs.prs.index.get_level_values(0).difference(passed[0])
@classmethod
@sentry_span
def _find_drop_by_labels(cls, dfs: PRDataFrames, labels: LabelFilter) -> pd.Index:
if not labels:
return pd.Index([])
df_labels_index = dfs.labels.index.get_level_values(0)
df_labels_names = dfs.labels[PullRequestLabel.name.name].values
pr_node_ids = dfs.prs.index.get_level_values(0)
left = cls.find_left_by_labels(pr_node_ids, df_labels_index, df_labels_names, labels)
if not labels.include:
return df_labels_index.difference(left)
return pr_node_ids.difference(left)
@classmethod
def find_left_by_labels(cls,
full_index: pd.Index,
df_labels_index: pd.Index,
df_labels_names: Sequence[str],
labels: LabelFilter) -> pd.Index:
"""
Post-filter PRs by their loaded labels.
:param full_index: All the PR node IDs, not just those that correspond to labeled PRs.
:param df_labels_index: (PR node ID, label name) DataFrame index. There may be several \
rows for the same PR node ID.
:param df_labels_names: label name values aligned with `df_labels_index`.
"""
left_include = left_exclude = None
if labels.include:
singles, multiples = LabelFilter.split(labels.include)
left_include = df_labels_index.take(
np.nonzero(np.in1d(df_labels_names, singles))[0],
).unique()
for group in multiples:
passed = df_labels_index
for label in group:
passed = passed.intersection(
df_labels_index.take(np.nonzero(df_labels_names == label)[0]))
if passed.empty:
break
left_include = left_include.union(passed)
if labels.exclude:
left_exclude = full_index.difference(df_labels_index.take(
np.nonzero(np.in1d(df_labels_names, list(labels.exclude)))[0],
).unique())
if labels.include:
if labels.exclude:
left = left_include.intersection(left_exclude)
else:
left = left_include
else:
left = left_exclude
return left
@classmethod
@sentry_span
def _find_drop_by_jira(cls, dfs: PRDataFrames, jira: JIRAFilter) -> pd.Index:
if not jira:
return pd.Index([])
# coding: utf-8
# ### SHL project
#
# * training module: shl_tm (under construction)
#
# * prediction module: shl_pm (completed)
#
# * simulation module: shl_sm (completed, pending OCR)
#
# * misc module: shl_mm (under construction)
#
#
# ### data feeds:
#
# * historical bidding price, per second, time series (for machine learning, under construction)
#
# * live bidding price, per second, time series (for real-time prediction, completed: shl_pm)
#
# ### parameter lookup table/dataframe
#
# * parm_si (seasonality index per second)
#
# * parm_month (parameters like alpha, beta, gamma, etc. per month)
# In[1]:
import pandas as pd
# In[2]:
# function to fetch Seasonality-Index
def shl_intra_fetch_si(ccyy_mm, time, shl_data_parm_si):
# return shl_data_parm_si[(shl_data_parm_si['ccyy-mm'] == '2017-09') & (shl_data_parm_si['time'] == '11:29:00')]
return shl_data_parm_si[(shl_data_parm_si['ccyy-mm'] == ccyy_mm) & (shl_data_parm_si['time'] == time)].iloc[0]['si']
# In[3]:
# function to fetch Dynamic-Increment
def shl_intra_fetch_di(ccyy_mm, shl_data_parm_month):
return shl_data_parm_month[shl_data_parm_month['ccyy-mm'] == ccyy_mm].iloc[0]['di']
# In[4]:
def shl_intra_fetch_previous_n_sec_time_as_str(shl_data_time_field, n):
return str((pd.to_datetime(shl_data_time_field, format='%H:%M:%S') - pd.Timedelta(seconds=n)).time())
def shl_intra_fetch_future_n_sec_time_as_str(shl_data_time_field, n):
return str((pd.to_datetime(shl_data_time_field, format='%H:%M:%S') + pd.Timedelta(seconds=n)).time())
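# In[ ]:
# Quick sanity check of the helpers above (illustrative only; `_demo_parm_si` and
# `_demo_parm_month` are made-up mini tables, the real parm_si / parm_month data
# is loaded elsewhere in the project).
_demo_parm_si = pd.DataFrame({'ccyy-mm': ['2017-09'], 'time': ['11:29:00'], 'si': [1.02]})
_demo_parm_month = pd.DataFrame({'ccyy-mm': ['2017-09'], 'di': [100]})
print(shl_intra_fetch_si('2017-09', '11:29:00', _demo_parm_si))    # -> 1.02
print(shl_intra_fetch_di('2017-09', _demo_parm_month))             # -> 100
print(shl_intra_fetch_previous_n_sec_time_as_str('11:29:05', 5))   # -> '11:29:00'
print(shl_intra_fetch_future_n_sec_time_as_str('11:29:05', 5))     # -> '11:29:10'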
import os
import json
import pandas as pd
import numpy as np
import logging
import shutil
from linker.core.base import (link_config,
COLUMN_TYPES,
LINKING_RELATIONSHIPS)
from linker.core.files import LinkFiles
from linker.core.memory_link_base import MemoryLinkBase
from linker.reports.report import generate_linking_summary
logger = logging.getLogger(__name__)
class MemoryLink(MemoryLinkBase):
def __init__(self, project):
if project is None:
raise TypeError
super(MemoryLink, self).__init__(project)
self.matched_not_linked = None
self.project_type = 'LINK'
datasets = project['datasets']
self.left_index = datasets[0]['index_field']
self.right_index = datasets[1]['index_field']
self.left_entity = datasets[0]['entity_field']
self.right_entity = datasets[1]['entity_field']
self.left_index_type = None
self.right_index_type = None
def __str__(self):
if self.project is None:
return ''
relationship = None
for rel in LINKING_RELATIONSHIPS:
if rel[0] == self.project['relationship_type']:
relationship = rel[1]
descriptor = super(MemoryLink, self).__str__()
data_dict = json.loads(descriptor)
data_dict['datasets'] = [dataset['name'] for dataset in
self.project['datasets']]
data_dict['Relationship_type'] = relationship
return json.dumps(data_dict, indent=4)
def load_data(self):
logger.debug('>>--- load_data --->>')
logger.info('Loading input dataset for project: %s with task id: %s.',
self.project['name'], self.project['task_uuid'])
datasets = self.project['datasets']
if datasets and len(datasets) > 1:
self.left_columns += [datasets[0]['index_field'],
datasets[0]['entity_field']]
self.right_columns += [datasets[1]['index_field'],
datasets[1]['entity_field']]
if 'data_types' in datasets[0]:
left_dtypes = {}
for col_name, col_type in datasets[0]["data_types"].items():
left_dtypes[col_name] = COLUMN_TYPES[col_type]
if self.left_index in left_dtypes:
self.left_index_type = left_dtypes[self.left_index]
else:
left_dtypes = None
if 'data_types' in datasets[1]:
right_dtypes = {}
for col_name, col_type in datasets[1]["data_types"].items():
right_dtypes[col_name] = COLUMN_TYPES[col_type]
if self.right_index in right_dtypes:
self.right_index_type = right_dtypes[self.right_index]
else:
right_dtypes = None
try:
left_usecols = datasets[0]['columns'] or self.left_columns
except KeyError:
left_usecols = self.left_columns
logger.debug('Left columns: %s.', left_usecols)
logger.debug('Left index: %s', self.left_index)
logger.debug('Left data types: %s', left_dtypes)
self.left_dataset = pd.read_csv(datasets[0]['url'],
index_col=self.left_index,
usecols=left_usecols,
skipinitialspace=True,
dtype=left_dtypes)
try:
right_usecols = datasets[1]['columns'] or self.right_columns
except KeyError:
right_usecols = self.right_columns
logger.debug('Right columns: %s.', right_usecols)
logger.debug('Right index: %s', self.right_index)
logger.debug('Right data types: %s', right_dtypes)
self.right_dataset = pd.read_csv(datasets[1]['url'],
index_col=self.right_index,
usecols=right_usecols,
skipinitialspace=True,
dtype=right_dtypes)
logger.debug('<<--- load_data ---<<')
def link(self, seq, relationship='1T1'):
"""
Links the matched record pairs based on the relationship type.
Filters all the record pairs that don't agree on the relationship type.
:param seq: sequence number
:param relationship: Relationship type
'1T1': One-To-One, default
'1TN': One-To-Many
'NT1': Many-To-One
:return: Linked record pairs.
"""
logger.debug('>>--- link --->>')
logger.info('Linking the records pairs based on the relationship type.')
match_file_path = self.temp_path + LinkFiles.TEMP_MATCHED_FILE
matched = pd.read_csv(match_file_path,
index_col=['LEFT_' + self.left_index,
'RIGHT_' + self.right_index])
group_field = 'RIGHT_' + self.right_entity
filter_field = 'LEFT_' + self.left_entity
if relationship == 'MT1':
group_field, filter_field = filter_field, group_field
relationship_group = matched.groupby(group_field)
linked = relationship_group.filter(
lambda x: len(x[filter_field].unique()) == 1)
if relationship == '1T1':
group_field, filter_field = filter_field, group_field
relationship_group = linked.groupby(group_field)
linked = relationship_group.filter(
lambda x: len(x[filter_field].unique()) == 1)
linked = linked.assign(STEP=seq)
logger.info('Assigning link id to the selected subset of record pairs.')
left_entity_id = 'LEFT_' + self.left_entity
right_entity_id = 'RIGHT_' + self.right_entity
link_index = linked.reset_index()[
[left_entity_id, right_entity_id]].drop_duplicates()
link_index = link_index.set_index([left_entity_id, right_entity_id])
link_index['LINK_ID'] = pd.Series(
[MemoryLinkBase.get_next_id() for row in link_index.index],
index=link_index.index)
link_index['LINK_ID'] = link_index['LINK_ID'].map(
lambda x: '{:.0f}'.format(x)
if pd.notnull(x)
else np.nan)
linked = linked.join(link_index,
on=[left_entity_id, right_entity_id],
how='inner')
matched_not_linked = self.get_rows_not_in(matched, linked.index)
matched_not_linked['STEP'] = seq
logger.debug('<<--- link ---<<')
return linked, matched_not_linked
def run(self):
logger.debug('>>--- run --->>')
logger.info('Executing linking project %s. Task id: %s.',
self.project['name'], self.project['task_uuid'])
self.steps = {}
self.total_records_linked = 0
self.total_entities = 0
logger.info('Project steps: %s', len(self.project['steps']))
MemoryLinkBase.reset_id()
for step in self.project['steps']:
self.steps[step['seq']] = {}
logger.info("Linking Step %s :", step['seq'])
logger.info("%s.1) Finding record pairs satisfying blocking constraints...",
step['seq'])
self.pair_n_match(step=step['seq'],
link_method=step['linking_method'],
blocking=step['blocking_schema'],
linking=step['linking_schema'])
logger.info("%s.2) Identifying the linked records based on the relationship type...",
step['seq'])
step_linked, step_matched_not_linked = self.link(step['seq'], self.project['relationship_type'])
left_index = 'LEFT_' + self.left_index
right_index = 'RIGHT_' + self.right_index
left_entity_id = 'LEFT_' + self.left_entity
right_entity_id = 'RIGHT_' + self.right_entity
self.steps[step['seq']]['total_records_linked'] = len(
step_linked.index.values)
self.total_records_linked += len(step_linked.index.values)
self.steps[step['seq']]['total_entities'] = len(
step_linked.groupby([left_entity_id, right_entity_id]))
self.total_entities += self.steps[step['seq']]['total_entities']
if not step_linked.empty:
# Create EntityID - LinkId map
left_links = step_linked[
[left_entity_id, 'LINK_ID']].drop_duplicates()
left_links = left_links.reset_index().set_index(
left_entity_id)['LINK_ID']
left_match = self.left_dataset.join(left_links,
on=self.left_entity,
how='inner')
linked = pd.merge(
left_match.reset_index(),
step_linked.reset_index(),
left_on=self.left_index,
right_on=left_index,
how='left'
)
linked.drop(left_index, axis=1, inplace=True)
linked.drop(left_entity_id, axis=1, inplace=True)
linked.rename(columns={self.left_index: left_index},
inplace=True)
linked.rename(columns={self.left_entity: left_entity_id},
inplace=True)
right_links = step_linked[
[right_entity_id, 'LINK_ID']].drop_duplicates()
right_links = right_links.reset_index().set_index(
right_entity_id)['LINK_ID']
right_match = self.right_dataset.join(right_links,
on=self.right_entity,
how='inner')
linked = pd.merge(
linked,
right_match.reset_index(),
left_on=right_index,
right_on=self.right_index,
how='right'
)
linked.drop(right_index, axis=1, inplace=True)
linked.drop(right_entity_id, axis=1, inplace=True)
linked.rename(columns={self.right_index: right_index},
inplace=True)
linked.rename(columns={self.right_entity: right_entity_id},
inplace=True)
linked.drop(['LINK_ID_x', 'LINK_ID_y'], axis=1, inplace=True)
else:
linked = pd.DataFrame()
self.linked = linked if self.linked is None \
else self.linked.append(linked)
self.steps[step['seq']]['total_matched_not_linked'] = len(
step_matched_not_linked.index.values)
if self.matched_not_linked is None:
self.matched_not_linked = step_matched_not_linked
else:
self.matched_not_linked = self.matched_not_linked.append(
step_matched_not_linked)
self.left_dataset = self.left_dataset[
~self.left_dataset[self.left_entity].isin(
step_linked[left_entity_id])]
self.right_dataset = self.right_dataset[
~self.right_dataset[self.right_entity].isin(
step_linked[right_entity_id])]
logger.info("Number of records linked at step %s: %s",
step['seq'], len(self.linked))
temp_match_file_path = self.temp_path + LinkFiles.TEMP_MATCHED_FILE
# Delete temporary matched file.
if os.path.isfile(temp_match_file_path):
os.remove(temp_match_file_path)
logger.info('Execution of linking project %s with Task id: %s is completed.',
self.project['name'], self.project['task_uuid'])
logger.debug('<<--- run ---<<')
def save(self):
logger.debug('>>--- save --->>')
left_index = 'LEFT_' + self.left_index
right_index = 'RIGHT_' + self.right_index
left_entity_id = 'LEFT_' + self.left_entity
right_entity_id = 'RIGHT_' + self.right_entity
grouped = self.matched_not_linked.reset_index().groupby([
left_index, right_index, left_entity_id, right_entity_id]).agg(
{'STEP': 'min'})
self.matched_not_linked = pd.DataFrame(grouped)
# Storing linked data records.
logger.info("Preparing output files of the linking project %s with tsk id %s.",
self.project['name'], self.project['task_uuid'])
linked_file_path = self.project['output_root'] + link_config.get('linked_data_file', 'linked_data.csv')
self.linked['STEP'] = self.linked['STEP'].map(
lambda x: '{:.0f}'.format(x)
if pd.notnull(x)
else np.nan)
self.linked[right_entity_id] = self.linked[right_entity_id].map(
lambda x: '{:.0f}'.format(x)
if pd.notnull(x)
else np.nan)
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from tests.settings import POSTGRESQL_ENGINE, SQLITE_ENGINE
from tests.utils import get_repository_path, DBTest
from ukbrest.common.pheno2sql import Pheno2SQL
class Pheno2SQLTest(DBTest):
@unittest.skip('sqlite being removed')
def test_sqlite_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check table exists
tmp = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not tmp.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_exit(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
temp_dir = tempfile.mkdtemp()
# Run
with Pheno2SQL(csv_file, db_engine, tmpdir=temp_dir) as p2sql:
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
## Check that temporary files were deleted
assert len(os.listdir(temp_dir)) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_custom_tmpdir(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
with Pheno2SQL(csv_file, db_engine, tmpdir='/tmp/custom/directory/here', delete_temp_csv=False) as p2sql:
# Run
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
## Check that temporary files are still there
assert len(os.listdir('/tmp/custom/directory/here')) > 0
## Check that the temporary directory is now clean
assert len(os.listdir('/tmp/custom/directory/here')) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_auxiliary_table_is_created(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check auxiliary table existence
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('fields'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
def test_postgresql_auxiliary_table_is_created_and_has_minimum_data_required(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
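# (illustrative, not part of the test suite) the 'fields' table is what lets a caller
# locate a column before querying, roughly:
#   table = pd.read_sql("select table_name from fields where column_name = 'c48_0_0'",
#                       create_engine(db_engine)).iloc[0, 0]
#   pd.read_sql('select eid, c48_0_0 from {}'.format(table), create_engine(db_engine))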
def test_postgresql_auxiliary_table_with_more_information(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
assert len(tmp.columns) == len(expected_columns), len(tmp.columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'field_id'] == '21'
assert tmp.loc['c21_0_0', 'inst'] == 0
assert tmp.loc['c21_0_0', 'arr'] == 0
assert tmp.loc['c21_0_0', 'coding'] == 100261
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_0_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_0_0', 'description'] == 'An string value'
assert tmp.loc['c21_1_0', 'field_id'] == '21'
assert tmp.loc['c21_1_0', 'inst'] == 1
assert tmp.loc['c21_1_0', 'arr'] == 0
assert tmp.loc['c21_1_0', 'coding'] == 100261
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_1_0', 'description'] == 'An string value'
assert tmp.loc['c21_2_0', 'field_id'] == '21'
assert tmp.loc['c21_2_0', 'inst'] == 2
assert tmp.loc['c21_2_0', 'arr'] == 0
assert tmp.loc['c21_2_0', 'coding'] == 100261
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_2_0', 'description'] == 'An string value'
assert tmp.loc['c31_0_0', 'field_id'] == '31'
assert tmp.loc['c31_0_0', 'inst'] == 0
assert tmp.loc['c31_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c31_0_0', 'coding'])
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c31_0_0', 'type'] == 'Date'
assert tmp.loc['c31_0_0', 'description'] == 'A date'
assert tmp.loc['c34_0_0', 'field_id'] == '34'
assert tmp.loc['c34_0_0', 'inst'] == 0
assert tmp.loc['c34_0_0', 'arr'] == 0
assert tmp.loc['c34_0_0', 'coding'] == 9
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'type'] == 'Integer'
assert tmp.loc['c34_0_0', 'description'] == 'Some integer'
assert tmp.loc['c46_0_0', 'field_id'] == '46'
assert tmp.loc['c46_0_0', 'inst'] == 0
assert tmp.loc['c46_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c46_0_0', 'coding'])
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'type'] == 'Integer'
assert tmp.loc['c46_0_0', 'description'] == 'Some another integer'
assert tmp.loc['c47_0_0', 'field_id'] == '47'
assert tmp.loc['c47_0_0', 'inst'] == 0
assert tmp.loc['c47_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c47_0_0', 'coding'])
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c47_0_0', 'type'] == 'Continuous'
assert tmp.loc['c47_0_0', 'description'] == 'Some continuous value'
assert tmp.loc['c48_0_0', 'field_id'] == '48'
assert tmp.loc['c48_0_0', 'inst'] == 0
assert tmp.loc['c48_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c48_0_0', 'coding'])
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'type'] == 'Time'
assert tmp.loc['c48_0_0', 'description'] == 'Some time'
def test_postgresql_auxiliary_table_check_types(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
assert len(tmp.columns) == len(expected_columns), len(tmp.columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
sql_types = """
select column_name, data_type
from information_schema.columns
where table_name = 'fields';
"""
tmp = pd.read_sql(sql_types, create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['field_id', 'data_type'] == 'text'
assert tmp.loc['inst', 'data_type'] == 'bigint'
assert tmp.loc['arr', 'data_type'] == 'bigint'
assert tmp.loc['coding', 'data_type'] == 'bigint'
assert tmp.loc['table_name', 'data_type'] == 'text'
assert tmp.loc['type', 'data_type'] == 'text'
assert tmp.loc['description', 'data_type'] == 'text'
def test_postgresql_auxiliary_table_constraints(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('fields', column_query='column_name', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
# indexes on the metadata columns of 'fields'
constraint_sql = self._get_table_contrains('fields', relationship_query='ix_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 6
assert 'arr' in columns
assert 'field_id' in columns
assert 'inst' in columns
assert 'table_name' in columns
assert 'type' in columns
assert 'coding' in columns
def test_postgresql_two_csv_files(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
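# Each input CSV gets its own table group: csv01 is loaded into ukb_pheno_0_* and csv02 into ukb_pheno_1_*.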
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_1_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_1_00', create_engine(db_engine))
expected_columns = ["eid","c100_0_0", "c100_1_0", "c100_2_0", "c110_0_0", "c120_0_0", "c130_0_0", "c140_0_0", "c150_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 5
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
assert int(tmp.loc[1, 'c34_0_0']) == -33
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 41.55312
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert tmp.loc[5, 'c21_0_0'] == 'Option number 5'
assert tmp.loc[5, 'c21_1_0'] == 'Maybe'
assert tmp.loc[5, 'c21_2_0'] == 'Probably'
assert pd.isnull(tmp.loc[5, 'c31_0_0'])
assert int(tmp.loc[5, 'c34_0_0']) == -4
assert int(tmp.loc[5, 'c46_0_0']) == 1
assert pd.isnull(tmp.loc[5, 'c47_0_0'])
assert tmp.loc[5, 'c48_0_0'].strftime('%Y-%m-%d') == '1999-10-11'
tmp = pd.read_sql('select * from ukb_pheno_1_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 3
assert int(tmp.loc[1, 'c100_0_0']) == -9
assert int(tmp.loc[1, 'c100_1_0']) == 3
assert pd.isnull(tmp.loc[1, 'c100_2_0'])
assert tmp.loc[1, 'c110_0_0'].round(5) == 42.55312
assert int(tmp.loc[1, 'c120_0_0']) == -33
assert tmp.loc[1, 'c130_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c140_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
assert tmp.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert pd.isnull(tmp.loc[3, 'c100_0_0'])
assert int(tmp.loc[3, 'c100_1_0']) == -4
assert int(tmp.loc[3, 'c100_2_0']) == -10
assert tmp.loc[3, 'c110_0_0'].round(5) == -35.31471
assert int(tmp.loc[3, 'c120_0_0']) == 0
assert tmp.loc[3, 'c130_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c140_0_0'].strftime('%Y-%m-%d') == '1997-04-15'
assert pd.isnull(tmp.loc[3, 'c150_0_0'])
@unittest.skip('sqlite being removed')
def test_sqlite_query_single_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'] == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'] == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'] == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'] == '2011-02-15'
def test_postgresql_query_single_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
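# n_columns_per_table=999999 forces every column into a single table, so the query below only touches one table.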
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
def test_postgresql_two_csv_files_query_single_table(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2020-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '1990-02-15'
assert query_result.loc[5, 'c48_0_0'].strftime('%Y-%m-%d') == '1999-10-11'
@unittest.skip('sqlite being removed')
def test_sqlite_query_multiple_tables(self):
# RIGHT and FULL OUTER JOINs are not currently supported
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = p2sql.query(columns)
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'] == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'] == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'] == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'] == '2011-02-15'
def test_postgresql_query_multiple_tables(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
def test_postgresql_two_csv_files_query_multiple_tables(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
assert pd.isnull(query_result.loc[2, 'c110_0_0'])
assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
assert pd.isnull(query_result.loc[4, 'c110_0_0'])
assert pd.isnull(query_result.loc[5, 'c110_0_0'])
assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert pd.isnull(query_result.loc[3, 'c150_0_0'])
assert pd.isnull(query_result.loc[4, 'c150_0_0'])
assert pd.isnull(query_result.loc[5, 'c150_0_0'])
def test_postgresql_two_csv_files_flipped_query_multiple_tables(self):
# Prepare
# In this test the files are just flipped
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv02, csv01), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
assert pd.isnull(query_result.loc[2, 'c110_0_0'])
assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
assert pd.isnull(query_result.loc[4, 'c110_0_0'])
assert pd.isnull(query_result.loc[5, 'c110_0_0'])
assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert pd.isnull(query_result.loc[3, 'c150_0_0'])
assert pd.isnull(query_result.loc[4, 'c150_0_0'])
assert pd.isnull(query_result.loc[5, 'c150_0_0'])
@unittest.skip('sqlite being removed')
def test_sqlite_query_custom_columns(self):
# SQLite is very limited when selecting variables, renaming, doing math operations, etc
pass
def test_postgresql_query_custom_columns(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
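# Besides plain column names, raw SQL expressions with an alias can be requested (here PostgreSQL's '^' power operator).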
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', '(c47_0_0 ^ 2.0) as c47_squared']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c47_squared'] for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
assert query_result.loc[3, 'c47_0_0'].round(5) == -5.32471
assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
assert query_result.loc[1, 'c47_squared'].round(5) == round(45.55412 ** 2, 5)
assert query_result.loc[2, 'c47_squared'].round(5) == round((-0.55461) ** 2, 5)
assert query_result.loc[3, 'c47_squared'].round(5) == round((-5.32471) ** 2, 5)
assert query_result.loc[4, 'c47_squared'].round(5) == round(55.19832 ** 2, 5)
@unittest.skip('sqlite being removed')
def test_sqlite_query_single_filter(self):
# RIGHT and FULL OUTER JOINs are not currently supported
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0']
filter = ['c47_0_0 > 0']
query_result = p2sql.query(columns, filter)
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 4))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
def test_postgresql_query_single_filter(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0']
filter = ['c47_0_0 > 0']
query_result = next(p2sql.query(columns, filterings=filter))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 4))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
@unittest.skip('sqlite being removed')
def test_sqlite_query_multiple_and_filter(self):
# RIGHT and FULL OUTER JOINs are not currently supported
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']
filter = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]
query_result = p2sql.query(columns, filter)
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 2))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
def test_postgresql_query_multiple_and_filter(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']
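# Multiple filter expressions are combined with AND; only rows satisfying both conditions are expected below.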
filter = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]
query_result = next(p2sql.query(columns, filterings=filter))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 2))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
@unittest.skip('sqlite being removed')
def test_sqlite_float_is_empty(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example03.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c21_1_0'] == 'Of course'
assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(tmp.loc[4, 'c21_2_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[3, 'c31_0_0'] == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
# FIXME: this is strange; the data type in this particular case needs np.round
assert np.round(tmp.loc[1, 'c47_0_0'], 5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'] == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2016-11-30'
assert pd.isnull(tmp.loc[3, 'c47_0_0'])
assert tmp.loc[3, 'c48_0_0'] == '2010-01-01'
def test_postgresql_float_is_empty(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example03.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c21_1_0'] == 'Of course'
assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(tmp.loc[4, 'c21_2_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert pd.isnull(tmp.loc[3, 'c47_0_0'])
assert tmp.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
def test_postgresql_timestamp_is_empty(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example04.csv')
db_engine = 'postgresql://test:test@localhost:5432/ukb'
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c21_1_0'] == 'Of course'
assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(tmp.loc[4, 'c21_2_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
assert pd.isnull(tmp.loc[4, 'c31_0_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert pd.isnull(tmp.loc[2, 'c48_0_0'])
assert tmp.loc[3, 'c47_0_0'].round(5) == -5.32471
assert tmp.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
def test_postgresql_integer_is_nan(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example06_nan_integer.csv')
db_engine = 'postgresql://test:test@localhost:5432/ukb'
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert pd.isnull(tmp.loc[2, 'c46_0_0'])
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
assert pd.isnull(tmp.loc[4, 'c31_0_0'])
def test_postgresql_first_row_is_nan_integer(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example07_first_nan_integer.csv')
db_engine = 'postgresql://test:test@localhost:5432/ukb'
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert pd.isnull(tmp.loc[1, 'c46_0_0'])
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert pd.isnull(tmp.loc[2, 'c46_0_0'])
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
assert pd.isnull(tmp.loc[4, 'c31_0_0'])
def test_postgresql_sql_chunksize01(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, sql_chunksize=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
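# With sql_chunksize set, query() returns an iterator of DataFrame chunks (two rows each here) instead of a single frame.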
query_result = p2sql.query(columns)
# Validate
assert query_result is not None
import collections.abc
assert isinstance(query_result, collections.abc.Iterable)
index_len_sum = 0
for chunk_idx, chunk in enumerate(query_result):
assert chunk.index.name == 'eid'
index_len_sum += len(chunk.index)
assert len(chunk.index) == 2
if chunk_idx == 0:
indexes = (1, 2)
assert all(x in chunk.index for x in indexes)
else:
indexes = (3, 4)
assert all(x in chunk.index for x in indexes)
assert len(chunk.columns) == len(columns)
assert all(x in columns for x in chunk.columns)
assert not chunk.empty
assert chunk.shape[0] == 2
if chunk_idx == 0:
assert chunk.loc[1, 'c21_0_0'] == 'Option number 1'
assert chunk.loc[2, 'c21_0_0'] == 'Option number 2'
assert chunk.loc[1, 'c21_2_0'] == 'Yes'
assert chunk.loc[2, 'c21_2_0'] == 'No'
assert chunk.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert chunk.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
else:
assert chunk.loc[3, 'c21_0_0'] == 'Option number 3'
assert chunk.loc[4, 'c21_0_0'] == 'Option number 4'
assert chunk.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(chunk.loc[4, 'c21_2_0'])
assert chunk.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert chunk.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
assert index_len_sum == 4
def test_postgresql_sql_chunksize02(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, sql_chunksize=3)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = p2sql.query(columns)
# Validate
assert query_result is not None
import collections.abc
assert isinstance(query_result, collections.abc.Iterable)
index_len_sum = 0
for chunk_idx, chunk in enumerate(query_result):
assert chunk.index.name == 'eid'
index_len_sum += len(chunk.index)
if chunk_idx == 0:
assert len(chunk.index) == 3
indexes = (1, 2, 3)
assert all(x in chunk.index for x in indexes)
else:
assert len(chunk.index) == 1
indexes = (4,)
assert all(x in chunk.index for x in indexes)
assert len(chunk.columns) == len(columns)
assert all(x in columns for x in chunk.columns)
assert not chunk.empty
if chunk_idx == 0:
assert chunk.shape[0] == 3
assert chunk.loc[1, 'c21_0_0'] == 'Option number 1'
assert chunk.loc[2, 'c21_0_0'] == 'Option number 2'
assert chunk.loc[3, 'c21_0_0'] == 'Option number 3'
assert chunk.loc[1, 'c21_2_0'] == 'Yes'
assert chunk.loc[2, 'c21_2_0'] == 'No'
assert chunk.loc[3, 'c21_2_0'] == 'Maybe'
assert chunk.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert chunk.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert chunk.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
else:
assert chunk.shape[0] == 1
assert chunk.loc[4, 'c21_0_0'] == 'Option number 4'
assert pd.isnull(chunk.loc[4, 'c21_2_0'])
assert chunk.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
assert index_len_sum == 4
def test_postgresql_all_eids_table_created(self):
# Prepare
directory = get_repository_path('pheno2sql/example14')
csv_file1 = get_repository_path(os.path.join(directory, 'example14_00.csv'))
csv_file2 = get_repository_path(os.path.join(directory, 'example14_01.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv_file1, csv_file2), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('all_eids'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
all_eids = pd.read_sql('select * from all_eids', create_engine(db_engine))
expected_columns = ["eid"]
assert len(all_eids.columns) == len(expected_columns)
assert all(x in all_eids.columns for x in expected_columns)
## Check data is correct
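# all_eids should gather every identifier seen across the input files (6 + 4 = 10 in this fixture).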
all_eids = pd.read_sql('select * from all_eids', create_engine(db_engine), index_col='eid')
assert len(all_eids.index) == 6 + 4, len(all_eids.index)
assert 1000010 in all_eids.index
assert 1000020 in all_eids.index
assert 1000021 in all_eids.index
assert 1000030 in all_eids.index
assert 1000040 in all_eids.index
assert 1000041 in all_eids.index
assert 1000050 in all_eids.index
assert 1000060 in all_eids.index
assert 1000061 in all_eids.index
assert 1000070 in all_eids.index
def test_postgresql_all_eids_table_constraints(self):
# Prepare
directory = get_repository_path('pheno2sql/example14')
csv_file1 = get_repository_path(os.path.join(directory, 'example14_00.csv'))
csv_file2 = get_repository_path(os.path.join(directory, 'example14_01.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv_file1, csv_file2), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('all_eids'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('all_eids', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 1
assert 'eid' in columns
def test_postgresql_bgen_samples_table_created(self):
# Prepare
directory = get_repository_path('pheno2sql/example10')
csv_file = get_repository_path(os.path.join(directory, 'example10_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('bgen_samples'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
samples_data = pd.read_sql('select * from bgen_samples', create_engine(db_engine))
expected_columns = ["index", "eid"]
assert len(samples_data.columns) == len(expected_columns)
assert all(x in samples_data.columns for x in expected_columns)
## Check data is correct
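# bgen_samples maps each position ('index') in the .sample file to its eid.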
samples_data = pd.read_sql('select * from bgen_samples', create_engine(db_engine), index_col='index')
assert not samples_data.empty
assert samples_data.shape[0] == 5
assert samples_data.loc[1, 'eid'] == 1000050
assert samples_data.loc[2, 'eid'] == 1000030
assert samples_data.loc[3, 'eid'] == 1000040
assert samples_data.loc[4, 'eid'] == 1000010
assert samples_data.loc[5, 'eid'] == 1000020
def test_postgresql_bgen_samples_table_constraints(self):
# Prepare
directory = get_repository_path('pheno2sql/example10')
csv_file = get_repository_path(os.path.join(directory, 'example10_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('bgen_samples'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('bgen_samples', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 2
assert 'eid' in columns
assert 'index' in columns
# indexes
constraint_sql = self._get_table_contrains('bgen_samples', relationship_query='ix_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 2
assert 'eid' in columns
assert 'index' in columns
def test_postgresql_events_tables_only_one_instance_filled(self):
# Prepare
directory = get_repository_path('pheno2sql/example10')
csv_file = get_repository_path(os.path.join(directory, 'example10_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
assert table.iloc[0, 0]
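# Categorical (multiple) data-fields are normalized into long format: one (eid, field_id, instance, event) row per recorded code.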
## Check columns are correct
events_data = pd.read_sql('select * from events order by eid, instance, event', create_engine(db_engine))
expected_columns = ['eid', 'field_id', 'instance', 'event']
assert len(events_data.columns) == len(expected_columns)
assert all(x in events_data.columns for x in expected_columns)
## Check data is correct
assert not events_data.empty
assert events_data.shape[0] == 6
assert events_data.loc[0, 'eid'] == 1000020
assert events_data.loc[0, 'field_id'] == 84
assert events_data.loc[0, 'event'] == 'E103'
assert events_data.loc[1, 'eid'] == 1000020
assert events_data.loc[1, 'field_id'] == 84
assert events_data.loc[1, 'event'] == 'N308'
assert events_data.loc[2, 'eid'] == 1000020
assert events_data.loc[2, 'field_id'] == 84
assert events_data.loc[2, 'event'] == 'Q750'
assert events_data.loc[3, 'eid'] == 1000030
assert events_data.loc[3, 'field_id'] == 84
assert events_data.loc[3, 'event'] == 'N308'
assert events_data.loc[4, 'eid'] == 1000040
assert events_data.loc[4, 'field_id'] == 84
assert events_data.loc[4, 'event'] == 'N308'
assert events_data.loc[5, 'eid'] == 1000050
assert events_data.loc[5, 'field_id'] == 84
assert events_data.loc[5, 'event'] == 'E103'
def test_postgresql_events_tables_only_two_instances_filled(self):
# Prepare
directory = get_repository_path('pheno2sql/example11')
csv_file = get_repository_path(os.path.join(directory, 'example11_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
events_data = pd.read_sql('select * from events order by eid, instance, event', create_engine(db_engine))
expected_columns = ['eid', 'field_id', 'instance', 'event']
assert len(events_data.columns) == len(expected_columns)
assert all(x in events_data.columns for x in expected_columns)
## Check data is correct
assert not events_data.empty
assert events_data.shape[0] == 11
cidx = 0
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'J32'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000050
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'E103'
def test_postgresql_events_tables_two_categorical_fields_and_two_and_three_instances(self):
# Prepare
directory = get_repository_path('pheno2sql/example12')
csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
events_data = pd.read_sql('select * from events order by eid, field_id, instance, event', create_engine(db_engine))
expected_columns = ['eid', 'field_id', 'instance', 'event']
assert len(events_data.columns) == len(expected_columns)
assert all(x in events_data.columns for x in expected_columns)
## Check total data
assert not events_data.empty
assert events_data.shape[0] == 25
# 1000010
cidx = 0
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1136'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1434'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 2
assert events_data.loc[cidx, 'event'] == '1701'
# 1000020
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'J32'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1114'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1434'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1136'
# 1000030
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1434'
# 1000040
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1114'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1136'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 2
assert events_data.loc[cidx, 'event'] == '457'
# 1000050
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000050
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000050
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1434'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000050
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1114'
# 1000060
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000060
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 2
assert events_data.loc[cidx, 'event'] == '1114'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000060
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 2
assert events_data.loc[cidx, 'event'] == '1136'
def test_postgresql_events_tables_check_constrains_exist(self):
# Prepare
directory = get_repository_path('pheno2sql/example12')
csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('events', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 4
assert 'eid' in columns
assert 'field_id' in columns
assert 'instance' in columns
assert 'event' in columns
# indexes, including one on the 'event' column
constraint_sql = self._get_table_contrains('events', relationship_query='ix_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine), index_col='index_name')
assert constraints_results is not None
assert not constraints_results.empty
assert constraints_results.shape[0] == 6
assert constraints_results.loc[['ix_events_eid']].shape[0] == 1
assert constraints_results.loc['ix_events_eid', 'column_name'] == 'eid'
assert constraints_results.loc[['ix_events_field_id']].shape[0] == 1
assert constraints_results.loc['ix_events_field_id', 'column_name'] == 'field_id'
assert constraints_results.loc[['ix_events_instance']].shape[0] == 1
assert constraints_results.loc['ix_events_instance', 'column_name'] == 'instance'
assert constraints_results.loc[['ix_events_event']].shape[0] == 1
assert constraints_results.loc['ix_events_event', 'column_name'] == 'event'
assert constraints_results.loc[['ix_events_field_id_event']].shape[0] == 2
assert 'field_id' in constraints_results.loc['ix_events_field_id_event', 'column_name'].tolist()
assert 'event' in constraints_results.loc['ix_events_field_id_event', 'column_name'].tolist()
def test_postgresql_phenotypes_tables_check_constrains_exist(self):
# Prepare
directory = get_repository_path('pheno2sql/example12')
csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=15, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('ukb_pheno_0_00', column_query='eid', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
constraint_sql = self._get_table_contrains('ukb_pheno_0_01', column_query='eid', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
def test_postgresql_vacuum(self):
# Prepare
directory = get_repository_path('pheno2sql/example12')
csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data(vacuum=True)
# Validate
vacuum_data = pd.DataFrame()
query_count = 0
# FIXME: poll until the vacuum/analyze appears in pg_stat_user_tables (give up after 150 queries)
while vacuum_data.empty and query_count < 150:
vacuum_data = pd.read_sql("""
select relname, last_vacuum, last_analyze
from pg_stat_user_tables
where schemaname = 'public' and last_vacuum is not null and last_analyze is not null
""", db_engine)
query_count += 1
assert vacuum_data is not None
assert not vacuum_data.empty
def test_postgresql_load_data_non_utf_characters(self):
# Prepare
directory = get_repository_path('pheno2sql/example15')
csv_file1 = get_repository_path(os.path.join(directory, 'example15_00.csv')) # latin1
csv_file2 = get_repository_path(os.path.join(directory, 'example15_01.csv')) # latin1
csv_file3 = get_repository_path(os.path.join(directory, 'example15_02.csv')) # utf-8
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv_file1, csv_file2, csv_file3), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
columns = ['c21_1_0', 'c21_0_0', 'c103_0_0', 'c104_0_0', 'c221_0_0', 'c221_1_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result.index.name == 'eid'
assert len(query_result.index) == 10
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.loc[1000041, 'c103_0_0'] == 'Optión 4'
assert query_result.loc[1000041, 'c104_0_0'] == '158'
assert query_result.loc[1000070, 'c21_1_0'] == 'Of course ñ'
assert query_result.loc[1000070, 'c21_0_0'] == 'Option number 7'
assert query_result.loc[1000050, 'c221_0_0'] == 'Option number 25'
assert query_result.loc[1000050, 'c221_1_0'] == 'Maybe ñó'
def test_postgresql_load_data_with_duplicated_data_field(self):
# Prepare
directory = get_repository_path('pheno2sql/example16')
csv_file1 = get_repository_path(os.path.join(directory, 'example1600.csv'))
csv_file2 = get_repository_path(os.path.join(directory, 'example1601.csv'))
db_engine = POSTGRESQL_ENGINE
# intentionally, load first "latest" dataset (since 1601 > 1600)
p2sql = Pheno2SQL((csv_file2, csv_file1), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
columns = ['c103_0_0', 'c47_0_0', 'c50_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result.index.name == 'eid'
assert len(query_result.index) == 7 + 3, len(query_result.index)
assert not query_result.empty
assert query_result.shape[0] == 7 + 3, query_result.shape[0]
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
# these individuals should not have data for data-field 50, since we overwrote the old dataset (1600)
assert pd.isnull(query_result.loc[1000021, 'c50_0_0'])
assert pd.isnull(query_result.loc[1000041, 'c50_0_0'])
assert pd.isnull(query_result.loc[1000061, 'c50_0_0'])
# should keep "newest" data (in 1601, csv_file2)
assert query_result.loc[1000010, 'c50_0_0'] == 1.01
assert query_result.loc[1000020, 'c50_0_0'] == 1.05
assert query_result.loc[1000030, 'c50_0_0'] == 1.21
assert query_result.loc[1000040, 'c50_0_0'] == 1.25
assert query_result.loc[1000050, 'c50_0_0'] == 1.41
assert query_result.loc[1000060, 'c50_0_0'] == 1.45
assert query_result.loc[1000070, 'c50_0_0'] == 1.50
# check other data-fields
assert pd.isnull(query_result.loc[1000020, 'c103_0_0'])
assert pd.isnull(query_result.loc[1000040, 'c103_0_0'])
assert | pd.isnull(query_result.loc[1000060, 'c103_0_0']) | pandas.isnull |
"""
"""
import numpy as np
import pandas as pd
def parse_data_elecciones_esp(votation_file):
#Headers as rows for now
df = pd.read_excel(votation_file, 0)
## circunscripcion
circunscripcion = df.loc[:, :14]
circunscripcion = pd.DataFrame(circunscripcion.loc[1:, :].as_matrix(), columns = circunscripcion.loc[0, :])
# Votes
data = df.loc[:, 14:].as_matrix()[1:, 1:]
m_circs = data.shape[0]
n_parties = data.shape[1] // 2  # integer division: columns alternate between votes and seats per party
parties_b = df.loc[:, 14:].columns[1:]
parties = []
votes, diputes = np.zeros((m_circs, n_parties)), np.zeros((m_circs, n_parties))
for i in range(n_parties):
votes[:, i] = data[:, 2*i]
diputes[:, i] = data[:, 2*i+1]
parties.append(parties_b[2*i])
votes, diputes = votes.astype(int), diputes.astype(int)
return circunscripcion, parties, votes, diputes
def collapse_by_col(circunscripcion, votes, diputes, icol):
if icol is None:
l_n = ["Spain", 0, "Spain"]
l = [l_n + list(circunscripcion.iloc[:, 3:].sum(0).astype(int))]
new_circunscripcion = pd.DataFrame(l, columns=circunscripcion.columns)
vts = votes.sum(0)
dips = diputes.sum(0)
return new_circunscripcion, vts, dips
new_circuns = list(circunscripcion.iloc[:, icol].unique())
cs, m_circ, n_parties = [], len(new_circuns), votes.shape[1]
vts, dips = np.zeros((m_circ, n_parties)), np.zeros((m_circ, n_parties))
for nc in new_circuns:
logi = np.array(circunscripcion.iloc[:, icol] == nc)
l_n = [nc, new_circuns.index(nc), nc]
l = l_n + list(circunscripcion.iloc[logi, 3:].sum(0).astype(int))
cs.append(l)
vts[new_circuns.index(nc), :] = votes[logi, :].sum(0)
dips[new_circuns.index(nc), :] = diputes[logi, :].sum(0)
new_circunscripcion = | pd.DataFrame(cs, columns=circunscripcion.columns) | pandas.DataFrame |
import pandas as pd
from texthero import nlp
from . import PandasTestCase
import unittest
import string
class TestNLP(PandasTestCase):
"""
Named entity.
"""
def test_named_entities(self):
s = | pd.Series("New York is a big city") | pandas.Series |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import glob
import os
# %%
home=os.path.dirname(__file__)+"/../"
# %%
df = pd.read_csv(home+'/COVID-19/dati-province/dpc-covid19-ita-province.csv')
provdata = pd.read_csv(home+'/other_info/provinceData.csv')
regdata = pd.read_csv(home+'/other_info/regionData.csv')
# %%
# rename columns
df = df.rename(columns={
'stato': 'Country',
'codice_regione': 'Region Code',
'denominazione_regione': 'Region',
'codice_provincia': 'Province Code',
'denominazione_provincia': 'Province',
'sigla_provincia': 'Province Abbreviation',
'totale_casi': 'Total Cases'
})
df = df.astype({
'Total Cases':'Int32'
})
provdata = provdata.astype({
'Population':'Int32'
})
# %%
df['Last Update'] = pd.to_datetime(df['data'])
df['Date'] = pd.to_datetime(df['data']).dt.floor('D')
# %%
# Previous Total Cases Previous Total Deaths Previous Total Recovered Previous Total Tests
prev = df[['Date','Region','Province','Total Cases']].\
rename(columns={'Total Cases':'Prev Total Cases'})
prev['Date'] = prev['Date']+ | pd.to_timedelta(1,unit='D') | pandas.to_timedelta |
"""Test the surface_io module."""
from collections import OrderedDict
import logging
import shutil
import pandas as pd
import yaml
import fmu.dataio
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
CFG = OrderedDict()
CFG["template"] = {"name": "Test", "revision": "AUTO"}
CFG["masterdata"] = {
"smda": {
"country": [
{"identifier": "Norway", "uuid": "ad214d85-8a1d-19da-e053-c918a4889309"}
],
"discovery": [{"short_identifier": "abdcef", "uuid": "ghijk"}],
}
}
CFG2 = {}
with open("tests/data/drogon/global_config2/global_variables.yml", "r") as stream:
CFG2 = yaml.safe_load(stream)
RUN = "tests/data/drogon/ertrun1/realization-0/iter-0/rms"
CASEPATH = "tests/data/drogon/ertrun1"
def test_table_io(tmp_path):
"""Minimal test tables io, uses tmp_path."""
# make a small DataFrame
table = pd.DataFrame({"STOIIP": [123, 345, 654], "PORO": [0.2, 0.4, 0.3]})
fmu.dataio.ExportData.export_root = tmp_path.resolve()
fmu.dataio.ExportData.table_fformat = "csv"
exp = fmu.dataio.ExportData(name="test", verbosity="INFO", content="volumes")
exp._pwd = tmp_path
exp.to_file(table)
assert (tmp_path / "tables" / ".test.csv.yml").is_file() is True
with open(tmp_path / "tables" / "test.csv") as stream:
header = stream.readline().split(",")
assert len(header) == 2
# export with index=True which will give three columns (first is the index column)
exp.to_file(table, index=True)
with open(tmp_path / "tables" / "test.csv") as stream:
header = stream.readline().split(",")
assert len(header) == 3
def test_tables_io_larger_case_ertrun(tmp_path):
"""Larger test table io as ERTRUN, uses global config from Drogon to tmp_path."""
current = tmp_path / "scratch" / "fields" / "user"
current.mkdir(parents=True, exist_ok=True)
shutil.copytree(CASEPATH, current / "mycase")
fmu.dataio.ExportData.export_root = "../../share/results"
fmu.dataio.ExportData.table_fformat = "csv"
runfolder = current / "mycase" / "realization-0" / "iter-0" / "rms" / "model"
runfolder.mkdir(parents=True, exist_ok=True)
out = (
current / "mycase" / "realization-0" / "iter-0" / "share" / "results" / "tables"
)
exp = fmu.dataio.ExportData(
name="sometable",
config=CFG2,
content="volumetrics",
unit="m",
is_prediction=True,
is_observation=False,
tagname="what Descr",
verbosity="INFO",
runfolder=runfolder.resolve(),
workflow="my current workflow",
)
# make a fake DataFrame
table = | pd.DataFrame({"STOIIP": [123, 345, 654], "PORO": [0.2, 0.4, 0.3]}) | pandas.DataFrame |
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
from pylab import plot,subplot,axis,stem,show,figure
import numpy
import pandas
import math
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
from sklearn import cross_validation
from sklearn.linear_model import LinearRegression
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
# to pca 3 to visual
def pca3perform():
data=pandas.read_table("data-encode.txt",sep=' ')
print("success read test.csv file")
#data=data.reset_index().values
#data=data.as_matrix()
y=data.iloc[0:,0]
y=y.as_matrix()
x=data.iloc[0:,1:]
x=x.as_matrix()
pca=PCA(n_components=3, copy=False)
temp=pca.fit(x)
temp=pca.transform(x)
print(temp,type(temp))
x=temp
temp=pandas.DataFrame(temp)
perform(pca,x,y)
def perform(pca,X,y):
fig = plt.figure(1, figsize=(50, 50))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[[0]])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[[1]])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
def pcaana(A):
# computing eigenvalues and eigenvectors of covariance matrix
M = (A-mean(A.T,axis=1)).T # subtract the mean (along columns)
[latent,coeff] = linalg.eig(cov(M)) # attention:not always sorted
score = dot(coeff.T,M) # projection of the data in the new space
print(coeff,score,latent)
figure("init figure")
#subplot(121)
# every eigenvector describe the direction
# of a principal component.
m = mean(A,axis=1)
plot([0, -coeff[0,0]*2]+m[0], [0, -coeff[0,1]*2]+m[1],'--k')
plot([0, coeff[1,0]*2]+m[0], [0, coeff[1,1]*2]+m[1],'--k')
plot(A[0,:],A[1,:],'ob') # the data
axis('equal')
subplot(122)
# new data
plot(score[0,:],score[1,:],'*g')
axis('equal')
show()
return coeff,score,latent
def en(X=[[0,0]]):
X=X-numpy.mean(X,axis=0)
[u,s,v]=numpy.linalg.svd(X)
v=v.transpose()
#v=v[:,:numcomp]
return numpy.dot(X,v)
def sigmod(x):
return int(round(1.0/(1+math.exp(-x)),0))
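# Added illustration (not in the original script): sigmod() rounds the logistic value to the
# nearest integer, so it behaves as a hard 0/1 threshold around x = 0.
def _demo_sigmod():
    assert sigmod(-2.0) == 0  # 1/(1+e^2) ~ 0.12 rounds down to 0
    assert sigmod(0.1) == 1   # 1/(1+e^-0.1) ~ 0.52 rounds up to 1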
if __name__=="__main__":
pca3perform()
exit()
A = array([ [2.4,0.7,2.9,2.2,3.0,2.7,1.6,1.1,1.6,0.9],[2.4,1.7,2.9,2.2,3.0,2.7,2.6,1.1,1.6,0.9],
[2.5,0.5,2.2,1.9,3.1,2.3,2,1,1.5,1.1] ])
data=pandas.read_csv("multi_phenos.txt",sep=' ',header=None)
dtype = [('Col1','int32'), ('Col2','float32'), ('Col3','float32')]
values = numpy.zeros(20, dtype=dtype)
print(values,type(values))
index = ['Row'+str(i) for i in range(1, len(values)+1)]
df = pandas.DataFrame(values, index=index)
print(df,type(values))
print(data,type(data))
#data=data.reset_index().values
data=data.as_matrix()
print(data,type(data))
pca=PCA(n_components=1, copy=False, whiten=False)
temp=pca.fit(data)
temp=pca.transform(data)
print(temp,type(temp))
#temp=pandas.DataFrame(temp)
"""
for index, row in temp.iterrows():
for col_name in temp.columns:
print("#"+row[col_name])
"""
for i in range(0, len(temp)):
temp[i]=sigmod(temp[i])
# save the binarized component to a txt file
numpy.savetxt("multi_phenos_pca.txt",temp)
d= | pandas.DataFrame(temp) | pandas.DataFrame |
from pathlib import Path
import pandas as pd
import openpyxl
class CompareFiles(object):
def __init__(self, file_one_path: str, file_two_path: str):
self.file_one_path: str = file_one_path
self.file_two_path: str = file_two_path
self.__validate__()
def __validate__(self):
"""
Validates whether the file exists or not
"""
file_one = Path(self.file_one_path)
file_two = Path(self.file_two_path)
if not file_one.is_file() or not file_two.is_file():
print('No file found, exiting.')
exit(-1)
def compare_csv(self):
file_one = pd.read_csv(self.file_one_path)
file_two = | pd.read_csv(self.file_two_path) | pandas.read_csv |
from pandas import DataFrame
import numpy as np
import nltk
from collections import Counter
from collections import OrderedDict
from sklearn.feature_extraction.text import TfidfVectorizer
def extract_sim_words(model, brand, result_path, freq_dist, min_count, save=True, topn=20):
df = DataFrame(columns=[['word', 'sim', 'freq']])
result = model.most_similar([model.docvecs[brand]], topn=topn)
if save:
for tup in result:
if freq_dist[tup[0]] >= min_count:
df.loc[len(df)] = [tup[0], tup[1], freq_dist[tup[0]]]
df.to_csv(result_path + 'keywords/' + brand + "_sim_words.csv", index=False)
return
else:
for tup in result:
if freq_dist[tup[0]] >= min_count:
df.loc[len(df)] = [tup[0], tup[1], freq_dist[tup[0]]]
return df
def extract_sim_brand(model, brand, result_path, save=True, topn=20):
df = DataFrame(columns=[['word', 'sim']])
result = model.docvecs.most_similar(brand, topn=topn)
if save:
for tup in result:
df.loc[len(df)] = [tup[0], tup[1]]
df.to_csv(result_path + 'keywords/' + brand + "_sim_brands.csv", index=False)
return
else:
for tup in result:
df.loc[len(df)] = [tup[0], tup[1]]
return df
def cal_mean_cluster(df_result, cluster_idx, doc2vec_model, group_name='Cluster'):
df = df_result[df_result[group_name] == cluster_idx]
names = list(df['Name'].unique())
all_arr = np.zeros((doc2vec_model.vector_size, len(names)))
for index, name in enumerate(names):
all_arr[:, index] = doc2vec_model.docvecs[name]
return all_arr.mean(axis=1)
def print_result(vector, model, freq_dist, min_count, topn=50):
df = DataFrame(columns=[['word','cos','freq']])
lst = model.most_similar([vector], topn=topn)
for tup in lst:
if freq_dist[tup[0]] >= min_count:
df.loc[len(df)] = [tup[0], tup[1], freq_dist[tup[0]]]
return df
def save_brand_sim(model, sum_vector, name, save_path, topn=20):
df = DataFrame(columns=('brand','sim'))
lst = model.docvecs.most_similar([sum_vector], topn=topn)
for tup in lst:
df.loc[len(df)] = [tup[0], tup[1]]
df.to_csv(save_path + name + '_simBrands.csv', index=False)
return
# word distribution for each brand
def brand_raw_freq(documents, brand):
brand_review = []
for index, doc in enumerate(documents):
if doc.tags[0] == brand:
brand_review.append(doc.words)
brand_review = [word for sent in brand_review for word in sent]
corpus = nltk.Text(brand_review)
freq = nltk.FreqDist(corpus)
return brand_review, freq
def extract_keywords(score_df, brand, documents, selected, path, min_count = 100):
keywords = score_df[['word',brand]].sort_values(brand, ascending=False)
keywords.reset_index(inplace=True, drop=True)
review, freq = brand_freq(documents, selected, brand)
keyword_count = []
df = DataFrame(columns=[["단어","확률유사도","빈도"]])
for index, row in keywords.iterrows():
if freq[row['word']] >= min_count:
df.loc[len(df)] = [row['word'], row[brand], freq[row['word']]]
df.to_csv(path + '/keywords/' + brand + '_Keywords.csv', index=False)
def brand_freq(documents, selected_words, brand):
brand_review = []
for index, doc in enumerate(documents):
if doc.tags[0] == brand:
brand_review.append(selected_words[index])
brand_review = [word for sent in brand_review for word in sent]
corpus = nltk.Text(brand_review)
freq = nltk.FreqDist(corpus)
return brand_review, freq
def clustering(model):
brand_list = list(model.docvecs.doctags.keys())
hidden_size = model.vector_size
print("num of securities : %s, num of dimension : %s" % (len(brand_list), hidden_size))
doc_arr = np.zeros((len(brand_list), hidden_size))
for index, name in enumerate(brand_list):
doc_arr[index, :] = model.docvecs[name]
return brand_list, doc_arr
def tf_idf(documents, selected_words, brand_list, max_feature = 5000):
total_freq = Counter()
corpus = []
for brand in brand_list:
review, freq = brand_freq(documents, selected_words, brand)
total_freq += freq
doc = ' '.join(review)
corpus.append(doc)
total_freq = OrderedDict(sorted(total_freq.items(), key=lambda t: -t[1]))
vectorizer = TfidfVectorizer(max_features=max_feature)
tfidf_arr = vectorizer.fit_transform(corpus).toarray()
col_name = vectorizer.get_feature_names()
df_tfidf = | DataFrame(columns=[col_name]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import talib
from talib import stream
def test_streaming():
a = np.array([1,1,2,3,5,8,13], dtype=float)
r = stream.MOM(a, timeperiod=1)
assert r == 5
r = stream.MOM(a, timeperiod=2)
assert r == 8
r = stream.MOM(a, timeperiod=3)
assert r == 10
r = stream.MOM(a, timeperiod=4)
assert r == 11
r = stream.MOM(a, timeperiod=5)
assert r == 12
r = stream.MOM(a, timeperiod=6)
assert r == 12
r = stream.MOM(a, timeperiod=7)
assert np.isnan(r)
def test_streaming_pandas():
a = pd.Series([1,1,2,3,5,8,13])
r = stream.MOM(a, timeperiod=1)
assert r == 5
r = stream.MOM(a, timeperiod=2)
assert r == 8
r = stream.MOM(a, timeperiod=3)
assert r == 10
r = stream.MOM(a, timeperiod=4)
assert r == 11
r = stream.MOM(a, timeperiod=5)
assert r == 12
r = stream.MOM(a, timeperiod=6)
assert r == 12
r = stream.MOM(a, timeperiod=7)
assert np.isnan(r)
def test_CDL3BLACKCROWS():
o = np.array([39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 40.32, 40.51, 38.09, 35.00])
h = np.array([40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 41.69, 40.84, 38.12, 35.50])
l = np.array([35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 39.26, 36.73, 33.37, 30.03])
c = np.array([40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.46, 37.08, 33.37, 30.03])
r = stream.CDL3BLACKCROWS(o, h, l, c)
assert r == -100
def test_CDL3BLACKCROWS_pandas():
o = pd.Series([39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 39.00, 40.32, 40.51, 38.09, 35.00])
h = pd.Series([40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 40.84, 41.69, 40.84, 38.12, 35.50])
l = pd.Series([35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 35.80, 39.26, 36.73, 33.37, 30.03])
c = | pd.Series([40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.29, 40.46, 37.08, 33.37, 30.03]) | pandas.Series |
import os
import pandas as pd
from numpy.random import default_rng
def create_sample(
input_file="../../classes_input/test_input.csv",
output_file=None,
percentage_sample=25,
exclude_samples=None,
):
if not output_file:
exclude = ""
if exclude_samples:
excluded_names = [
os.path.splitext(os.path.basename(x))[0].replace(
"test_input_sampled_", ""
)
for x in exclude_samples
]
exclude = f"_exclude_{'_'.join(excluded_names)}"
output_file = (
f"../../classes_input/test_input_sampled_{percentage_sample}{exclude}.csv"
)
rng = default_rng()
input_df = pd.read_csv(input_file)
all_classes = | pd.unique(input_df["class_id"]) | pandas.unique |
from pathlib import Path
import os
import pandas as pd
import numpy as np
def get_country_geolocation():
dir_path = os.path.dirname(os.path.realpath(__file__))
country_mapping = pd.read_csv(
dir_path + '/data_files/country_centroids_az8.csv', dtype=str)
country_mapping = country_mapping.iloc[:, [48, 66, 67]]
longitude_mapping = {row['iso_n3']: row['Longitude']
for _, row in country_mapping.iterrows()}
latititude_mapping = {row['iso_n3']: row['Latitude']
for _, row in country_mapping.iterrows()}
return longitude_mapping, latititude_mapping
def get_country_isocode_mapping():
dir_path = os.path.dirname(os.path.realpath(__file__))
country_mapping = pd.read_csv(
dir_path + '/data_files/country-codes_csv.csv', dtype=str)
country_mapping = country_mapping.iloc[1:, [2, 8]]
mapping = {row['official_name_en']: row['ISO3166-1-numeric']
for _, row in country_mapping.iterrows()}
# add missing countries > 1000 students
mapping['Taiwan'] = '158'
mapping['Hong Kong'] = '344'
mapping['Iran'] = '364'
mapping['North Korea'] = '408'
mapping['South Korea'] = '410'
mapping['Vietnam'] = '704'
mapping['United Kingdom'] = '826'
mapping['Venezuela'] = '862'
mapping['Russia'] = '643'
mapping['Bolivia'] = '068'
mapping['Côte d’Ivoire/Ivory Coast'] = '384'
return mapping
def get_output_filename(path, out_folder):
outfile = Path(path).stem + '.csv'
return os.path.join(out_folder, outfile)
def write_csv(df, excel_file, out_folder, index=False):
out_csv = get_output_filename(excel_file, out_folder)
df.to_csv(out_csv, index=index)
def clean_new_enrollment(excel_file, out_folder):
df = pd.read_excel(excel_file)
# remove empty row
df = df.drop(6)
# prepare headers
headers = []
for i, column in enumerate(df.columns):
first_header = df[column].iloc[1]
if i == 0:
headers.append('Academic Level')
continue
if pd.isna(first_header):
headers.append(df[column].iloc[2])
else:
headers.append(f'{first_header} {df[column].iloc[2]}')
df.columns = headers
# chose data rows
df = df.iloc[3:8]
write_csv(df, excel_file, out_folder)
def clean_academic_level(excel_file, out_folder):
# TODO change hyphen to null
df = pd.read_excel(excel_file)
df = df.drop([2,
4, 5, 6, 7, 8, 9, 10, 11, 12,
14, 15, 16, 17, 18,
20, 21, 22,
24, 26,
28, 29, 30, 31, 32, 33, 34])
# drop upto column 34 pre 2009/10
columns_to_drop = [i for i in range(33) if i != 1]
# drop empty columns, every third column is empty
empty_columns = [i for i in range(33, 62) if not (i+1) % 3]
columns_to_drop = list(set(columns_to_drop) | set(empty_columns))
df = df.drop(df.columns[columns_to_drop], axis=1)
df = df.reset_index(drop=True)
headers = []
for i, column in enumerate(df.columns):
if i == 0:
# print(column)
# academic level column
headers.append(df[column].iloc[1])
continue
first_header = df[column].iloc[0]
if i % 2 != 0:
year = first_header
if pd.isna(first_header):
headers.append(f'{year} {df[column].iloc[1]}')
else:
headers.append(f'{first_header} {df[column].iloc[1]}')
df.columns = headers
df = df.iloc[2:]
df = df.set_index('Academic Level').transpose()
df = df.reset_index(level=0)
df = df.rename(columns={'index': 'Year'})
# df.index.name = None
# df.columns = df.iloc[1].values
print(df)
# df = df.iloc[2:38]
write_csv(df, excel_file, out_folder)
def clean_places_of_origin(excel_file, out_folder):
df = pd.read_excel(excel_file)
df.columns = df.loc[1].values
print(df)
def clean_top25_institution(excel_file, out_folder):
df = | pd.read_excel(excel_file) | pandas.read_excel |
import numpy as np
import pandas as pd
import time, copy
import pickle as pickle
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from scipy.special import expit
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
import statsmodels.api as sm
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.python.eager.context import num_gpus
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sub_utils import exp_decay_scheduler, keras_count_nontrainable_params, resample_and_shuffle, create_tf_dataset, reshape_model_input
class Naive_Classifier:
'''
Create naive baseline classifier, that assigns a constant surrender rate, regardsless of the feature configuration.
Parameters
----------
rate: Constant probability prediction
'''
def __init__(self, rate, ):
self.rate = rate
def predict_proba(self, X):
pred = np.zeros(shape=(len(X),2))
pred[:,0] = 1-self.rate
pred[:,1]= self.rate
return pred
def predict(self, X):
return self.predict_proba(X)
def predict_class(self, X, threshold=0.5):
return self.predict_proba(X)>threshold
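# Added usage sketch (not part of the original module): the naive baseline repeats the same
# constant rate for every row, which makes it a convenient sanity check for the trained
# classifiers below. The rate and the dummy feature matrix are arbitrary placeholders.
def _naive_baseline_example():
    baseline = Naive_Classifier(rate=0.05)
    X_dummy = np.zeros((4, 3))               # feature values are ignored by the baseline
    proba = baseline.predict_proba(X_dummy)  # every row equals [0.95, 0.05]
    return baseline.predict_class(X_dummy, threshold=0.5)  # positive column is False everywhere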
def create_ann(widths: list, actv: list, dropout: float, n_input: int, lrate: float):
'''
Create individual ANNs for ANN_bagging.
'''
model = Sequential()
for j in range(len(widths)):
if j==0: # Specify input size for first layer
model.add(Dense(units = widths[j], activation = actv[j], input_dim = n_input))
else:
model.add(Dense(units = widths[j], activation = actv[j]))
if j<(len(widths)-1): # No dropout after output layer
model.add(Dropout(rate = dropout))
model.compile(loss = 'binary_crossentropy', metrics= ['acc'], optimizer=Adam(lr=lrate))
return model
def hpsearch_ann(**params):
'''
Use params obtained via an hpsearch to create an ANN.
This is a helper function to simplify the varying parameter notation.
'''
widths = [params['width_{}'.format(1+i)] for i in range(params['depth'])]+[1]
actv = params['depth']*[params['actv']]+['sigmoid']
dropout = params['dropout']
n_input = params['n_input']
lrate = params['lrate']
model = create_ann(widths=widths, actv=actv, dropout=dropout, n_input= n_input, lrate = lrate)
return model
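# Added illustration (not in the original module): hpsearch_ann expects the flattened
# hyperparameter names used above ('depth', 'width_1', ..., 'width_<depth>'). The concrete
# values below are arbitrary placeholders, not tuned results.
def _hpsearch_ann_example():
    example_params = {
        'depth': 2, 'width_1': 64, 'width_2': 32,
        'actv': 'relu', 'dropout': 0.1,
        'n_input': 20, 'lrate': 1e-3,
    }
    return hpsearch_ann(**example_params)  # ANN with hidden widths [64, 32] and a sigmoid output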
def hpsearch_boost_ann(resampler ='None', tf_dist_strat = None, **params):
'''
Helper function to map params to ANN_boost object initialization.
'''
N_boosting = params['n_boosting']
n_input = params['n_input']
boost_width = params['width']
actv = params['actv']
lrate = params['lrate']
return ANN_boost(N_models = N_boosting, N_input = n_input, width=boost_width, act_fct=actv, lr = lrate, resampler = resampler, tf_dist_strat=tf_dist_strat)
class Logit_model:
'''
A bagged version of the sklearn LogisticRegression model.
'''
def __init__(self, params, poly_degrees, N_bag = 5, resampler = 'None'):
self.poly_degrees = poly_degrees
self.resampler = resampler
self.N_bag = N_bag
try:
del params['random_state']
except:
pass
self.models = [LogisticRegression(**params) for _ in range(self.N_bag)]
def fit(self, X_train, y_train):
'''
Fit all individual models independently for data X, y.
'''
for i in range(self.N_bag):
# optional resampling
if self.resampler == 'undersampling':
X,y = RandomUnderSampler(sampling_strategy= 'majority').fit_resample(X=X_train, y=y_train)
# shuffle data, otherwise all oversampled data are appended
X,y = sklearn.utils.shuffle(X,y)
elif self.resampler == 'SMOTE':
X,y = SMOTE().fit_resample(X=X_train, y=y_train)
# shuffle data, otherwise all oversampled data are appended
X,y = sklearn.utils.shuffle(X,y)
else:
X,y = X_train, y_train
X,y = sklearn.utils.shuffle(X,y)
# polynomial feature engineering
X_logit, y_logit = reshape_model_input(X, degrees_lst = self.poly_degrees), y
# fit model
self.models[i].fit(X_logit, y_logit)
# [self.models[i].fit(*shuffle(X_logit, y_logit, random_state=i)) for i in range(self.N_bag)]
return self # allow for one-line notation of creating and fitting the model
def predict_proba(self, X):
'''
Predict probabilities using the full ensembles of self.N_bag individual models.
'''
X_logit = reshape_model_input(X, degrees_lst = self.poly_degrees)
return np.sum(np.array([self.models[i].predict_proba(X_logit) for i in range(self.N_bag)]), axis = 0)/self.N_bag
def predict_proba_running_avg(self, X):
'''
Predict probabilities for all individual logit-models and report rolling average results, i.e. the benefit of adding more individual models to the ensemble.
'''
X_logit = reshape_model_input(X, degrees_lst = self.poly_degrees)
return np.cumsum(np.array([self.models[i].predict_proba(X_logit) for i in range(self.N_bag)]), axis = 0)/np.arange(1, self.N_bag+1).reshape((-1,1,1))
def predict_proba_individual(self, X):
'''
Predict probabilities for all individual logit-models and report them as an array of shape (N_bag, len(X), 2).
'''
X_logit = reshape_model_input(X, degrees_lst = self.poly_degrees)
return np.array([self.models[i].predict_proba(X_logit) for i in range(self.N_bag)])
class ANN_bagging:
"""
Purpose: Build multiple ANN models, use the bagged predictor in combination with an optional resampling procedure to reduce the variance of a predictor.
New version - compatible with hpsklearn optimized parameter values as input
Initialize the architecture of all individual models in the bagging procedure.
Inputs:
-------
N_models: Number of models to be included in bagging procedure
N_input: Number of input nodes
width_lst: List containing the width for all layers, and hence implicitely also the depth of the network
act_fct_lst: List containing the activation function for all layers
dropout_rate: Dropout rate applied to all layers (except output layer)
dropout_rate = 0 will effectively disable dropout
resampler: 'None': No resampling
'SMOTE': SMOTE resampling
'undersampling': RandomUndersampling
loss: loss function which the model will be compiled with. Standard option: 'binary_crossentropy'
optimizer: loss function which the model will be compiled with. Standard option: 'adam'
Outputs:
--------
None. Creates self.model object with type(object) = dict
"""
def __init__(self, N_models: int, hparams:dict, tf_dist_strat, resampler = 'None'):
self.resampler = resampler
self.model = {}
self.hparams = hparams
self.lr = hparams['lrate']
self.tf_dist_strat = tf_dist_strat
for i in range(N_models):
# create model i
try:
with self.tf_dist_strat.scope():
self.model[i] = hpsearch_ann(**hparams)
except:
self.model[i] = hpsearch_ann(**hparams)
# set ensemble model
try:
with self.tf_dist_strat.scope():
INPUT = Input(shape = (self.hparams['n_input'],))
self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
# reduce learning rate for final fine-tuning of collective bagged model
self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
except:
INPUT = Input(shape = (self.hparams['n_input'],))
self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
# reduce learning rate for final fine-tuning of collective bagged model
self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
def re_init_ensemble(self):
'''
Note: If we load old parametrizations by setting self.model[i] = value, the self.ensemble does not update automatically.
Hence, we need this value for consistently loading old values.
'''
# re-set ensemble model
try:
with self.tf_dist_strat.scope():
INPUT = Input(shape = (self.hparams['n_input'],))
self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
# reduce learning rate for final fine-tuning of collective bagged model
self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
except:
INPUT = Input(shape = (self.hparams['n_input'],))
self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
# reduce learning rate for final fine-tuning of collective bagged model
self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
def fit(self, X_train, y_train, callbacks = [], val_share = 0.3, N_epochs = 200):
"""
Purpose: Train all model instances in the bagging procedure.
output:
\t None. Updates parameters of all models in self.model
input
\t X_train, y_train: \t Training data
\t resampling_option: \t 'None': No resampling is performed
\t \t 'undersampling': random undersampling of the majority class
\t \t 'SMOTE': SMOTE methodology applied
\t callbacks: \t callbacks for training
\t val_share, N_epochs, N_batch: \t Additional arguments for training
"""
# handle pandas-datatype
if type(X_train)==type(pd.DataFrame([1])):
X_train=X_train.values
if type(y_train) == type(pd.DataFrame([1])):
y_train=y_train.values
# check if GPUs are available
try:
N_GPUs = self.tf_dist_strat.num_replicas_in_sync  # property, not a method
except:
N_GPUs = 1
for i in range(len(self.model)):
# utilze concept of resampling
X,y = resample_and_shuffle(X_train, y_train, self.resampler)
# transform into tf.data.Dataset
try:
train_data, val_data = create_tf_dataset(X, y, val_share, self.hparams['batch_size']*num_gpus())
except:
# go on with regular, numpy-data-type
print('tf.data.Dataset could not be constructed. Continuing with numpy-data.')
pass
if len(self.model)==1:
try:
self.model[i].fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_data = val_data, verbose = 2, callbacks=callbacks)
except:
print('using non-tf.data-format')
self.model[i].fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_split= val_share, verbose = 2, callbacks=callbacks)
else:
if i==0:
# More compact view on models' training progress
print('Data of shape {} '.format(X.shape) + 'and balance factor {}'.format(sum(y)/len(y)))
# Start training of model
print('Training Model {}'.format(i))
t_start = time.time()
try:
self.model[i].fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_data= val_data, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])
except:
print('using non-tf.data-format')
self.model[i].fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_split= val_share, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])
n_epochs_trained = len(self.model[i].history.history['loss'])
print('\t ... {} epochs'.format(n_epochs_trained))
# plt.plot(self.model[i].history.history['loss'], label='loss')
# plt.plot(self.model[i].history.history['val_loss'], label='val_loss')
# plt.legend()
# plt.show()
for _ in range(3):
print('\t ... Fine tuning')
# reduce learning rate
self.model[i].optimizer.learning_rate = self.model[i].optimizer.learning_rate/2
try:
self.model[i].fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_data= val_data, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])#, initial_epoch= n_epochs_trained)
except:
print('using non-tf.data-format')
self.model[i].fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_split= val_share, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])#, initial_epoch= n_epochs_trained)
# print(self.model[i].history.history)
# n_epochs_trained += len(self.model[i].history.history['loss'])
print('\t ... Overall time: {} sec.'.format(time.time()-t_start))
print('\t ... Done!')
# plt.plot(self.model[i].history.history['loss'], label='loss')
# plt.plot(self.model[i].history.history['val_loss'], label='val_loss')
# plt.legend()
# plt.show()
print('Final fine tuning of whole bagged estimator:')
t_start = time.time()
try:
self.ensemble.fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs, validation_data= val_data, verbose = 0, callbacks=callbacks)
except:
print('using non-tf.data-format')
self.ensemble.fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs, validation_split= val_share, verbose = 0, callbacks=callbacks)
print('\t ... {} epochs'.format(len(self.ensemble.history.history['val_loss'])))
print('\t ... {} sec.'.format(time.time()-t_start))
print('\t ... Done!')
# Return object to allow for shorter/ single-line notation, i.e. ANN_bagging().fit()
return self
def predict(self, X):
"""
Purpose: Predict event probability for data
Inputs:
-------
\t X: \t Input data
Outputs:
--------
\t Predictions for all input data
"""
# handle pandas-datatype
if type(X)==type( | pd.DataFrame([1]) | pandas.DataFrame |
"""
Utilities to use with market_calendars
"""
import itertools
import warnings
import pandas as pd
def merge_schedules(schedules, how='outer'):
"""
Given a list of schedules will return a merged schedule. The merge method (how) will either return the superset
of any datetime when any schedule is open (outer) or only the datetime where all markets are open (inner)
CAVEATS:
* This does not work for schedules with breaks; the break information will be lost.
* Only "market_open" and "market_close" are considered, other market times are not yet supported.
:param schedules: list of schedules
:param how: outer or inner
:return: schedule DataFrame
"""
all_cols = [x.columns for x in schedules]
all_cols = list(itertools.chain(*all_cols))
if ('break_start' in all_cols) or ('break_end' in all_cols):
warnings.warn('Merge schedules will drop the break_start and break_end from result.')
result = schedules[0]
for schedule in schedules[1:]:
result = result.merge(schedule, how=how, right_index=True, left_index=True)
if how == 'outer':
result['market_open'] = result.apply(lambda x: min(x.market_open_x, x.market_open_y), axis=1)
result['market_close'] = result.apply(lambda x: max(x.market_close_x, x.market_close_y), axis=1)
elif how == 'inner':
result['market_open'] = result.apply(lambda x: max(x.market_open_x, x.market_open_y), axis=1)
result['market_close'] = result.apply(lambda x: min(x.market_close_x, x.market_close_y), axis=1)
else:
raise ValueError('how argument must be "inner" or "outer"')
result = result[['market_open', 'market_close']]
return result
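# Added usage sketch (not part of the original module): merging two invented schedules to show
# the outer behaviour described above (outer keeps the earliest open and the latest close).
def _merge_schedules_example():
    idx = pd.DatetimeIndex(["2021-01-04", "2021-01-05"])
    nyse_like = pd.DataFrame({
        "market_open": pd.to_datetime(["2021-01-04 14:30", "2021-01-05 14:30"], utc=True),
        "market_close": pd.to_datetime(["2021-01-04 21:00", "2021-01-05 21:00"], utc=True)}, index=idx)
    lse_like = pd.DataFrame({
        "market_open": pd.to_datetime(["2021-01-04 08:00", "2021-01-05 08:00"], utc=True),
        "market_close": pd.to_datetime(["2021-01-04 16:30", "2021-01-05 16:30"], utc=True)}, index=idx)
    return merge_schedules([nyse_like, lse_like], how="outer")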
def convert_freq(index, frequency):
"""
Converts a DateTimeIndex to a new lower frequency
:param index: DateTimeIndex
:param frequency: frequency string
:return: DateTimeIndex
"""
return pd.DataFrame(index=index).asfreq(frequency).index
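# Added usage sketch (not part of the original module): downsampling a daily index to month starts.
def _convert_freq_example():
    daily = pd.date_range("2021-01-01", periods=90, freq="D")
    return convert_freq(daily, "MS")  # DatetimeIndex with one entry per month start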
class _date_range:
"""
This is a callable class that should be used by calling the already initiated instance: `date_range`.
Given a schedule, it will return a DatetimeIndex with all of the valid datetimes at the frequency given.
The schedule values are assumed to be in UTC.
The calculations will be made for each trading session. If the passed schedule-DataFrame doesn't have
breaks, there is one trading session per day going from market_open to market_close, otherwise there are two,
the first one going from market_open to break_start and the second one from break_end to market_close.
*Any trading session where start == end is considered a 'no-trading session' and will always be dropped*
CAVEATS:
* Only "market_open", "market_close" (and, optionally, "breaak_start" and "break_end")
are considered, other market times are not yet supported by this class.
* If the difference between start and end of a trading session is smaller than an interval of the
frequency, and closed= "right" and force_close = False, the whole session will disappear.
This will also raise a warning.
Signature:
.__call__(self, schedule, frequency, closed='right', force_close=True, **kwargs)
:param schedule: schedule of a calendar, which may or may not include break_start and break_end columns
:param frequency: frequency string that is used by pd.Timedelta to calculate the timestamps
this must be "1D" or higher frequency
:param closed: the way the intervals are labeled
'right': use the end of the interval
'left': use the start of the interval
None: (or 'both') use the end of the interval but include the start of the first interval (the open)
:param force_close: how the last value of a trading session is handled
True: guarantee that the close of the trading session is the last value
False: guarantee that there is no value greater than the close of the trading session
None: leave the last value as it is calculated based on the closed parameter
:param kwargs: unused. Solely for compatibility.
"""
def __init__(self, schedule = None, frequency= None, closed='right', force_close=True):
if not closed in ("left", "right", "both", None):
raise ValueError("closed must be 'left', 'right', 'both' or None.")
elif not force_close in (True, False, None):
raise ValueError("force_close must be True, False or None.")
self.closed = closed
self.force_close = force_close
self.has_breaks = False
if frequency is None: self.frequency = None
else:
self.frequency = pd.Timedelta(frequency)
if self.frequency > | pd.Timedelta("1D") | pandas.Timedelta |
import pandas as pd
def get_param_for_symbol(param, ary):
for dict in ary:
keys = dict.keys()
if param in keys:
return dict[param]
def build_param_ary_for_param(symbolary, paramset, start):
dict_df = {}
for param in paramset:
paramary = []
for symbol in symbolary:
ary = start[symbol]
value = get_param_for_symbol(param, ary)
paramary.append(value)
dict_df[param] = | pd.Series(paramary, index=symbolary) | pandas.Series |
import functools
from tqdm.contrib.concurrent import process_map
import copy
from Utils.Data.Dictionary.MappingDictionary import *
from Utils.Data.Features.Generated.GeneratedFeature import GeneratedFeaturePickle
import pandas as pd
import numpy as np
def add(dictionary, key):
dictionary[key] = dictionary.get(key, 0) + 1
def compute_chunk(chunk):
timestamp = chunk.index.to_numpy().mean()
dictionary = {}
chunk['hashtags'].map(lambda x: [add(dictionary, e) for e in x] if x is not None else [0])
return timestamp, dictionary
def get_popularity(chunk, result, s):
out = []
result = copy.deepcopy(result)
s = copy.deepcopy(s)
for hashtag, timestamp in zip(chunk['hashtags'], chunk['time']):
if hashtag is not None:
index = np.searchsorted(s, timestamp, 'left') - 1
x = [result[index][1].get(h, 0)
for h in hashtag]
else:
x = [0]
out.append(x)
return | pd.Series(out) | pandas.Series |
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import math
import sys
import numpy as np
import pandas as pd
import sample_functions
from sklearn import metrics, svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_predict, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
method_name = 'rf' # 'knn' or 'svm' or 'rf'
add_nonlinear_terms_flag = False # True (add squared and cross terms) or False (do not add them)
number_of_atom_types = 6 # number of atom types used for prediction
number_of_samples_in_prediction = 10000 # number of samples to predict
number_of_iterations = 100 # number of times the prediction is repeated
fold_number = 2 # N in N-fold CV
max_number_of_k = 20 # maximum value of k to consider
svm_cs = 2 ** np.arange(-5, 11, dtype=float)
svm_gammas = 2 ** np.arange(-20, 11, dtype=float)
rf_number_of_trees = 300 # number of decision trees in RF
rf_x_variables_rates = np.arange(1, 11, dtype=float) / 10 # candidate ratios of explanatory variables used in each decision tree
ocsvm_nu = 0.003 # nu in OCSVM: lower bound on the fraction of support vectors relative to the number of training samples
ocsvm_gammas = 2 ** np.arange(-20, 11, dtype=float) # candidate gamma values
if method_name != 'knn' and method_name != 'svm' and method_name != 'rf':
sys.exit('There is no classification method named \'{0}\'. Please review method_name.'.format(method_name))
dataset = pd.read_csv('unique_m.csv', index_col=-1)
dataset = dataset.sort_values('critical_temp', ascending=False).iloc[:4000, :]
y = dataset.iloc[:, 86].copy()
y[dataset.iloc[:, 86] >= 90] = 'positive' # a transition temperature of 90 K or higher is treated as a high-temperature superconductor (positive)
y[dataset.iloc[:, 86] < 90] = 'negative'
# count the high-temperature superconductors
numbers = y.value_counts()
print('number of high-temperature superconductors :', numbers.iloc[1])
print('number of non-high-temperature superconductors :', numbers.iloc[0])
original_x = dataset.iloc[:, :86]
original_x = (original_x.T / original_x.T.sum()).T
# drop explanatory variables whose standard deviation is 0
original_x = original_x.drop(original_x.columns[original_x.std() == 0], axis=1)
if add_nonlinear_terms_flag:
x = pd.read_csv('x_superconductor.csv', index_col=0)
# x = sample_functions.add_nonlinear_terms(original_x) # add squared and cross terms of the explanatory variables
# drop explanatory variables whose standard deviation is 0
std_0_nonlinear_variable_flags = x.std() == 0
x = x.drop(x.columns[std_0_nonlinear_variable_flags], axis=1)
else:
x = original_x.copy()
autoscaled_original_x = (original_x - original_x.mean()) / original_x.std() # autoscaling
autoscaled_x = (x - x.mean()) / x.std() # autoscaling
# optimize gamma by maximizing the variance of the Gram matrix
optimal_ocsvm_gamma = sample_functions.gamma_optimization_with_variance(autoscaled_x, ocsvm_gammas)
if method_name == 'knn':
# optimize k via CV
accuracy_in_cv_all = [] # create an empty list; the cross-validated accuracy for each k will be appended to it
ks = [] # likewise, the k values will be appended to this list
for k in range(1, max_number_of_k + 1):
model = KNeighborsClassifier(n_neighbors=k, metric='euclidean') # declare the k-NN model
# compute cross-validated estimates and convert them to a DataFrame
estimated_y_in_cv = pd.DataFrame(cross_val_predict(model, autoscaled_x, y, cv=fold_number))
accuracy_in_cv = metrics.accuracy_score(y, estimated_y_in_cv) # compute the accuracy
print(k, accuracy_in_cv) # display k and its accuracy
accuracy_in_cv_all.append(accuracy_in_cv) # append the accuracy
ks.append(k) # append the value of k
# plot the cross-validated accuracy for each k and take the k with the highest accuracy as the optimum
optimal_k = sample_functions.plot_and_selection_of_hyperparameter(ks, accuracy_in_cv_all, 'k',
'cross-validated accuracy')
print('\nk optimized via CV :', optimal_k, '\n')
# k-NN
model = KNeighborsClassifier(n_neighbors=optimal_k, metric='euclidean') # declare the model
elif method_name == 'svm':
optimal_svm_gamma = optimal_ocsvm_gamma.copy()
# optimize C via CV
model_in_cv = GridSearchCV(svm.SVC(kernel='rbf', gamma=optimal_svm_gamma),
{'C': svm_cs}, cv=fold_number, verbose=2)
model_in_cv.fit(autoscaled_x, y)
optimal_svm_c = model_in_cv.best_params_['C']
# optimize gamma via CV
model_in_cv = GridSearchCV(svm.SVC(kernel='rbf', C=optimal_svm_c),
{'gamma': svm_gammas}, cv=fold_number, verbose=2)
model_in_cv.fit(autoscaled_x, y)
optimal_svm_gamma = model_in_cv.best_params_['gamma']
print('C optimized via CV :', optimal_svm_c)
print('gamma optimized via CV :', optimal_svm_gamma)
# SVM
model = svm.SVC(kernel='rbf', C=optimal_svm_c, gamma=optimal_svm_gamma) # declare the model
elif method_name == 'rf':
# optimize the ratio of explanatory variables via OOB (Out-Of-Bag)
accuracy_oob = []
for index, x_variables_rate in enumerate(rf_x_variables_rates):
print(index + 1, '/', len(rf_x_variables_rates))
model_in_validation = RandomForestClassifier(n_estimators=rf_number_of_trees, max_features=int(
max(math.ceil(autoscaled_x.shape[1] * x_variables_rate), 1)), oob_score=True)
model_in_validation.fit(autoscaled_x, y)
accuracy_oob.append(model_in_validation.oob_score_)
optimal_x_variables_rate = sample_functions.plot_and_selection_of_hyperparameter(rf_x_variables_rates,
accuracy_oob,
'rate of x-variables',
'accuracy for OOB')
print('\nratio of explanatory variables optimized via OOB :', optimal_x_variables_rate)
# RF
model = RandomForestClassifier(n_estimators=rf_number_of_trees,
max_features=int(
max(math.ceil(autoscaled_x.shape[1] * optimal_x_variables_rate), 1)),
oob_score=True) # RF モデルの宣言
model.fit(autoscaled_x, y) # build the model
if method_name == 'rf':
# importance of the explanatory variables
x_importances = | pd.DataFrame(model.feature_importances_, index=x.columns, columns=['importance']) | pandas.DataFrame |
#
# Copyright 2020 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import os
from typing import Optional, List, Dict, Union, Any, Tuple, Type
import numpy as np
import pandas as pd
import pandas.api.types as pdt
import mlflow.models
import mlflow.pyfunc
# Storage of loaded prediction function
MODEL_FLAVOR = None
# Path to model's root
MODEL_LOCATION = os.getenv('MODEL_LOCATION', '.')
# Optional. Examples of input and output pandas DataFrames
MODEL_INPUT_SAMPLE_FILE = os.path.join(MODEL_LOCATION, 'head_input.pkl')
MODEL_OUTPUT_SAMPLE_FILE = os.path.join(MODEL_LOCATION, 'head_output.pkl')
# pylint: disable=R0911
def _type_to_open_api_format(t: Type) -> Tuple[Optional[str], Optional[Any]]:
"""
Convert type of column to OpenAPI type name and example
:param t: object's type
:return: name for OpenAPI
"""
if isinstance(t, (str, bytes, bytearray)):
return 'string', ''
if isinstance(t, bool):
return 'boolean', False
if isinstance(t, int):
return 'integer', 0
if isinstance(t, float):
return 'number', 0
if pdt.is_integer_dtype(t):
return 'integer', 0
if pdt.is_float_dtype(t):
return 'number', 0
if pdt.is_string_dtype(t):
return 'string', ''
if pdt.is_bool_dtype(t) or pdt.is_complex_dtype(t):
return 'string', ''
return None, None
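# Added illustration (not part of the original handler): how the dtype mapping above can be used
# to sketch OpenAPI column types from a sample frame. The column names and values are invented.
def _example_openapi_columns():
    sample = pd.DataFrame({"age": [30], "name": ["x"], "score": [0.5]})
    return {col: _type_to_open_api_format(dtype) for col, dtype in sample.dtypes.items()}
    # -> {'age': ('integer', 0), 'name': ('string', ''), 'score': ('number', 0)}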
def init() -> str:
"""
Initialize model and return prediction type
:return: prediction type (matrix or objects)
"""
model = mlflow.models.Model.load(MODEL_LOCATION)
if mlflow.pyfunc.FLAVOR_NAME not in model.flavors:
raise ValueError(f'{mlflow.pyfunc.FLAVOR_NAME} not in model\'s flavors')
global MODEL_FLAVOR
MODEL_FLAVOR = mlflow.pyfunc.load_model(MODEL_LOCATION)
return 'matrix'
def predict_on_matrix(input_matrix: List[List[Any]], provided_columns_names: Optional[List[str]] = None) \
-> Tuple[np.ndarray, Tuple[str, ...]]:
"""
Make prediction on a Matrix of values
:param input_matrix: data for prediction
:param provided_columns_names: Name of columns for provided matrix
:return: result matrix as np.array[np.array[Any]] and result column names
"""
if provided_columns_names:
input_matrix = | pd.DataFrame(input_matrix, columns=provided_columns_names) | pandas.DataFrame |
import os
import sys
import numpy as np
import pandas as pd
from pycompss.api.api import compss_wait_on
from pycompss.api.task import task
from data_managers.fundamentals_extraction import FundamentalsCollector
from data_managers.price_extraction import PriceExtractor
from data_managers.sic import load_sic
from models.classifiers import train_attrs as attrs
from settings.basic import DATE_FORMAT, DATA_PATH
from utils import load_symbol_list, save_obj, exists_obj, get_datasets_name
try:
import pyextrae.multiprocessing as pyextrae
tracing = True
except:
tracing = False
@task(returns=pd.DataFrame)
def get_prices(symbols_list_name, start_date='2006-01-01',
resample_period='1W', only_prices=False):
if tracing:
pro_f = sys.getprofile()
sys.setprofile(None)
prices = _get_prices(symbols_list_name, start_date, resample_period)
if only_prices:
res = prices.price
else:
res = prices
if tracing:
sys.setprofile(pro_f)
return res
def _get_prices(symbols_list_name, start_date='2006-01-01',
resample_period='1W'):
print("Loading prices for %s [%s - end] %s" % (
symbols_list_name, start_date, resample_period))
df_prices = PriceExtractor(symbols_list_name=symbols_list_name,
start_date=start_date).collect()
# set common index for outer join
df_prices = (df_prices
.assign(
date=lambda r: pd.to_datetime(r.date, format=DATE_FORMAT))
.set_index('date')
.groupby('symbol')
.resample(resample_period)
.ffill()
.sort_index())
return df_prices
@task(returns=pd.DataFrame)
def get_fundamentals(symbols_list_name, start_date, end_date, resample_period):
if tracing:
pro_f = sys.getprofile()
sys.setprofile(None)
print("Loading fundamentals for %s [%s - %s] %s" % (
symbols_list_name, start_date, end_date, resample_period))
df_fund = FundamentalsCollector(symbols_list_name=symbols_list_name,
start_date=start_date,
end_date=end_date).collect()
df_fund = (df_fund
.drop_duplicates(['date', 'symbol'], keep='first')
.assign(date=lambda r: pd.to_datetime(r.date, format=DATE_FORMAT))
.set_index('date')
.groupby('symbol')
.resample(resample_period)
.ffill()
.replace('nm', np.NaN)
.sort_index()
.assign(
bookvaluepershare=lambda r: pd.to_numeric(r.bookvaluepershare)))
df_fund = pd.concat(
[pd.to_numeric(df_fund[col], errors='ignore') for col in
df_fund.columns],
axis=1)
if tracing:
sys.setprofile(pro_f)
return df_fund
def process_symbol(symbol, df_fund, df_prices, sic_code, sic_industry,
thresholds, target_shift):
# TODO remove this once pyCOMPSs supports single-char parameters
symbol = symbol[:-1]
bot_thresh, top_thresh = thresholds
print("Processing symbol [%s]" % symbol)
ds = pd.concat([df_fund.loc[symbol], df_prices.loc[symbol]],
join='inner',
axis=1)
bins = pd.IntervalIndex.from_tuples(
[(-np.inf, bot_thresh), (bot_thresh, top_thresh),
(top_thresh, np.inf)])
df_tidy = (pd.DataFrame()
.assign(eps=ds.basiceps,
price=ds.price,
p2b=ds.price / ds.bookvaluepershare,
p2e=ds.price / ds.basiceps,
p2r=ds.price / ds.totalrevenue,
div2price=pd.to_numeric(
ds.cashdividendspershare) / pd.to_numeric(
ds.price),
divpayoutratio=ds.divpayoutratio,
# Performance measures
roe=ds.roe,
roic=ds.roic,
roa=ds.roa,
# Efficiency measures
assetturnover=ds.assetturnover,
invturnonver=ds.invturnover,
profitmargin=ds.profitmargin,
debtratio=ds.totalassets / ds.totalliabilities,
ebittointerestex=pd.to_numeric(
ds.ebit) / pd.to_numeric(
ds.totalinterestexpense),
# aka times-interest-earned ratio
# cashcoverage=ds.ebit + depretitation) / ds.totalinterestexpense,
# Liquidity measures
wc=ds.nwc,
wc2a=pd.to_numeric(ds.nwc) / pd.to_numeric(
ds.totalassets),
currentratio=ds.totalcurrentassets / ds.totalcurrentliabilities,
# Misc. info
symbol=symbol,
sic_info=sic_code[symbol],
sic_industry=sic_industry[symbol],
# Graham screening
revenue=ds.operatingrevenue,
epsgrowth=ds.epsgrowth,
bvps=ds.bookvaluepershare,
# Target
y=(df_prices.loc[symbol].price.shift(
-target_shift) / ds.price) - 1,
positions=lambda r: pd.cut(r.y, bins).cat.codes - 1,
)
.set_index('symbol', append=True))
return df_tidy
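# Added illustration (not in the original module): the 'positions' column above relies on pd.cut
# over three intervals, with the category codes shifted so that returns below bot_thresh map to -1,
# returns between the thresholds to 0, and returns above top_thresh to +1. The numbers are made up.
def _positions_example(bot_thresh=-0.02, top_thresh=0.02):
    bins = pd.IntervalIndex.from_tuples(
        [(-np.inf, bot_thresh), (bot_thresh, top_thresh), (top_thresh, np.inf)])
    y = pd.Series([-0.10, 0.00, 0.35])
    return pd.cut(y, bins).cat.codes - 1  # -> [-1, 0, 1]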
@task(returns=1)
def process_symbols(available_symbols, df_fund, df_prices, sic_code,
sic_industry, thresholds, target_shift):
if tracing:
pro_f = sys.getprofile()
sys.setprofile(None)
merged_dfs = []
for i, symbol in enumerate(available_symbols):
merged_dfs.append(process_symbol(symbol=symbol + '_', df_fund=df_fund,
df_prices=df_prices,
sic_code=sic_code,
sic_industry=sic_industry,
thresholds=thresholds,
target_shift=target_shift))
df = | pd.concat(merged_dfs) | pandas.concat |
#!/usr/bin/env python
import argparse
import json
import os
import urllib
from collections import Counter
from datetime import date
import requests
import pandas
class _REST(object):
BASE_URL = 'https://qiita.com'
def __init__(self, headers: dict, **kwargs):
self.queries = {}
self.headers = headers
self.base_url = self.BASE_URL.format(**kwargs)
for k, v in kwargs.items():
setattr(self, k, v)
def set_query(self, queries: dict):
self.queries.update(queries)
return self
def get(self, _id) -> dict:
url = self.base_url
if self.queries:
url = '?'.join([url, urllib.parse.urlencode(self.queries)])
url = '/'.join([url, _id])
response = requests.get(url, headers=self.headers)
return json.loads(response.text)
def list(self) -> dict:
url = self.base_url
if self.queries:
url = '?'.join([url, urllib.parse.urlencode(self.queries)])
response = requests.get(url, headers=self.headers)
return json.loads(response.text)
class Items(_REST):
BASE_URL = 'https://qiita.com/api/v2/users/{user_id}/items'
class Users(_REST):
BASE_URL = 'https://qiita.com/api/v2/users'
def _args():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-u', dest='user', default='itkr')
return parser.parse_args()
def _get_or_create_items(user_id):
# load from the cached file if it exists
output_path = './output/likes_{}.json'.format(date.today().strftime('%Y-%m-%d'))
if os.path.exists(output_path):
return | pandas.read_json(output_path) | pandas.read_json |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
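# (Added note) str.match answers "does the regex match at the start of each
# string?" and returns booleans, whereas str.contains searches anywhere in the
# string and str.extract returns the captured group values themselves.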
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
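# (Added note) ``expand`` controls the return type of str.extract:
#   expand=False -> Series/Index for a single group, DataFrame for several
#   expand=True  -> always a DataFrame, one column per capture group
# The FutureWarnings above are raised because the default was being switched
# to expand=True at the time these tests were written.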
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# a single Series' name is preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
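# (Added note) With no matches, extractall still returns an empty DataFrame
# whose columns mirror the capture groups, so downstream code can rely on the
# column layout regardless of whether anything matched.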
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should return the same result whether or not it has a name,
# i.e. index.name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
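# (Added note) .xs(0, level="match") keeps only the first match per subject,
# which is exactly what str.extract returns; that is why the extract and
# extractall results can be compared directly above and in the next test.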
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
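# (Added note) str.maketrans is Python 3 only; on Python 2 the equivalent
# lives in the ``string`` module, which is why both branches above build the
# same one-character translation table.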
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ BLACK STAR (not a number)
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
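# (Added note) get_dummies splits each element on the separator and one-hot
# encodes the tokens, e.g. 'a|b' with sep='|' puts a 1 in both the 'a' and the
# 'b' columns; missing values produce an all-zero row.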
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['<NAME>', '<NAME>'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
            except Exception:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
        xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
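        assert_series_equal(rs, xp)

        # Assumed continuation (the original file is truncated here): mirror the strip and
        # lstrip checks above for rstrip with a custom strip character; expected values
        # follow directly from applying str.rstrip('x') to the inputs defined in this test.
        rs = values.str.rstrip('x')
        xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
        assert_series_equal(rs, xp)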
"""
test_exploreDA
--------------
The module which groups the main functions to explore a new data.
"""
import pandas as pd
import numpy as np
import datetime
from Plotting.contdistrib_plot import cont_distrib_plot
from Plotting.catdistrib_plot import barplot_plot
from Plotting.net_plotting import plot_net_distribution, plot_heat_net
from Plotting.geo_plotting import compute_spatial_density_sparse,\
clean_coordinates, plot_in_map, plot_geo_heatmap
from Plotting.temp_plotting import temp_distrib
from Statistics.cont_statistics import quantile_compute, ranges_compute,\
cont_count, log_cont_count
from Statistics.cat_statistics import cat_count
from Statistics.temp_statistics import count_temp_stats
from Statistics.coord_statistics import mean_coord_by_values
from SummaryStatistics.univariate_stats import compute_univariate_stats
def test():
## Parameters data
contdata = pd.Series(np.random.random(100))
catdata = pd.Series(np.random.randint(0, 10, 100))
netdata = np.random.random((10, 10))
xt = np.random.random(100)
timedata = pd.Series(np.random.random(100),
index=[datetime.datetime.now() + datetime.timedelta(e)
for e in np.cumsum(xt)])
df = pd.DataFrame([np.random.random(100), np.random.random(100), catdata])
df = df.T
df.columns = ['a', 'b', 'c']
### Statitics testing
# Categorical variable
cat_count(df, 'c')
# Continious variable
quantile_compute(contdata, 5)
ranges_compute(contdata, 5)
cont_count(df, 'a', 5)
log_cont_count(df, 'a', 5)
    # Coordinate variables (TO RETEST: currently failing)
mean_coord_by_values(df, ['a', 'b'], 'c')
# Temporal variable
date_ranges = np.linspace(timedata.min(), timedata.max(), 5)[1:-1]
count_temp_stats(timedata, date_ranges, tags=None)
### Plotting testing
## Testing univariate categorical variable plotting
barplot_plot(catdata, logscale=False)
barplot_plot(catdata, logscale=True)
## Testing univariate continious variable plotting
cont_distrib_plot(contdata, n_bins=5, logscale=True)
cont_distrib_plot(contdata, n_bins=5, logscale=False)
## Testing network plotting
plot_net_distribution(netdata, 5)
plot_heat_net(netdata, range(10))
## Testing geospatial plotting
# Parameters
longs, lats = np.random.random(100), np.random.random(100)
n_x, n_y = 10, 10
n_levs = 5
sigma_smooth, order_smooth, null_lim = 5, 0, 0.1
var0, var1 = None, np.random.random(100)
    coordinates = pd.DataFrame([longs, lats])
import bisect
import ifc.stockData as stockData
import pandas as pd
import numpy as np
from datetime import datetime
def get_series(ticker_sym, start, end):
    df = stockData.get_data_from_google(ticker_sym, start, end)
    return Series(df)
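

def _example_usage():
    """Illustrative usage sketch (not part of the original module): fetch roughly a year of
    daily prices for a placeholder ticker, compute the default indicator set, and drop the
    warm-up rows that only contain NaN values. The ticker symbol and dates are assumptions."""
    series = get_series('SPY', datetime(2016, 1, 1), datetime(2017, 1, 1))
    series.run_calculations()
    series.trim_fat()
    return series.df
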
class Series(object):
""" used to represent a series of days """
def __init__(self, dataframe, config=None):
self.df = dataframe
self.max_win = 26
if config is None: # default settings
self.mavg = [10, 30]
self.macd = [(9, 12, 26)]
self.ema = []
self.rsi = [14]
else:
raise NotImplementedError
def run_calculations(self):
""" runs a set of default calculations """
for w in self.mavg:
self.calculate_mavg(w)
for w in self.macd:
self.calculate_macd(w[0], w[1], w[2])
for w in self.ema:
self.calculate_ema(w)
for w in self.rsi:
self.calculate_rsi(w)
def set_max_win(self, val):
self.max_win = val if val > self.max_win else self.max_win
def trim_fat(self, window=None):
temp_win = window
if temp_win is None:
temp_win = self.max_win
        self.df = self.df[temp_win:]  # drop the first n rows, which typically contain NaN
def calculate_mavg(self, window=10, col='Adj_Close'):
""" calculates a simple moving average """
self.set_max_win(window)
name = "mavg_%s" % (window)
        self.df[name] = self.df[col].rolling(window=window).mean()
return name
def calculate_rsi(self, window=14, col='Adj_Close'):
self.set_max_win(window)
delta = self.df[col].diff()
dUp, dDown = delta.copy(), delta.copy()
dUp[dUp < 0] = 0
dDown[dDown > 0] = 0
RolUp = dUp.rolling(window=window).mean()
RolDown = dDown.rolling(window=window).mean().abs()
name = "rsi_%s" % (window)
RS = RolUp / RolDown
self.df[name] = 100.0 - (100.0 / (1.0 + RS))
return name
def calculate_ema(self, window, name=None, col='Adj_Close'):
self.set_max_win(window)
if name is None:
name = "ema_%s" % (window)
        self.df[name] = self.df[col].ewm(span=window, min_periods=window).mean()
return name
def calculate_macd(self, signal=9, fast=12, slow=26, col='Adj_Close'):
""" MACD """
self.set_max_win(slow)
signal_name = "signal_%s" % (signal)
        # Exponential moving averages of the price column (pd.ewma is deprecated/removed,
        # so use the .ewm accessor instead)
        fast_ema = self.df[col].ewm(span=fast, min_periods=fast).mean()
        slow_ema = self.df[col].ewm(span=slow, min_periods=slow).mean()
name = "macd_%s_%s" % (fast, slow)
self.df[name] = fast_ema - slow_ema
self.calculate_ema(signal, col=name, name=signal_name)
return name
def calculate_mom(self, col='Adj_Close', window=1):
""" Momentum Measures the change in price
Price(t)-Price(t-n)
"""
self.set_max_win(window)
name = "mom_%s" % (window)
self.df[name] = self.df[col] - self.df[col].shift(window)
return name
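    # Worked example (illustrative, not from the original source): with an Adj_Close column
    # of [10, 12, 11, 15] and window=1, calculate_mom adds mom_1 = [NaN, 2, -1, 4], since
    # each entry is Price(t) - Price(t-1).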
def calculate_rocr(self, window=3, col='Adj_Close'):
""" Rate of Change Compute rate of change
relative to previous trading intervals
(Price(t)/Price(t-n))*100
"""
self.set_max_win(window)
name = "rocr_%s" % (window)
self.df[name] = (self.df[col] / self.df[col].shift(window)) * 100
return name
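    # Worked example (illustrative, not from the original source): with an Adj_Close column
    # of [10, 12, 11, 15] and window=3, the last entry of rocr_3 is (15 / 10) * 100 = 150.0
    # and the first three entries are NaN.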
def calculate_atr(self, window=14):
""" Average True Range Shows volatility of market
ATR(t) = ((n-1) * ATR(t-1) + Tr(t)) / n
where Tr(t) = Max(Abs(High - Low), Abs(High - Close(t - 1)), Abs(Low - Close(t - 1));
"""
self.set_max_win(window)
        tr_l = [0]  # no true range can be computed for the first row
for i in range(self.df.index[-1]):
tr = max(self.df.get_value(i + 1, 'High'),
self.df.get_value(i, 'Adj_Close')) - min(self.df.get_value(i + 1, 'Low'),
self.df.get_value(i, 'Adj_Close'))
tr_l.append(tr)
name = 'atr_%s' % (window)
        self.df[name] = pd.Series(tr_l).ewm(span=window, min_periods=window).mean()
return name
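    # Worked example (illustrative, not from the original source): if on day t High=11 and
    # Low=9, and the previous day's Adj_Close was 10.5, then
    # Tr(t) = max(11, 10.5) - min(9, 10.5) = 2.0; atr_14 is an exponential moving average
    # of these true-range values.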
def calculate_mfi(self, window=14):
""" Money Flow Index Relates typical price with Volume
100 - (100 / (1 + Money Ratio))
where Money Ratio=(+Moneyflow / -Moneyflow);
Moneyflow=Tp*Volume
Tp = (High + Low + Close)/3
"""
name = "mfi_%s" % (window)
tp = (self.df['High'] + self.df['Low'] + self.df['Adj_Close']) / 3
i = 0
PosMF = [0]
while i < self.df.index[-1]:
if tp[i + 1] > tp[i]:
PosMF.append(tp[i + 1] * self.df.get_value(i + 1, 'Volume'))
else:
PosMF.append(0)
i = i + 1
PosMF = pd.Series(PosMF)
TotMF = tp * self.df['Volume']
        MFR = pd.Series(PosMF / TotMF)
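        # Assumed completion (the original file is truncated here): following the pattern of
        # the other indicators, smooth the money-flow ratio over the window, store it under
        # `name`, and return the column name. The exact smoothing the original author used
        # is not recoverable from this file, so a simple rolling mean is assumed.
        self.df[name] = MFR.rolling(window=window).mean()
        return name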
#!/usr/bin/env python3
"""
Python tool to correct attenuation bias stemming from measurement error in polygenic scores (PGI).
"""
import argparse
import copy
import itertools
import logging
import multiprocessing
import os
import re
import stat
import sys
import tarfile
import tempfile
from typing import Any, Dict, List, Tuple
import zipfile
import numpy as np
import pandas as pd
import statsmodels.api as sm
import wget
####################################################################################################
# The default short file prefix to use for output and logs
DEFAULT_SHORT_PREFIX = "pgi_correct"
# Software version
__version__ = '0.0.2'
# Email addresses to use in header banner to denote contacts
SOFTWARE_CORRESPONDENCE_EMAIL1 = "<EMAIL>"
SOFTWARE_CORRESPONDENCE_EMAIL2 = "<EMAIL>"
OTHER_CORRESPONDENCE_EMAIL = "<EMAIL>"
# GCTA version used when downloading
GCTA_VERSION = "gcta_1.93.0beta"
# GCTA executable used when downloading
GCTA_EXEC = "gcta64"
# GCTA URL (where to try to download GCTA from)
GCTA_URL = "https://cnsgenomics.com/software/gcta/bin"
# BOLT version used when downloading
BOLT_VERSION = "BOLT-LMM_v.2.3.4"
# BOLT executable used when downloading
BOLT_EXEC = "bolt"
# BOLT URL (where to try to download BOLT from)
BOLT_URL = "http://data.broadinstitute.org/alkesgroup/BOLT-LMM/downloads"
# Default number of blocks for jack-knifing
DEFAULT_NUM_JK_BLOCKS = 100
# Threshold of number of jack knife blocks below which the user is warned
MIN_WARNING_JK_BLOCKS = 20
# Name given to column of constant values added to the regression data
CONS_COL_NAME = "cons"
# List holding CONS_COL_NAME (this software passes column names are passed around via lists)
CONS_COLS = [CONS_COL_NAME]
# Result reporting values
VAR_OUTPUT_COLUMN = "variable_name"
UNCORR_COEF_COLUMN = "uncorrected_coef"
UNCORR_COEF_SE_COLUMN = "uncorrected_se"
CORR_COEF_COLUMN = "corrected_coef"
CORR_COEF_SE_COLUMN = "corrected_se"
OUTPUT_COLUMNNAMES = [VAR_OUTPUT_COLUMN, UNCORR_COEF_COLUMN, UNCORR_COEF_SE_COLUMN,
CORR_COEF_COLUMN, CORR_COEF_SE_COLUMN]
####################################################################################################
DEFAULT_FULL_OUT_PREFIX = "%s/%s" % (os.getcwd(), DEFAULT_SHORT_PREFIX)
# Logging banner to use at the top of the log file
HEADER = """
<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
<>
<> Polygenic Index (PGI) Measurement Error Correction
<> Version: %s
<> (C) 2020 Social Science Genetic Association Consortium (SSGAC)
<> MIT License
<>
<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
<> Software-related correspondence: %s or %s
<> All other correspondence: %s
<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
""" % (__version__, SOFTWARE_CORRESPONDENCE_EMAIL1, SOFTWARE_CORRESPONDENCE_EMAIL2,
OTHER_CORRESPONDENCE_EMAIL)
# Formatted string to use for reporting to the log/terminal and also the results file
RESULTS_SUMMARY_FSTR = """
N = %s
Heritability = %s (%s)
R Squared = %s (%s)
Rho = %s (%s)
"""
# Used in the slot where standard errors would normally be reported (when jack-knife wasn't run)
ASSUMED_VAL = "assumed"
"""
Class used as a holder for internal values
"""
class InternalNamespace:
pass
def warn_or_raise(force: Any, msg_str: str, *msg_args: Any):
"""
If force is False-ish, this throws a runtime error with message msg_str. Otherwise,
it logs msg_str at a warning level. The message msg_str can be a %-formatted string that
takes arguments, which are passed via msg_args.
:param force: Determines if this should raise an exception or log a warning
:param msg_str: String to log or use as exception message
    :param msg_args: Optional positional arguments that are interpolated into msg_str
"""
if not force:
raise RuntimeError(msg_str % msg_args)
logging.warning("WARNING: " + msg_str, *msg_args)
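# Usage sketch (illustrative): warn_or_raise(settings.force, "dropped %s rows", n_dropped)
# logs "WARNING: dropped 3 rows" when --force was supplied and raises
# RuntimeError("dropped 3 rows") otherwise.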
def set_up_logger(output_full_prefix: str, logging_level: str):
"""
Set up the logger for this utility.
:param output_full_prefix: Full prefix to use for output files
:param logging_level: Level of verbosity of logging object.
:return: Returns the full path to the log output file
"""
# This bit of validation needs to be done early here so that the logger can be created
output_dir = os.path.dirname(output_full_prefix)
if not os.path.exists(output_dir):
raise FileNotFoundError("The designated output directory [%s] does not exist." % output_dir)
# Construct full path to desired log output file
full_logfile_path = output_full_prefix + ".log"
# Set stdout handler level
logging_level_map = {"debug": logging.DEBUG,
"info": logging.INFO,
"warn": logging.WARN}
if logging_level not in logging_level_map:
raise ValueError("Logging level %s not recognized. Please specify one of %s " %
(logging_level, list(logging_level_map.keys())))
# Set logging config (message is timestamp plus text)
logging.basicConfig(format='%(asctime)s %(message)s',
filename=full_logfile_path,
filemode='w', level=logging_level_map[logging_level], datefmt='%I:%M:%S %p')
# Create extra handlers to mirror messages to the terminal
# (errors and warnings to stderr and lower priority messages to stdout)
stderr_handler = logging.StreamHandler(stream=sys.stderr)
stderr_handler.setLevel(logging.WARNING)
stdout_handler = logging.StreamHandler(stream=sys.stdout)
stdout_handler.setLevel(logging_level_map[logging_level])
stdout_handler.addFilter(lambda record: record.levelno <= logging.INFO)
# Add the handlers to the logger
logging.getLogger().addHandler(stderr_handler)
logging.getLogger().addHandler(stdout_handler)
return full_logfile_path
def to_flag(arg_str: str) -> str:
"""
Utility method to convert from the name of an argparse Namespace attribute / variable
(which often is adopted elsewhere in this code, as well) to the corresponding flag
:param arg_str: Name of the arg
:return: The name of the flag (sans "--")
"""
return arg_str.replace("_", "-")
def to_arg(flag_str: str) -> str:
"""
Utility method to convert from an argparse flag name to the name of the corresponding attribute
in the argparse Namespace (which often is adopted elsewhere in this code, as well)
:param flag_str: Name of the flag (sans "--")
:return: The name of the argparse attribute/var
"""
return flag_str.replace("-", "_")
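# Examples (illustrative): to_flag("reg_data_file") returns "reg-data-file" and
# to_arg("reg-data-file") returns "reg_data_file"; the two helpers are inverses of each other.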
def format_os_cmd(cmd: List[str]) -> str:
"""
Format OS command for readability (used to display string used to execute this software)
:param cmd: Command to be passed to os.system(.), stored as a list
whose elements are the options/flags.
:return: Formatted string.
"""
return "Calling " + ' '.join(cmd).replace("--", " \\ \n\t--")
def validate_h2_software_inputs(user_args: Dict[str, str], parsed_args: argparse.Namespace,
settings: InternalNamespace):
"""
Responsible for validating gcta/bolt flags for internal consistency and valid values
(doesn't need to check for existence of files / directories, this is just for the flag values)
:param user_args: User-specified flags
:param parsed_args: Result of argparse parsing user command / flags
:param settings: Internal namespace class
"""
# Classify GCTA-related flags for checking consistency
required_gcta_argnames = {"pheno_file"}
need_one_argnames = {"bfile", "grm"}
optional_gcta_argnames = {"gcta_exec", "grm_cutoff"} | need_one_argnames
required_bolt_argnames = {"pheno_file", "bfile"}
optional_bolt_argnames = {"pheno_file_pheno_col"}
# Set of all user flags employed
user_args_key_set = set(user_args.keys())
all_software_args = required_gcta_argnames | optional_gcta_argnames | required_bolt_argnames | optional_bolt_argnames
if settings.use_gcta:
missing = required_gcta_argnames - user_args_key_set
settings.software = "GCTA"
exec_name = os.path.basename(parsed_args.gcta_exec) if parsed_args.gcta_exec else None
else:
missing = required_bolt_argnames - user_args_key_set
settings.software = "BOLT"
exec_name = os.path.basename(parsed_args.bolt_exec) if parsed_args.bolt_exec else None
# Run different checks of flags depending on whether GCTA is needed or not
if settings.calc_h2:
# If all the required flags aren't specified, throw an exception
if missing:
            raise RuntimeError("For %s to run, please specify the following missing flags: %s" %
                               (settings.software, {to_flag(arg) for arg in missing}))
# Check to make sure bfile or GRM are specified
need_ones_present = need_one_argnames & user_args_key_set
if len(need_ones_present) != 1 and settings.use_gcta:
            raise RuntimeError("Need to specify one and only one of: %s" % need_one_argnames)
        # Raise an error if the specified executable's name does not match the expected name
        # for the selected h^2 software
        if exec_name:  # check whether an executable path was passed at all
            expected_exec = GCTA_EXEC if settings.use_gcta else BOLT_EXEC
            if exec_name != expected_exec:
                raise NameError("Specified executable [%s] is not the expected one: [%s]" %
                                (exec_name, expected_exec))
else:
# If any GCTA-related flags are specified, warn the user
not_needed = [x for x in all_software_args if x in user_args_key_set]
if not_needed:
extraneous_flags = {to_flag(arg) for arg in not_needed}
warn_or_raise(settings.force, "The following unneeded flags were specified: %s",
extraneous_flags)
def validate_filedir_inputs(user_args: Dict[str, str], parsed_args: argparse.Namespace,
settings: InternalNamespace):
"""
Responsible for validating the existence of files and directories (not file contents)
:param user_args: User-specified flags
:param parsed_args: Result of argparse parsing user command / flags
:param settings: Internal namespace class
"""
# Check for output directory (somewhat redundant as the logging setup probably did this already)
out_dir = os.path.dirname(parsed_args.out)
if not os.path.exists(out_dir):
raise FileNotFoundError("The designated output directory [%s] does not exist." % out_dir)
settings.out_dir = out_dir
# Check for regression data file
if not os.path.exists(parsed_args.reg_data_file):
raise FileNotFoundError("The designated data file [%s] does not exist." %
parsed_args.reg_data_file)
# If path to gcta executable is specified (and gcta is needed), confirm it exists
if settings.calc_h2 and parsed_args.gcta_exec:
if not os.path.exists(parsed_args.gcta_exec):
raise FileNotFoundError("The specified gcta executable [%s] does not exist." %
                                    parsed_args.gcta_exec)
# If grm info is specified (and gcta is needed), confirm it all exists
if settings.calc_h2 and parsed_args.grm:
grm_dir = os.path.dirname(parsed_args.grm)
if not os.path.exists(grm_dir):
raise FileNotFoundError("The specified grm directory [%s] does not exist." % grm_dir)
if not (os.path.exists("%s.grm.bin" % parsed_args.grm) and
os.path.exists("%s.grm.id" % parsed_args.grm) and
os.path.exists("%s.grm.N.bin" % parsed_args.grm)):
            raise FileNotFoundError("One or more of the expected GRM files "
                                    "(%s.grm.bin, %s.grm.id, and %s.grm.N.bin) "
                                    "do not exist in directory %s." %
                                    (parsed_args.grm, parsed_args.grm, parsed_args.grm, grm_dir))
# If a phenotype file is specified (and gcta is needed), confirm it exists
if settings.calc_h2 and parsed_args.pheno_file:
if not os.path.exists(parsed_args.pheno_file):
raise FileNotFoundError("The designated phenotype file [%s] does not exist." %
parsed_args.pheno_file)
# If bfile directory/file info is specified (and gcta is needed), confirm it all exists
if settings.calc_h2 and parsed_args.bfile:
bfile_dir = os.path.dirname(parsed_args.bfile)
if not os.path.exists(bfile_dir):
raise FileNotFoundError("The specified bfile directory [%s] does not exist." %
bfile_dir)
if not (os.path.exists("%s.bed" % parsed_args.bfile) and
os.path.exists("%s.bim" % parsed_args.bfile) and
os.path.exists("%s.fam" % parsed_args.bfile)):
raise FileNotFoundError("One or more of the expected bed/bim/fam files "
"(%s.bed, %s.bim, and %s.fam) do not exist in directory %s." %
(parsed_args.bfile, parsed_args.bfile,
parsed_args.bfile, bfile_dir))
def validate_numeric_flags(user_args: Dict[str, str], parsed_args: argparse.Namespace,
settings: InternalNamespace):
"""
Responsible for validating numeric flags (e.g. to confirm they are within required bounds)
:param user_args: User-specified flags
:param parsed_args: Result of argparse parsing user command / flags
:param settings: Internal namespace class
"""
# Include check for GRM cutoff here if bounds can be specified
# Include check for weights if bounds can be specified
# Check h^2 if it's specified
h2_lower_bound = 0.0
h2_upper_bound = 1.0
if parsed_args.h2:
if parsed_args.h2 < h2_lower_bound or parsed_args.h2 > h2_upper_bound:
raise ValueError("The specified h^2 value (%s) should be between %f and %f." %
(parsed_args.h2, h2_lower_bound, h2_upper_bound))
# Check R^2 if it's specified
r2_lower_bound = 0.0
r2_upper_bound = 1.0
if parsed_args.R2:
if parsed_args.R2 < r2_lower_bound or parsed_args.R2 > r2_upper_bound:
raise ValueError("The specified R^2 value (%s) should be between %f and %f." %
(parsed_args.R2, r2_lower_bound, r2_upper_bound))
# Check num blocks if it's specified
if parsed_args.num_blocks:
if parsed_args.num_blocks < 2:
raise ValueError("The specified num-blocks (%s) is invalid" % parsed_args.num_blocks)
if parsed_args.num_blocks < MIN_WARNING_JK_BLOCKS:
warn_or_raise(settings.force, "The specified num-blocks (%s) should be at LEAST %f",
parsed_args.num_blocks, MIN_WARNING_JK_BLOCKS)
def validate_jackknife_inputs(user_args: Dict[str, str], parsed_args: argparse.Namespace,
settings: InternalNamespace):
"""
Responsible for validating jack-knife-related flags for internal consistency
:param user_args: User-specified flags
:param parsed_args: Result of argparse parsing user command / flags
:param settings: Internal namespace class
"""
# Classify JK-related flags for checking consistency
    required_jk_argnames = set()
optional_jk_argnames = {"num_blocks"}
if settings.calc_h2: # id_col is required if we need to run GCTA
required_jk_argnames.add("id_col")
else:
optional_jk_argnames.add("id_col")
    if len(parsed_args.id_col) > 2:
raise ValueError("Cannot specify more than two ID columns.")
    # Break down which jack-knife-related flags the user actually set
    user_args_key_set = set(user_args.keys())  # Set of all user flags employed
    all_jk_user_argnames = user_args_key_set & optional_jk_argnames  # optional JK flags the user set
# Run different checks of flags depending on whether JK is being run or not
if settings.jk_se:
# If all the required flags aren't specified, throw an exception
missing_req_argnames = required_jk_argnames - user_args_key_set
if missing_req_argnames:
missing_flags = {to_flag(arg) for arg in missing_req_argnames}
raise RuntimeError("For JK to run, please specify the following missing flags: %s" %
missing_flags)
else:
# If any JK-related flags are specified, warn the user
if len(all_jk_user_argnames) != 0:
extraneous_flags = {to_flag(arg) for arg in all_jk_user_argnames}
warn_or_raise(settings.force, "The following unneeded flags were specified: %s",
extraneous_flags)
def validate_regression_data_columns(user_args: Dict[str, str], parsed_args: argparse.Namespace,
settings: InternalNamespace):
"""
Responsible for validating the inputs related to column names in the regression data
:param user_args: User-specified flags
:param parsed_args: Result of argparse parsing user command / flags
:param settings: Internal namespace class
"""
    # Keep track of the column-type flags to check (flag name : whether that flag is required)
required = {"outcome" : True,
"pgi_var" : True,
"pgi_pheno_var" : False,
"weights" : False,
"id_col" : settings.jk_se,
"pgi_interact_vars" : False,
"covariates" : True}
if "weights" in user_args:
settings.weights = [settings.weights]
# Read the first line of the file to get column names
first_line_of_reg_data = pd.read_csv(parsed_args.reg_data_file, sep=None,
engine='python', nrows=1)
file_columns = set(first_line_of_reg_data.columns)
logging.debug("Found the following columns in regression data: %s\n", file_columns)
# Determine actual interaction and covariate column lists and record them to internal namespace
for coltype in ["pgi_interact_vars", "covariates"]:
pargs_val = getattr(parsed_args, coltype)
if getattr(parsed_args, coltype):
try:
setattr(settings, coltype, determine_col_names_from_input(
pargs_val, file_columns, parsed_args.force))
except:
logging.error("Error matching columns for %s", to_flag(coltype))
raise
# Set pgi pheno var to outcome if not set already (and then disregard, just check outcome)
if not parsed_args.pgi_pheno_var:
settings.pgi_pheno_var = settings.outcome
required.pop("pgi_pheno_var")
# Check flags to make sure required ones are filled in and anything specified maps to a column
col_sets = dict()
for colarg in required:
# Get the set of columns listed / mapped to for the given column type
cols = set(getattr(settings, colarg))
# Determine whether the list of columns for that type is empty or some don't map to file
missing_cols = cols - file_columns
not_specified = not cols
# Run checks against the column type vis-a-vis whether it's required / has invalid values
if missing_cols:
# If any columns are specified, but are missing from the file column list,
# throw unavoidable error
raise LookupError("Could not find columns %s specified using the flag --%s "
"in the list of columns %s in the regression data file." %
(missing_cols, to_flag(colarg), file_columns))
if not_specified:
# If it's not specified but required, then throw an unavoidable error
if required[colarg]:
raise LookupError("No value(s) specified for needed flag --%s!" % to_flag(colarg))
else:
# Keep track of the set of file columns of the given type/arg
col_sets[colarg] = cols
    # At this point, col_sets only contains column arguments whose columns actually exist
    # in the regression file. Now we need to check that no column was specified more than
    # once (with the exception of covariates and pgi_interact_vars)
# If covariates and pgi_interact_vars are both specified, check if pgi_interact_vars is a
# subset of covariates, and, if not, warn or throw and error depending on --force
if settings.pgi_interact_vars:
extra_interact_vars = col_sets["pgi_interact_vars"] - col_sets["covariates"]
if extra_interact_vars:
warn_or_raise(settings.force, "There are interaction columns specified (%s) not in the "
"set of covariates", extra_interact_vars)
# Check to make sure all remaining column sets pairs are disjoint
for cols_pair in itertools.combinations(col_sets, 2):
# Skip pgi_interact_vars and covariates comparison, which is a special case already handled
if (cols_pair[0] == "pgi_interact_vars" and cols_pair[1] == "covariates") or (
cols_pair[0] == "covariates" and cols_pair[1] == "pgi_interact_vars"):
continue
# Check to make sure the given pair is disjoint (if not, throw unavoidable error)
col_intersection = col_sets[cols_pair[0]] & col_sets[cols_pair[1]]
if col_intersection:
raise LookupError("Columns listed for flag --%s and columns listed for flag --%s "
"share columns %s, and these sets must be disjoint." %
(to_flag(cols_pair[0]), to_flag(cols_pair[1]), col_intersection))
# Log the covariates and interact columns found, depending on log level
logging.debug("Identified the following covariate columns: %s", settings.covariates)
logging.debug("Identified the following interaction columns: %s", settings.pgi_interact_vars)
def validate_inputs(pargs: argparse.Namespace, user_args: Dict):
"""
Responsible for coordinating whatever initial validation of inputs can be done
:param pargs: Result of argparse parsing user command / flags
:param user_args: Flags explicitly set by the user along with their values
    :return: InternalNamespace that holds the flags and parameters needed by this program.
             It contains user-input flags along with defaults set through argparse, and any
             additional values added as calculations proceed
    """
    # Create the internal namespace used as the store for flags and parameters
settings = InternalNamespace()
# Copy values from args to internal namespace (may be overwritten in the internal namespace)
# Could this be done by just making a shallow copy of `pargs`?
for attr in vars(pargs):
setattr(settings, attr, getattr(pargs, attr))
# Check if heritability calculation(s) required and which software to use
settings.calc_h2 = not pargs.h2
settings.use_gcta = settings.calc_h2 and "bolt_exec" not in user_args
# Check if GCTA commands should have stdout suppressed
settings.quiet_h2 = pargs.logging_level != "debug"
# Check h^2 external software flags (should do this first to figure out if calc is needed, which
# affects later validation steps)
validate_h2_software_inputs(user_args, pargs, settings)
# Check numeric flags
validate_numeric_flags(user_args, pargs, settings)
# Check existence of files and directories
validate_filedir_inputs(user_args, pargs, settings)
# Check regression data column inputs
validate_regression_data_columns(user_args, pargs, settings)
return settings
def determine_col_names_from_input(exprs: List[str], cols: List[str], force: Any = False):
"""
Takes a list of (possibly wildcarded) input column names and a list of actual column names
and determines which, if any, actual column names are matches. If any input column name
expression does not match at least one actual column, an error is thrown (unless force is
True-like, in which case a warning is logged for each unmatched expression).
:param exprs: List of (possibly wildcarded) input expressions to map to the real column names
:param cols: List of actual column names to match against
:param force: Parameter to control whether an unmatched input results in a warning log or error
:return A list of columns from cols that are matched by at least one member of exprs
"""
# Create set of column names by matching against input expressions
colname_set = set()
for expr in exprs:
# Collect any columns that match the given expr
matching_cols = set(filter(lambda col: re.fullmatch(
expr.replace("?", ".").replace("*", ".*"), col), cols))
# If any match, add them to the accumulation set and move to the next expr
if matching_cols:
colname_set |= matching_cols
continue
# No column matched the given expr, either log a warning or throw an exception
warn_or_raise(force, "Could not match any column name against column flag "
"value [%s]", expr)
return list(colname_set)
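# Example (illustrative): with exprs=["PC*", "age"] and cols=["PC1", "PC2", "age", "sex"],
# the expression "PC*" is rewritten to the regex "PC.*" and matches PC1 and PC2, so the call
# returns ["PC1", "PC2", "age"] (order is not guaranteed because matches are accumulated in
# a set).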
def _get_parser(progname: str) -> argparse.ArgumentParser:
"""
Return a parser configured for this command line utility
:param prog: Value to pass to ArgumentParser for prog (should be sys.argv[0])
:return: argparse ArgumentParser
"""
parser = argparse.ArgumentParser(prog=progname)
ifile = parser.add_argument_group(title="Input file specifications",
description="Options for input files.")
ifile.add_argument("--reg-data-file", metavar="FILE_PATH", type=str, required=True,
help="Full path to dataset where coefficients are to be corrected. "
"Contains outcome, genetic data / PGI, (optional) interaction terms, "
"covariates, (optional) weights, and (if needed) IDs.")
ifile.add_argument("--outcome", metavar="COLUMN_NAME", type=str, required=True, nargs=1,
help="Name of dependent variable column in regression data file.")
ifile.add_argument("--pgi-var", metavar="COLUMN_NAME", type=str, required=True, nargs=1,
help="Name of PGI variable column in regression data file.")
ifile.add_argument("--pgi-pheno-var", metavar="COLUMN_NAME", type=str, required=False, nargs=1,
default=[], help="Name of column in regression data file corresponding to "
"the phenotype in the PGI. If not specified, it is "
"assumed to be the same column as the outcome column")
ifile.add_argument("--pgi-interact-vars", metavar="COLUMN_NAME", type=str, required=False,
nargs="*", default=[], help="Names of columns from the regression data file "
"(separated by spaces, like VAR1 VAR2 VAR3) "
" that should be interacted with the pgi. "
"NOTE: These columns are assumed to be "
"uninteracted on input! This software will "
"handle the interaction!"
"Use \"*\" for a general wildcard and \"?\" for "
"a single wildcard character. Regular "
"expressions are used to provide this "
"functionality, so limit column characters to "
"A-Z, a-z, 0-9, _, and -. If wildcarding is "
"used, surround each term with quotes.")
ifile.add_argument("--covariates", metavar="COLUMN_NAME", type=str, nargs="+", required=True,
help="Column names from the regression data file of covariates to "
"be included in the regression (separated by spaces, like "
"VAR1 VAR2 VAR3). Use \"*\" for a general wildcard and \"?\" for a "
"single wildcard character. Regular expressions are used to provide "
"this functionality, so limit column characters to A-Z, a-z, 0-9, _, "
"and -. If wildcarding is used, surround each term with quotes.")
ifile.add_argument("--weights", metavar="COLUMN_NAME", type=str, nargs="?", default=[],
help="Optional flag specifying a column name from regression data file "
"to be used for weighted least squares.")
ofile = parser.add_argument_group(title="Output file specifications",
description="Options for output files.")
ofile.add_argument("--out", metavar="FILE_PREFIX", required=False,
default=DEFAULT_FULL_OUT_PREFIX,
help="Full prefix of output files (e.g. logs and results). If not set, "
"[current working directory]/%s = \"%s\" will be used." %
(DEFAULT_SHORT_PREFIX, DEFAULT_FULL_OUT_PREFIX))
ofile.add_argument("--output-vars", metavar="COLUMN_NAME", required=False, nargs="*",
                       help="To be used if only a subset of variables should be reported in the "
                            "results file. Useful if controlling for many features but are only "
                            "interested in seeing the output of a small number. Specify as a "
"whitespace-delimited list of variables exactly as they appear in "
"your data. If you want an interaction term in this list, append "
"\"_int\" to the corresponding covariate. If not specified, "
"all independent variables will be reported.")
paramopts = parser.add_argument_group(title="Optional parameter specifications",
description="Choose to pre-specify h^2, R^2, and "
"relatedness cutoff.")
paramopts.add_argument("--h2", metavar="PARAM", type=float, required=False,
help="Option to specify heritability of phenotype corresponding "
"to the PGI. If not specified, GCTA will be run to calculate it.")
paramopts.add_argument("--R2", metavar="PARAM", type=float, required=False,
help="Option to specify the R^2 from the regression of the PGI on its "
"corresponding phenotype. If not specified, it will be calculated.")
paramopts.add_argument("--grm-cutoff", metavar="PARAM", type=float, required=False,
default=.025, help="Relatedness cutoff for heritability estimation. "
"Used when heritability is calculated. "
"Defaults to 0.025.")
h2software = parser.add_mutually_exclusive_group()
h2software.add_argument("--gcta-exec", metavar="FILE_PATH", type=str, required=False,
help="Full path to GCTA64 software executable. If this flag is not "
"specified and GCTA is needed for heritability calculations, then it "
"will be downloaded and extracted in a temporary directory that should "
"be cleaned up after this software is finished running.")
h2software.add_argument("--bolt-exec", metavar="FILE_PATH", type=str, required=False,
help="Full path to BOLT-LMM_v2.3.4 software executable. This flag is an alternative "
"to the --gcta-exec option and should be used if you are estimating "
"heritability over a large sample (BOLT is more robust to larger datasets "
"than GCTA, but GCTA has better behavior on smaller inputs). The software "
"will assume that GCTA will be used for heritability estimation unless "
"a valid path to BOLT-LMM_v2.3.4 is supplied. The path should end at the bolt "
"executable, ie /path/to/bolt/BOLT-LMM_V2.3.4/bolt.")
h2download = parser.add_mutually_exclusive_group()
h2download.add_argument("--download-gcta", action="store_true", default=False, required=False,
help="Use this flag to download GCTA for heritability estimation. ")
h2download.add_argument("--download-bolt", action="store_true", default=False, required=False,
help="Use this flag to download BOLT-LMM for heritability estimation.")
h2opts = parser.add_argument_group(title="Heritability estimation flags",
description="Flags relevant to estimating heritability (h^2) "
"with either BOLT or GCTA. These flags are only needed "
"if you are **not** specifying the --h2 flag. If "
"neither executable is specified, GCTA will be used.")
h2opts.add_argument("--grm", metavar="FILE_PREFIX", type=str, required=False,
help="Optional argument to pass full prefix (directory included) of a "
"pre-constructed GRM for GCTA heritability estimation "
"(does not include .grm.bin, .grm.ID, .grm.N suffixes).")
h2opts.add_argument("--bfile", metavar="FILE_PREFIX", type=str, required=False,
help="Full prefix (directory included) of bed/bim/fam files to use in "
"heritability calculation "
"(does not include .bed, .bim, .fam suffixes)")
h2opts.add_argument("--pheno-file", metavar="FILE_PATH", type=str, required=False,
                        help="Full path to phenotype file for heritability calculation. The "
                             "phenotype must correspond to the phenotype used in the "
                             "construction of the PGI. Familiarize yourself with GCTA specifications "
"(https://cnsgenomics.com/software/gcta/#GREMLanalysis) and BOLT "
"specifications (https://alkesgroup.broadinstitute.org/BOLT-LMM/downloads/BOLT-LMM_v2.3.4_manual.pdf) "
"depending on which software you want used.")
h2opts.add_argument("--pheno-file-pheno-col", metavar="COLUMN_NAME", type=str, required=False, default="PHENOTYPE",
help="Column name in --pheno-file that corresponds to the phenotype, if you are "
"using BOLT to estimate heritability. ")
    jkopts = parser.add_argument_group(title="Jack knife standard error specification",
                                       description="Jack knife SE flags.")
jkopts.add_argument("--jk-se", required=False, action="store_true", help="Calculate jack-"
"knife standard errors for R^2, h^2, rho, and corrected alphas.")
jkopts.add_argument("--num-blocks", required=False, type=int, default=DEFAULT_NUM_JK_BLOCKS,
help="Number of blocks to use for jack-knifing. "
"Defaults to %s if not specified." % DEFAULT_NUM_JK_BLOCKS)
jkopts.add_argument("--id-col", required=False, nargs="*", metavar="COLUMN_NAME", default=[],
help="Column name(s) in regression data corresponding to person-level ID."
"This ID field must also correspond to the ID's in your "
"bed/bim/fam and phenotype files. If specifying an FID and IID, be sure "
"to pass the FID first.")
controlopts = parser.add_argument_group(title="Control options",
description="Flags related to program execution")
controlopts.add_argument("--force", required=False, action="store_true",
help="Flag that causes the program to continue executing past many "
"situations that ordinarily cause it to halt (e.g. specifying "
"an interaction column that is not also a covariate) This "
"option is not recommended, but if it is employed, make sure "
"to check the log / stderr for warnings to confirm that they "
"are acceptable.")
controlopts.add_argument("--logging-level", required=False, type=str.lower, default="info",
help="Level of verbosity of log file. One of \"debug\", \"info\", or "
"\"warn\". The \"debug\" level will be the most verbose, giving "
"detailed information at all levels of execution. The \"info\" "
"level is the default and is recommended if you are confident "
"in your specification. Lastly, \"warn\" will print sparsely, "
"only if something problematic is identified.")
controlopts.add_argument("--num-threads", required=False, type=int, default=1,
help="Optional flag to specify the number of threads for GCTA operations."
"Encouraged for GCTA/BOLT operations over large datasets. As a rule of "
"thumb, do not specify more threads than cores in your machine.")
return parser
def get_h2_software(dest_dir: str, gcta: bool = True) -> str:
"""
    Downloads and unzips h^2 estimation software (GCTA or BOLT-LMM) from the internet. Assumes
    the destination directory is temporary, so this function is not required to clean up
    after itself.
    :param dest_dir: Destination directory to which the software will be downloaded.
    :param gcta: Indicator for whether GCTA should be installed (otherwise BOLT)
    :return: Full path to the downloaded executable
"""
if gcta:
# Construct the expected full path to GCTA executable
full_path_exec = "%s/%s/%s" % (dest_dir, GCTA_VERSION, GCTA_EXEC)
# Determine zipfile name and then full path
full_path_to_zipped_file = "%s/%s.zip" % (dest_dir, GCTA_VERSION)
# Determine download URL and then fetch the file into the specified destination directory
full_url = "%s/%s.zip" % (GCTA_URL, GCTA_VERSION)
else:
# Path to BOLT executable
full_path_exec = "%s/%s/%s" % (dest_dir, BOLT_VERSION, BOLT_EXEC)
# Zipfile full path
full_path_to_zipped_file = "%s/%s.tar.gz" % (dest_dir, BOLT_VERSION)
# BOLT URL
full_url = "%s/%s.tar.gz" % (BOLT_URL, BOLT_VERSION)
logging.info("Downloading %s...", full_url)
wget.download(full_url, out=dest_dir)
# read contents -- GCTA is zipped, BOLT is tarred (need to handle separately)
    file = zipfile.ZipFile(full_path_to_zipped_file) if gcta else tarfile.open(full_path_to_zipped_file)
# Extract the zipfile to the destination directory
try:
file.extractall(path=dest_dir)
# Set permissions on GCTA executable to allow for use
os.chmod(full_path_exec, stat.S_IXUSR)
except PermissionError:
logging.error("Make sure you have the proper permissions to execute files in the "
"directory [%s].", dest_dir)
raise
    logging.info("Successfully installed %s and updated permissions.", "GCTA" if gcta else "BOLT")
return full_path_exec
def get_user_inputs(argv: List[str], parsed_args: argparse.Namespace) -> Dict[str, Any]:
"""
Create dictionary of user-specified options/flags and their values. Leverages the argparse
parsing output to glean the actual value, but checks for actual user-set flags in the input
:param argv: Tokenized list of inputs (meant to be sys.argv in most cases)
:param parsed_args: Result of argparse parsing the user input
:return: Dictionary containing user-set args keyed to their values
"""
# Search for everything beginning with "--" (flag names), strip off the --, take everything
# before any "=", and convert - to _
user_set_args = {to_arg(token[2:].split("=")[0]) for token in argv if token.startswith("--")}
# Since any flag actually specified by the user shouldn't have been replaced by a default
# value, one can grab the actual value from argparse without having to parse again
return {user_arg:getattr(parsed_args, user_arg) for user_arg in user_set_args}
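# Example (illustrative): for argv such as
#   ["pgi_correct.py", "--reg-data-file=data.txt", "--jk-se", "--num-blocks", "50"]
# the flag names recovered here are {"reg_data_file", "jk_se", "num_blocks"}, and the
# returned dict maps each of those names to the value argparse parsed for it
# (e.g. "num_blocks": 50).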
def estimate_h2(iargs: InternalNamespace, gcta_exec: str, pheno_file: str, temp_dir: str, grm_cutoff: float,
grm_prefix: str, num_threads: int, suppress_stdout: Any = None) -> float:
"""
    Use GCTA or BOLT to estimate SNP h^2; assumes a GRM is available when GCTA is used
:param iargs: PGI arguments
:param gcta_exec: Full path to GCTA executable
:param pheno_file: Full path to phenotypic file.
:param temp_dir: Full path to temporary directory to use for GCTA results
:param grm_cutoff: Relatedness cutoff
:param grm_prefix: Full prefix of GRM files
:param num_threads: Number of threads for GCTA.
:param suppress_stdout: If not False-ish, routes GCTA stdout to /dev/null
:return: GCTA estimate of heritability
"""
# Call GCTA to have it estimate heritability
logging.info("\nEstimating heritability using %s..." % iargs.software)
full_h_prefix = temp_dir + "/h2est"
hlog_filename = full_h_prefix + ".log"
if iargs.use_gcta:
cmd_str = "%s --grm %s --pheno %s --reml --grm-cutoff %s --out %s --threads %s" \
% (gcta_exec, grm_prefix, pheno_file, grm_cutoff, full_h_prefix, num_threads)
_log_and_run_os_cmd(cmd_str, suppress_stdout)
else:
cmd_str = "%s --reml --phenoFile=%s --phenoCol=%s --numThreads=%s --bfile=%s --maxModelSnps=2000000" \
% (iargs.bolt_exec, pheno_file, iargs.pheno_file_pheno_col, num_threads, iargs.bfile)
# BOLT doesn't generate a log file so we need to capture stdout and put it in the right place
cmd_str = cmd_str + " > %s" % full_h_prefix + ".log" if suppress_stdout else cmd_str + " | tee %s" % hlog_filename
_log_and_run_os_cmd(cmd_str, False)
# Scan the log file(s) to retrieve the value
with open(hlog_filename, "r") as logfile:
for line in logfile:
if iargs.use_gcta:
if "V(G)/Vp" in line:
heritability = float(line.split("\t")[1])
logging.debug("Estimated GCTA heritability of the trait is %f", heritability)
return heritability
else:
if "h2g (1,1):" in line:
                    heritability = float(line.split(":")[1].split(" ")[1])
logging.debug("Estimated BOLT heritability of the trait is %f", heritability)
return heritability
raise LookupError("Could not find heritability in logfile: " + hlog_filename)
def _log_and_run_os_cmd(cmd_str: str, suppress_stdout: Any = None):
"""
Function to run something from the command line (after logging the command)
:param cmd_str: The command to run
:param suppress_stdout: If not False-ish, send command std output to /dev/null
"""
if suppress_stdout:
cmd_str = cmd_str + " >/dev/null"
logging.debug(format_os_cmd(cmd_str.split()))
os.system(cmd_str)
def build_grm(gcta_exec: str, bfile_full_prefix: str, grm_dir: str, num_threads: int,
suppress_stdout: Any = None) -> str:
"""
Function to build GRM using GCTA.
:param gcta_exec: Full path to GCTA executable
:param bfile_full_prefix: Full prefix of bed/bim/fam files.
:param grm_dir: Directory in which to place GRM files
:param num_threads: Number of threads for GCTA operation.
:param suppress_stdout: If not False-ish, sends GCTA std output to /dev/null
:return: Full prefix of GRM files
"""
# Determine full path prefix for the GRM
grm_full_prefix = "%s/%s" % (grm_dir, DEFAULT_SHORT_PREFIX)
# Direct GCTA to create the matrix
cmd_str = "%s --bfile %s --make-grm --out %s --threads %s" %\
(gcta_exec, bfile_full_prefix, grm_full_prefix, num_threads)
_log_and_run_os_cmd(cmd_str, suppress_stdout)
return grm_full_prefix
def estimate_R2(data: pd.DataFrame, pheno: List[str], pgi: List[str]) -> float:
"""
Returns the R^2 from the regression of phenotype on PGI for the phenotype corresponding to the
PGI.
:param data: Pandas DataFrame containing phenotype and PGI.
:param pheno: List containing column name in data corresponding to phenotype.
:param pgi: List containing column name in data corresponding to PGI.
:return: R^2 from regression of phenotype on PGI.
"""
reg = sm.OLS(data[pheno], data[pgi + CONS_COLS])
rsq = reg.fit().rsquared
return rsq
def adjust_regression_data(orig_reg_data: pd.DataFrame, iargs: InternalNamespace) -> pd.DataFrame:
"""
Performs fixes (not necessarily in this order):
1) Add a constant to the dataset.
2) Check for low variance columns.
3) Remove NaN's.
4) Standardize PGI
5) Generate interaction columns
6) Rearrange column order and omit unused columns
:param orig_reg_data: Dataframe containing raw data
:param iargs: Internal namespace for this software
:return: Regression data processed as indicated above
"""
# Determine the lists of columns in the adjusted dataframe
# (make sure order of w's is the same as z_int)
iargs.z_cols = iargs.covariates
iargs.y_cols = iargs.outcome
iargs.wt_cols = iargs.weights
iargs.z_int_cols = iargs.pgi_interact_vars
interact_dict = {z_int_col : (z_int_col+"_int") for z_int_col in iargs.z_int_cols}
iargs.w_cols = [interact_dict[z_int_col] for z_int_col in iargs.z_int_cols]
iargs.G_cols = iargs.pgi_var + iargs.w_cols
iargs.alpha_cols = iargs.G_cols + iargs.z_cols
# Create the (blank except for column-labels) adjusted dataframe
reg_data_cols = iargs.alpha_cols + iargs.y_cols + CONS_COLS + iargs.wt_cols + iargs.id_col
reg_data = pd.DataFrame(columns=reg_data_cols)
# Copy y, z, and wts columns over as-is (side effect: sets the number of rows in the DF)
for cols in [iargs.y_cols, iargs.z_cols, iargs.wt_cols, iargs.id_col]:
for col in cols:
reg_data[col] = orig_reg_data[col].to_numpy()
# Copy the PGI to the new DF, standardizing it on the way
g_col_name = iargs.pgi_var[0]
g_mean = np.mean(orig_reg_data[g_col_name])
g_std = np.std(orig_reg_data[g_col_name])
if g_std == 0.0:
raise ValueError("PGI column \"%s\" has variance zero! Unable to proceed." %
g_col_name)
stdized_pgi_vect = (orig_reg_data[g_col_name].to_numpy() - g_mean) / g_std
reg_data[g_col_name] = stdized_pgi_vect
# Generate the interaction (w) columns
# (multiply the correct z_int column component-wise by PGI and copy to new table)
for z_int_col in iargs.z_int_cols:
reg_data[interact_dict[z_int_col]] = np.multiply(orig_reg_data[z_int_col].to_numpy(),
stdized_pgi_vect)
# Set constant column (need to do that sometime after at least one other column is copied, so
# the number of rows is determined)
reg_data[CONS_COL_NAME] = 1
# Drop any rows / individuals in data with NaN present as a value
reg_data.dropna(inplace=True)
# Check for 0 variance columns and raise error if any (other than CONS_COL_NAME) exist.
var_of_col = dict(reg_data.var())
zero_var_cols = {col_name for col_name in var_of_col if col_name != CONS_COL_NAME and
np.isclose(var_of_col[col_name], 0.0)}
if zero_var_cols:
raise ValueError("Column(s) %s in data has/have a very low variance. "
"Please remove it to allow matrices to invert." % zero_var_cols)
return reg_data
def se_helper(df: pd.DataFrame) -> pd.Series:
"""
Calculates the standard errors for a set of jack-knife runs
:param df: DataFrame where each row is a JK iteration
:return: Series containing the JK standard errors
"""
num_rows = len(df.index)
return np.sqrt(float(num_rows - 1)) * df.std(axis=0, ddof=0)
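# Illustrative sketch of the jack-knife SE formula above (values are made up; pd and
# np are the module-level pandas/numpy imports assumed throughout this file):
#
#   jk = pd.DataFrame({"h2": [0.50, 0.52, 0.48], "R2": [0.10, 0.11, 0.09]})
#   se_helper(jk)
#   # equals sqrt(3 - 1) * jk.std(axis=0, ddof=0), i.e. the delete-one-block
#   # jack-knife standard error for each column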
def jack_knife_se(iargs: InternalNamespace, reg_data: pd.DataFrame, pgic_result: InternalNamespace):
"""
1) Assign each person to a jack knife iteration.
2) For each iteration:
- restrict bfile, grm, regression data to people _not_ in block
- estimate R^2, h^2, rho, alpha_{corr}
- store results
3) Calculate SE's using formula
:param iargs: Internal namespace object that holds internal values and parsed user inputs
:param reg_data: Dataframe containing regression data
:param pgic_result: Internal namespace object that holds initial pgic results
"""
logging.info("Beginning jack knife estimation.")
# Check for a number of blocks that's too large
if iargs.num_blocks > reg_data.shape[0]:
raise ValueError("You cannot specify more jack knife blocks than there are people in "
"your --reg-data-file. You specified %s blocks, and your data is %s rows" %
(iargs.num_blocks, reg_data.shape[0]))
# Label iterations and duplicate ID col to make GCTA happy (if needed)
reg_data_shuf = reg_data.sample(frac=1).reset_index(drop=True)
if iargs.calc_h2:
reg_data_shuf["FID"] = reg_data_shuf[iargs.id_col[0]]
# If a second ID column is specified, use that, otherwise the two ID columns are equivalent
reg_data_shuf["IID"] = reg_data_shuf[iargs.id_col[1]] if len(iargs.id_col) == 2 else \
reg_data_shuf.FID
block_size = np.ceil(reg_data_shuf.shape[0] / iargs.num_blocks)
reg_data_shuf["iteration"] = [int(i / block_size) for i in range(reg_data_shuf.shape[0])]
# Gather results from each JK iteration
uncorr_alpha_cols = ["uncorr_" + c for c in iargs.alpha_cols]
corr_alpha_cols = ["corr_" + c for c in iargs.alpha_cols]
result_cols = ["h2", "R2", "rho"] + uncorr_alpha_cols + corr_alpha_cols
jk_res_table = pd.DataFrame(index=range(iargs.num_blocks), columns=result_cols)
# Run the jack knife iterations
for iter_num in range(iargs.num_blocks):
iter_result = leave_out_est(iter_num, iargs, reg_data_shuf, iargs.grm, iargs.z_cols)
h2_r2_rho = np.array([iter_result["h2"], iter_result["R2"], iter_result["rho"]])
jk_res_table.iloc[iter_num] = np.concatenate(
(h2_r2_rho, iter_result["alpha_uncorr"], iter_result["alpha_corr"]))
logging.debug("\nJack-knife result table:\n%s", jk_res_table)
# Calculate standard errors
se_vector = se_helper(jk_res_table)
logging.debug("\nJack-knife standard errors:\n%s", se_vector)
# Fill in standard errors in the internal namespace object
pgic_result.h2_se = None if iargs.h2 else se_vector["h2"]
pgic_result.R2_se = None if iargs.R2 else se_vector["R2"]
pgic_result.rho_se = None if iargs.h2 and iargs.R2 else se_vector["rho"]
pgic_result.uncorrected_alphas_se = se_vector.loc[uncorr_alpha_cols]
pgic_result.corrected_alphas_se = se_vector.loc[corr_alpha_cols].to_numpy()
def leave_out_est(iteration: int, iargs: InternalNamespace, reg_data: pd.DataFrame,
grm_prefix: str, covs: List) -> Dict:
"""
    Remove one block from the GRM and regression data and run the estimation. Uses the temporary
    directory indicated in iargs and assumes no responsibility for cleanup
:param iteration: Current block number of jack knife iteration.
:param iargs: Internal namespace object that holds internal values and parsed user inputs
:param reg_data: DataFrame of regression data.
    :param grm_prefix: Full prefix of GRM files
:param covs: Covariates in specification.
:return: Parameter estimates as a dictionary
"""
logging.info("\n============= JK ITERATION %s =============\n", iteration)
# Make copy of internal namespace
iargs_copy = copy.copy(iargs)
# If we need a restricted GRM, make that now
if iargs.calc_h2:
full_path_to_restricted_person_list = "%s/removed_%s.txt" % (iargs.temp_dir, iteration)
full_prefix_to_restricted_grm = "%s/removed_grm_%s" % (iargs.temp_dir, iteration)
remove = reg_data[reg_data.iteration == iteration]
remove[["FID", "IID"]].to_csv(full_path_to_restricted_person_list, sep=" ",
index=False, header=None)
grm_transformation_cmd = "%s --grm %s --remove %s --out %s --make-grm --threads %s" % (
iargs.gcta_exec, grm_prefix, full_path_to_restricted_person_list,
full_prefix_to_restricted_grm, iargs.num_threads)
_log_and_run_os_cmd(grm_transformation_cmd, iargs.quiet_h2)
iargs_copy.grm = full_prefix_to_restricted_grm
# Call the main procedure with a pared down dataframe and GRM (only included if it's needed)
restricted_reg_data = reg_data[reg_data.iteration != iteration]
iargs_copy.reg_data_file = None
corr_result = error_correction_procedure(iargs_copy, restricted_reg_data)
# Return the results in dictionary form
return {"h2" : corr_result.h2, "R2" : corr_result.R2, "rho" : corr_result.rho,
"alpha_corr" : corr_result.corrected_alphas,
"alpha_uncorr" : corr_result.uncorrected_alphas}
def calculate_corrected_coefficients(corr_matrix: np.ndarray,
coefficients: np.ndarray) -> np.ndarray:
"""
Generates corrected coefficients from a correction matrix and uncorrected coefficients
:param corr_matrix: Correction matrix to use
:param coefficients: Coefficients to be corrected
:return: Array of corrected coefficients
"""
return np.matmul(corr_matrix, coefficients)
def calculate_corrected_coeff_stderrs(corr_matrix: np.ndarray,
var_cov_matrix: np.ndarray) -> np.ndarray:
"""
Generates corrected coefficient standard errors
:param corr_matrix: Correction matrix to use
:param var_cov_matrix: Variance-covariance matrix of coefficients to correct
:return: Array of corrected coefficient standard errors
"""
prod = np.linalg.multi_dot([corr_matrix, var_cov_matrix, corr_matrix.T])
return np.sqrt(np.diagonal(prod))
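# Illustrative sketch of the two correction helpers above (made-up 2x2 values, not
# taken from any real run):
#
#   C = np.array([[1.25, -0.10],
#                 [0.00,  1.00]])               # correction matrix
#   alpha = np.array([0.40, 0.05])              # uncorrected coefficients
#   V = np.diag([0.01, 0.02])                   # their variance-covariance matrix
#   calculate_corrected_coefficients(C, alpha)  # C @ alpha
#   calculate_corrected_coeff_stderrs(C, V)     # sqrt(diag(C @ V @ C.T))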
def calculate_center_matrix(V_ghat: np.ndarray, rho: float, z_int_mean: np.ndarray,
z_int_cov: np.ndarray) -> np.ndarray:
"""
Calculates the center matrix component of the product that is the final correction matrix.
:param V_ghat: Covariance matrix of [G, z]
:param rho: Value of rho to use
:param z_int_mean: Mean of z_int
:param z_int_cov: Covariance matrix of z_int
:return: Center matrix to use in correction matrix product
"""
# Calculate values used later
rho_sq_recip = pow(rho, -2)
one_minus_rho_sq_recip = 1.0 - rho_sq_recip
z_int_count = z_int_cov.shape[0] if z_int_cov.shape else 1 # Shape is () if 1x1 matrix
# Take V_ghat, replace (0,0) entry with 1/rho^2, and modify the rest of the matrix as needed
mod_copy_of_V_ghat = V_ghat.copy()
mod_copy_of_V_ghat[0, 0] = rho_sq_recip
mod_copy_of_V_ghat[0, 1:z_int_count+1] -= (one_minus_rho_sq_recip * z_int_mean)
mod_copy_of_V_ghat[1:z_int_count+1, 0] = mod_copy_of_V_ghat[0, 1:z_int_count+1].T
mod_copy_of_V_ghat[1:z_int_count+1, 1:z_int_count+1] -= one_minus_rho_sq_recip * (z_int_cov +
np.outer(z_int_mean, z_int_mean))
# Return inverse of the previously calculated matrix
logging.debug("\nuninverted center matrix = \n%s", mod_copy_of_V_ghat)
return np.linalg.inv(mod_copy_of_V_ghat)
def calculate_correction_matrix(G_cols: List[str], z_cols: List[str], z_int_cols: List[str],
df: pd.DataFrame, rho: float) -> np.ndarray:
"""
Generates the correction matrix
:param G_cols: List of columns in G vector = pgi_var column followed by interaction columns
:param z_cols: List of columns in z vector = covariate columns
:param z_int_cols: List of z columns that correspond (in order) to the non-pgi elements of G
:param df: DataFrame with the required regression data
:param rho: Value of rho
:return: Matrix used to correct coefficients and standard errors
"""
# Determine the needed relevant smaller DFs from column subsets
df_Gz = df[G_cols + z_cols]
df_z_int = df[z_int_cols]
# Useful values
size_of_G = len(G_cols)
# Calculate the V_ghat matrix (rightmost matrix of the 3-matrix product)
V_ghat = np.cov(df_Gz, rowvar=False)
logging.debug("\nV_ghat = \n%s", V_ghat)
# Calculate center matrix (start with V_ghat, since it shares much of that matrix)
z_int_mean = np.mean(df_z_int, axis=0)
logging.debug("\nz_int_mean = \n%s", z_int_mean)
z_int_cov = np.cov(df_z_int, rowvar=False)
logging.debug("\nz_int_cov = \n%s", z_int_cov)
center_matrix = calculate_center_matrix(V_ghat, rho, z_int_mean, z_int_cov)
logging.debug("\ncenter_matrix = \n%s", center_matrix)
# Calculate the correction matrix
corr_matrix = np.matmul(center_matrix, V_ghat) # Almost correct, needs one more multiplication
corr_matrix[0:size_of_G] *= np.reciprocal(rho) # Adjust to account for lefthand matmul
logging.debug("\nCorrection matrix = \n%s", corr_matrix)
return corr_matrix
def get_alpha_ghat(y_cols: List[str], G_cols: List[str], z_cols: List[str], wt_cols: List[str],
reg_data: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Runs the regression to get the initial estimate of coefficients and standard errors
:param y_cols: List containing the name of the outcome column
:param G_cols: List of columns in G vector = pgi_var column followed by interaction columns
:param z_cols: List of columns in z vector = covariate columns
:param wt_cols: List containing the weights column if the regression should be weighted
:param reg_data: DataFrame with the required regression data
:return: Calculated coefficients, standard errors, and variance-covariance matrix
"""
# Set up the regression
if wt_cols:
reg = sm.WLS(reg_data[y_cols], reg_data[G_cols + z_cols + CONS_COLS], weights=reg_data[wt_cols])
else:
reg = sm.OLS(reg_data[y_cols], reg_data[G_cols + z_cols + CONS_COLS])
# Calculate the regression
reg_fit = reg.fit()
    return (reg_fit.params.drop(CONS_COL_NAME).rename(UNCORR_COEF_COLUMN),
            reg_fit.bse.drop(CONS_COL_NAME).rename(UNCORR_COEF_SE_COLUMN),
            reg_fit.cov_params().drop(CONS_COL_NAME).drop(CONS_COL_NAME, axis=1))
def error_correction_procedure(iargs: InternalNamespace, reg_data: pd.DataFrame):
"""
Implementation of error correction procedure.
:param iargs: Holds arguments passed in by user.
:param reg_data: Regression data.
:return: Object holding corrected and uncorrected coefficients and standard errors along with
rho, h^2, R^2, and sample size (n)
"""
# Create object to hold all the return values
result = InternalNamespace()
# If heritability is specified, add it to results, otherwise estimate it.
result.h2 = iargs.h2 if iargs.h2 else estimate_h2(iargs, iargs.gcta_exec, iargs.pheno_file, iargs.temp_dir,
iargs.grm_cutoff, iargs.grm, iargs.num_threads,
iargs.quiet_h2)
# Store sample size of data to report in results.
result.n = reg_data.shape[0]
# Determine R^2 (calculate if necessary)
result.R2 = iargs.R2 if iargs.R2 else estimate_R2(reg_data, iargs.pgi_pheno_var, iargs.pgi_var)
# Calculate rho based on h^2 and R^2
result.rho = calculate_rho(h2=result.h2, r2=result.R2)
logging.debug("rho is estimated to be sqrt(%f/%f) = %f", result.h2, result.R2, result.rho)
if result.rho < 1.0:
warn_or_raise(iargs.force, "It is unexpected that your estimated rho (%f) = sqrt(%f/%f) "
"is less than 1.0. You should double-check that the dependent variable in the R^2 "
"calculation corresponds to the PGI phenotype.", result.rho, result.h2, result.R2)
# Calculate initial regression values
logging.debug("Calculating uncorrected coefficients(s) and standard error(s)...")
result.uncorrected_alphas, result.uncorrected_alphas_se, var_cov_matrix = get_alpha_ghat(
iargs.y_cols, iargs.G_cols, iargs.z_cols, iargs.wt_cols, reg_data)
# Calculate the correction matrix
logging.debug("Getting correction matrix...")
corr_matrix = calculate_correction_matrix(iargs.G_cols, iargs.z_cols, iargs.z_int_cols,
reg_data, result.rho)
# Use that correction matrix to correct coefficients and standard errors
logging.debug("Correcting coefficients and standard error(s)...")
result.corrected_alphas = calculate_corrected_coefficients(corr_matrix,
result.uncorrected_alphas)
result.corrected_alphas_se = calculate_corrected_coeff_stderrs(corr_matrix, var_cov_matrix)
# Set standard errors for h^2, R^2, and rho to be None (need jack-knifing to calculate those)
result.h2_se = None
result.R2_se = None
result.rho_se = None
return result
def calculate_rho(h2: float, r2: float) -> float:
"""
Helper function used to calculate rho given h^2 and R^2
:param h2: Heritability
:param r2: Coefficient of determination
"""
return np.sqrt(h2 / r2)
def report_results(iargs: InternalNamespace, pgic_result: InternalNamespace):
"""
Function that handles logging results and writing them to a results file
:param iargs: Internal namespace of values directly or indirectly from parsed inputs
:param pgic_result: Results from the initial running of the pgi correction method
"""
# Determine output file
full_outputfile_path = iargs.out + ".res"
# Make standard error names a little shorter
h2_se = pgic_result.h2_se
R2_se = pgic_result.R2_se
rho_se = pgic_result.rho_se
uncorr_alphas_se = pgic_result.uncorrected_alphas_se
corr_alphas_se = pgic_result.corrected_alphas_se
# Construct results summary string
results_summary = RESULTS_SUMMARY_FSTR % (pgic_result.n,
pgic_result.h2,
h2_se if h2_se else ASSUMED_VAL,
pgic_result.R2,
R2_se if R2_se else ASSUMED_VAL,
pgic_result.rho,
rho_se if rho_se else ASSUMED_VAL)
# Send results summary to the log file, terminal, and .res file
logging.info("\n\nResults summary:\n%s\n", results_summary)
with open(full_outputfile_path, mode='w') as out_file:
print(results_summary, "="*20, '\n', sep='\n', file=out_file)
# Log and then write the coefficient results to the .res file
out_data = | pd.DataFrame(columns=OUTPUT_COLUMNNAMES) | pandas.DataFrame |
import os
from functools import partial
from typing import Any, Dict
import numpy as np
import pandas as pd
import tensorflow as tf
import torch
from lenet_analysis import LenetAnalysis, get_class_per_layer_traced_edges
from PIL import Image
from torch.autograd import Variable
from torch.nn import Module
from torchvision.models import AlexNet
from torchvision.transforms import transforms
from nninst import mode
from nninst.backend.pytorch.model import LeNet as TorchLeNet
from nninst.backend.tensorflow.dataset import mnist
from nninst.backend.tensorflow.graph import model_fn_with_fetch_hook
from nninst.backend.tensorflow.model import AlexNet as TFAlexNet
from nninst.backend.tensorflow.model import LeNet as TFLeNet
from nninst.backend.tensorflow.trace import (
lenet_mnist_class_trace,
reconstruct_trace_from_tf,
)
from nninst.backend.tensorflow.utils import new_session_config
from nninst.dataset import mnist_info
from nninst.utils.fs import CsvIOAction, abspath
from nninst.utils.numpy import arg_approx
from nninst.utils.ray import ray_init, ray_map
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
def iterate_model(module: Module, structure: Dict, action, *args, **kwargs):
children = dict(module.named_modules())
for key, value in structure.items():
child = children[str(key)]
if isinstance(value, str):
name = value
action(name, child, *args, **kwargs)
elif isinstance(value, dict):
iterate_model(child, value, action, *args, **kwargs)
else:
raise TypeError(f"value: {value}, type: {type(value)}")
def to_numpy(tensor) -> np.ndarray:
return tensor.data.cpu().numpy()
def import_model_from_pytorch():
model = TorchLeNet()
path = abspath("lenet_model.pth")
model.load_state_dict(torch.load(path))
structure = {
"features": {
"0": "conv_1",
"1": "relu_1",
"2": "pool_1",
"3": "conv_2",
"4": "relu_2",
"5": "pool_2",
},
"classifier": {
"0": "linear_3",
"1": "relu_3",
"2": "linear_4",
"3": "relu_4",
"4": "linear_5",
},
}
tf_to_torch_variables = {
"conv2d/kernel:0": "conv_1/weight",
"conv2d/bias:0": "conv_1/bias",
"conv2d_1/kernel:0": "conv_2/weight",
"conv2d_1/bias:0": "conv_2/bias",
"dense/kernel:0": "linear_3/weight",
"dense/bias:0": "linear_3/bias",
"dense_1/kernel:0": "linear_4/weight",
"dense_1/bias:0": "linear_4/bias",
"dense_2/kernel:0": "linear_5/weight",
"dense_2/bias:0": "linear_5/bias",
}
def fetch_variables(name: str, layer: Module):
if "linear" in name or "conv" in name:
if "conv" in name:
variables[f"{name}/weight"] = np.transpose(
to_numpy(layer.weight), (2, 3, 1, 0)
)
else:
variables[f"{name}/weight"] = np.transpose(
to_numpy(layer.weight), (1, 0)
)
variables[f"{name}/bias"] = to_numpy(layer.bias)
variables = {}
iterate_model(model, structure, fetch_variables)
lenet = TFLeNet()
input_tensor = tf.placeholder(tf.float32, (None, 1, 28, 28))
lenet(input_tensor)
tf.train.create_global_step()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for variable in tf.global_variables():
if variable.name != "global_step:0":
variable.load(
variables[tf_to_torch_variables[variable.name]], session=sess
)
saver = tf.train.Saver()
saver.save(sess, abspath("tf/lenet/model_import/model"))
def import_alexnet_model_from_pytorch():
model = AlexNet()
path = abspath("cache/alexnet-owt-4df8aa71.pth")
model.load_state_dict(torch.load(path))
structure = {
"features": {
"0": "conv_1",
"1": "relu_1",
"2": "pool_1",
"3": "conv_2",
"4": "relu_2",
"5": "pool_2",
"6": "conv_3",
"7": "relu_3",
"8": "conv_4",
"9": "relu_4",
"10": "conv_5",
"11": "relu_5",
"12": "pool_5",
},
"classifier": {
"0": "dropout_6",
"1": "linear_6",
"2": "relu_6",
"3": "dropout_7",
"4": "linear_7",
"5": "relu_7",
"6": "linear_8",
},
}
tf_to_torch_variables = {
"conv2d/kernel:0": "conv_1/weight",
"conv2d/bias:0": "conv_1/bias",
"conv2d_1/kernel:0": "conv_2/weight",
"conv2d_1/bias:0": "conv_2/bias",
"conv2d_2/kernel:0": "conv_3/weight",
"conv2d_2/bias:0": "conv_3/bias",
"conv2d_3/kernel:0": "conv_4/weight",
"conv2d_3/bias:0": "conv_4/bias",
"conv2d_4/kernel:0": "conv_5/weight",
"conv2d_4/bias:0": "conv_5/bias",
"dense/kernel:0": "linear_6/weight",
"dense/bias:0": "linear_6/bias",
"dense_1/kernel:0": "linear_7/weight",
"dense_1/bias:0": "linear_7/bias",
"dense_2/kernel:0": "linear_8/weight",
"dense_2/bias:0": "linear_8/bias",
}
def fetch_variables(name: str, layer: Module):
if "linear" in name or "conv" in name:
if "conv" in name:
variables[f"{name}/weight"] = np.transpose(
to_numpy(layer.weight), (2, 3, 1, 0)
)
else:
variables[f"{name}/weight"] = np.transpose(
to_numpy(layer.weight), (1, 0)
)
variables[f"{name}/bias"] = to_numpy(layer.bias)
variables = {}
iterate_model(model, structure, fetch_variables)
alexnet = TFAlexNet()
input_tensor = tf.placeholder(tf.float32, (None, 224, 224, 3))
alexnet(input_tensor)
tf.train.create_global_step()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for variable in tf.global_variables():
if variable.name != "global_step:0":
variable.load(
variables[tf_to_torch_variables[variable.name]], session=sess
)
saver = tf.train.Saver()
saver.save(sess, abspath("tf/alexnet/model_import/model"))
def lenet_model_fn(features, labels, mode):
"""The model_fn argument for creating an Estimator."""
model = TFLeNet()
image = features
if isinstance(image, dict):
image = features["image"]
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(image, training=False)
predictions = {
"predict": tf.argmax(logits, axis=1),
"logit": tf.reduce_max(logits, axis=1),
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={"classify": tf.estimator.export.PredictOutput(predictions)},
)
def compare_predict():
def get_predict() -> pd.DataFrame:
def get_row(image_id: int) -> dict:
data_dir = abspath("/home/yxqiu/data/mnist/raw")
model_dir = abspath("tf/lenet/model_import/")
# model_dir = abspath("tf/lenet/model/")
estimator_config = tf.estimator.RunConfig(
session_config=new_session_config()
)
classifier = tf.estimator.Estimator(
model_fn=lenet_model_fn, model_dir=model_dir, config=estimator_config
)
predictions = list(
classifier.predict(
input_fn=lambda: mnist.test(data_dir)
.skip(image_id)
.take(1)
.batch(1)
)
)
def get_image_val(image: Image) -> Variable:
normalize = transforms.Normalize(mean=(0.1307,), std=(0.3081,))
transform = transforms.Compose([transforms.ToTensor(), normalize])
tensor = transform(image).unsqueeze(0)
image_val = Variable(tensor, volatile=True)
return image_val
image, label = mnist_info.test().image_with_label(image_id)
model = TorchLeNet()
path = abspath("lenet_model.pth")
model.load_state_dict(torch.load(path))
image_val = get_image_val(image)
model.eval()
result = model(image_val).data.cpu().numpy()
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
"label": label,
**map_prefix(predictions[0], "tf"),
**map_prefix(
{"predict": result.argmax(), "logit": result.max()}, "pytorch"
),
}
return row
mode.debug()
# mode.distributed()
ray_init()
# traces = ray_map(get_row, (image_id for image_id in range(mnist_info.test().size)),
traces = ray_map(
get_row,
(image_id for image_id in range(10)),
chunksize=1,
out_of_order=True,
num_gpus=1,
)
return pd.DataFrame(traces)
return CsvIOAction("compare_predict.csv", init_fn=get_predict)
def compare_trace():
def get_trace() -> pd.DataFrame:
def get_row(image_id: int) -> dict:
threshold = 0.5
data_dir = abspath("/home/yxqiu/data/mnist/raw")
# model_dir = abspath("tf/lenet/model_import/")
model_dir = abspath("tf/lenet/model/")
model_fn = partial(
model_fn_with_fetch_hook,
create_model=lambda: TFLeNet(data_format="channels_first"),
)
tf_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: mnist.train(data_dir).skip(image_id).take(1).batch(1),
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
)[0]
def get_image_val(image: Image) -> Variable:
normalize = transforms.Normalize(mean=(0.1307,), std=(0.3081,))
transform = transforms.Compose([transforms.ToTensor(), normalize])
tensor = transform(image).unsqueeze(0)
image_val = Variable(tensor, volatile=True)
return image_val
analysis = LenetAnalysis()
pytorch_trace = analysis.infer_info(image_id).layer_traces(
threshold, arg_approx
)
print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {"image_id": image_id}
return row
mode.debug()
# mode.distributed()
ray_init()
# traces = ray_map(get_row, (image_id for image_id in range(mnist_info.train().size)),
traces = ray_map(
get_row,
(image_id for image_id in range(10)),
chunksize=1,
out_of_order=True,
num_gpus=1,
)
return pd.DataFrame(traces)
return CsvIOAction("compare_trace.csv", init_fn=get_trace)
def compare_class_trace():
def get_trace() -> pd.DataFrame:
def get_row(class_id: int) -> dict:
threshold = 0.5
tf_label = "norm"
torch_label = "best_in_10"
tf_trace = lenet_mnist_class_trace(
class_id, threshold, label=tf_label
).load()
analysis = LenetAnalysis()
torch_trace = {
layer_name: (
get_class_per_layer_traced_edges(
class_id, threshold, layer_name, label=torch_label
).index.get_values()
)
for layer_name in analysis.lenet.weighted_layer_names
}
print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
row = {}
return row
mode.debug()
# mode.distributed()
ray_init()
# traces = ray_map(get_row, (image_id for image_id in range(mnist_info.train().size)),
traces = ray_map(
get_row,
(class_id for class_id in range(10)),
chunksize=1,
out_of_order=True,
num_gpus=1,
)
return | pd.DataFrame(traces) | pandas.DataFrame |
import numpy
import yaml
import pathlib
import pandas
import geopandas as gpd
from decimal import *
def decimal_divide(numerator, denominator, precision):
"""Returns a floating point representation of the
mathematically correct answer to division of
a numerator with a denominator, up to precision equal to
'precision'.
Inputs:
numerator can be either a scalar value or vector.
denominator must be a singular value.
precision defines the order of precision, e.g.,
precision = 10 allows for ten orders of magnitude or 1e-10
decimal places.
Note: No error catches are included.
Python binary arithmatic errors occur for precisions greater
than 1e-9 decimal places and this method ensures that the sum
of weighted values is 1 (and not affected by binary arithmatic
errors).
"""
result_list = []
fraction_decimal = numpy.around(
numerator / denominator,
decimals = precision
)
# fraction_float = fraction_decimal
# if type(numerator) == int or type(numerator) == numpy.float64:
# getcontext().prec = precision
# fraction_decimal = Decimal(numerator)/Decimal(denominator)
# result_list.append(
# numpy.around(
# numpy.float(fraction_decimal),
# decimals = precision
# )
# )
# else:
# for value in numerator:
# getcontext().prec = precision
# fraction_decimal = Decimal(value)/Decimal(denominator)
# result_list.append(
# numpy.around(
# numpy.float(fraction_decimal),
# decimals = precision
# )
# )
# fraction_float = numpy.array(result_list)
# return fraction_float
return fraction_decimal
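# Illustrative sketch (made-up numbers): with precision=10 each weight is the
# quotient rounded to ten decimal places, which per the docstring is intended to
# keep the weights summing to 1 within that precision:
#
#   weights = decimal_divide(numpy.array([1.0, 2.0, 3.0]), 6.0, 10)
#   # -> array([0.1666666667, 0.3333333333, 0.5])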
def make_bins(lower_bound, upper_bound, step_size):
""" Returns an ascending list of tuples from start to stop, with
an interval of width and the center point values for intervals
A tuple bins[i], i.e. (bins[i][0], bins[i][1]) with i > 0
and i < quantity, satisfies the following conditions:
(1) bins[i][0] + width == bins[i][1]
(2) bins[i-1][0] + width == bins[i][0] and
bins[i-1][1] + width == bins[i][1]
"""
bins = []
center_points = []
for low in range(lower_bound, upper_bound, step_size):
bins.append((low, low + step_size))
center_points.append(low + step_size/2)
return bins, center_points
def get_bin(value, bins):
""" Returns the smallest index i of bins so that
bin[i][0] <= value < bin[i][1], where
bins is a list of tuples, like [(0,20), (20, 40), (40, 60)]
"""
for i in range(0, len(bins)):
if bins[i][0] <= value < bins[i][1]:
return i
return -1
def place_into_bins(sorting_data, data_to_bin, bins):
""" Returns 'binned_data, a vector of same length as 'bins' that
has the values of 'data_to_bin' sorted into bins according to
values of 'sorting_data.'
Sorting_data and data_to_bin are 1D arrays of the same length.
In our case, they are different attributes of vessels identified
by MMSI.
'bins' is the output variable of the function 'make_bins'.
"""
binned_data = numpy.zeros(len(bins))
index = 0
for value in sorting_data:
# accounting for no-data: -99999
if value > 0:
bin_index = get_bin(value, bins)
binned_data[bin_index] += data_to_bin[index]
index += 1
return binned_data
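# Illustrative sketch tying the three helpers above together (made-up vessel
# lengths and per-vessel fuel capacities):
#
#   bins, centers = make_bins(0, 300, 100)      # [(0, 100), (100, 200), (200, 300)]
#   lengths = numpy.array([50, 150, 250, 120])
#   capacities = numpy.array([10., 20., 30., 40.])
#   place_into_bins(lengths, capacities, bins)  # -> array([10., 60., 30.])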
#
def clamp(n, minn, maxn):
""" Returns the number n after fixing min and max thresholds.
minn and maxn are scalars that represent min and max capacities.
clamp ensures that capacities are within min/max thresholds
and sets n to minn or maxn if outside of thresholds, such that
minn < n < maxn
"""
if n < minn:
return minn
elif n > maxn:
return maxn
else:
return n
def concat_shp(ship_type, shapefile_path):
"""
INPUT:
- ship_type ["tanker", "barge", "atb", etc]:
MIDOSS-name for ship type (see oil_attribution.yaml for list)
- shapefile_path [Path]: e.g., on Salish,
Path('/data/MIDOSS/shapefiles/')
OUTPUT:
- dataframe of all 2018 ship tracks for given ship_type
"""
for months in range(1,13):
# set file location and name
shapefile = shapefile_path/f'{ship_type}_2018_{months:02d}.shp'
# import shapefile using geopandas
monthly_shp = gpd.read_file(shapefile)
if months == 1:
print(f'creating {ship_type} shapefile for 2018, starting with January data')
allTracks = monthly_shp
else:
print(f'Concatenating {ship_type} data from month {months}')
allTracks = gpd.GeoDataFrame(
pandas.concat([allTracks, monthly_shp])
)
return allTracks
def get_doe_tanker_byvessel(vessels,doe_xls_path,fac_xls_path):
"""
Inputs:
- vessels [list]: List of vessel names, e.g.["AMERICAN FREEDOM","PELICAN STATE"]
- doe_xls_path [path]: Location and name of DOE data spreadsheet
- fac_xls_path [path]: Location and name of facilities transfer spreadsheet
Outputs:
- cargo_transfers [dataframe]: 2018 cargo transfers to/from the vessels and
the marine terminals used in this study, in liters. Transfers are grouped by AntID
"""
# conversion factor
gal2liter = 3.78541
# load dept. of ecology data
DOEdf = get_DOE_df(
doe_xls_path,
fac_xls_path,
group = 'no'
)
# extract tanker cargo transfers
if isinstance(vessels, list):
cargo_transfers = DOEdf.loc[
(DOEdf.TransferType == 'Cargo') &
(DOEdf.Deliverer.isin(vessels) |
DOEdf.Receiver.isin(vessels)),
['TransferQtyInGallon', 'Deliverer','Receiver','StartDateTime','AntID']
].groupby('AntID').agg(
{'TransferQtyInGallon':'sum',
'Deliverer':'first',
'Receiver':'first',
'StartDateTime':'first'}
).sort_values(by='TransferQtyInGallon',ascending=False)
else: # if a string
cargo_transfers = DOEdf.loc[
(DOEdf.TransferType == 'Cargo') &
(DOEdf.Deliverer.str.contains(vessels) |
DOEdf.Receiver.str.contains(vessels)),
['TransferQtyInGallon', 'Deliverer','Receiver','StartDateTime','AntID']
].groupby('AntID').agg(
{'TransferQtyInGallon':'sum',
'Deliverer':'first',
'Receiver':'first',
'StartDateTime':'first'}
).sort_values(by='TransferQtyInGallon',ascending=False)
# convert to liters
cargo_transfers['TransferQtyInGallon'] = gal2liter*cargo_transfers['TransferQtyInGallon']
cargo_transfers=cargo_transfers.rename(
columns={"TransferQtyInGallon":"TransferQtyInLiters"}
).reset_index()
return cargo_transfers
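# Illustrative usage sketch (the directory below is a placeholder, not a real path;
# the spreadsheet names follow the files referenced elsewhere in this module):
#
#   doe_xls = pathlib.Path("/some/dir/MuellerTrans4-30-20.xlsx")
#   fac_xls = pathlib.Path("/some/dir/Oil_Transfer_Facilities.xlsx")
#   transfers = get_doe_tanker_byvessel(["AMERICAN FREEDOM"], doe_xls, fac_xls)
#   # -> one row per AntID with total litres transferred, deliverer, receiver
#   #    and start time, sorted largest-first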
def split_doe_transfers(doe_df):
"""
split dataframe of DOE transfers into two-way transfers (import and export) and one-way transfers
"""
one_way=pandas.DataFrame({})
two_way=pandas.DataFrame({})
count = 0
idx_taken = 0
# order transfers by time
doe_df = doe_df.sort_values(by='StartDateTime').reset_index(drop=True)
# categorize transfers
for idx,deliverer in enumerate(doe_df['Deliverer']):
if idx != doe_df['Deliverer'].shape[0]-1:
if ((doe_df['Deliverer'][idx] == doe_df['Receiver'][idx+1]) &
(doe_df['Deliverer'][idx+1] == doe_df['Receiver'][idx])):
# count number of cases where there is a delivery both ways
count += 1
two_way = two_way.append(doe_df.iloc[[idx]])
idx_taken = 1
else:
if idx_taken:
two_way = two_way.append(doe_df.iloc[[idx]])
idx_taken = 0
else:
one_way = one_way.append(doe_df.iloc[[idx]])
idx_taken = 0
else:
# categorize the last entry by comparing with the end - 1 values
if ((doe_df['Deliverer'][idx] == doe_df['Receiver'][idx-1]) &
(doe_df['Deliverer'][idx-1] == doe_df['Receiver'][idx])):
count += 1
two_way = two_way.append(doe_df.iloc[[idx]])
return one_way, two_way
def get_oil_type_cargo(yaml_file, facility, ship_type, random_generator):
""" Returns oil for cargo attribution based on facility and vessel
by querying information in input yaml_file
"""
with open(yaml_file,"r") as file:
# load fraction_of_total values for weighting
# random generator
cargo = yaml.safe_load(file)
ship = cargo[facility][ship_type]
probability = [ship[fuel]['fraction_of_total']
for fuel in ship]
# First case indicates no cargo transfer to/from terminal
# (and a mistake in origin/destination analysis).
#
        # Second case ensures necessary conditions for
# random_generator
if sum(probability) == 0:
fuel_type = []
else:
try:
fuel_type = random_generator.choice(
list(ship.keys()), p = probability)
except ValueError:
                raise Exception('Error: fraction of fuel transfers '
                                f'for {ship_type} servicing {facility} '
                                f'does not sum to 1 in {yaml_file}')
return fuel_type
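# Illustrative sketch of the yaml structure get_oil_type_cargo() expects (the facility
# name, file name, and fractions below are made up; only the nesting
# facility -> ship_type -> fuel -> fraction_of_total is taken from the code above):
#
#   # cargo_by_facility.yaml
#   # BP Cherry Point Refinery:
#   #   tanker:
#   #     akns:   {fraction_of_total: 0.8}
#   #     diesel: {fraction_of_total: 0.2}
#
#   rng = numpy.random.default_rng(42)
#   get_oil_type_cargo("cargo_by_facility.yaml", "BP Cherry Point Refinery",
#                      "tanker", rng)   # -> "akns" or "diesel", weighted 4:1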
def get_oil_type_cargo_generic_US(yaml_file, ship_type, random_generator):
""" Returns oil for cargo attribution based on facility and vessel
by querying information in input yaml_file. This is essentially
the same as 'get_oil_type_cargo' but is designed for yaml files
that lack facility names
"""
with open(yaml_file,"r") as file:
# load fraction_of_total values for weighting random generator
cargo = yaml.safe_load(file)
ship = cargo[ship_type]
probability = [ship[fuel]['fraction_of_total'] for fuel in ship]
# First case indicates no cargo transfer to/from terminal
# (and a mistake in origin/destination analysis).
#
        # Second case ensures necessary conditions for random_generator
if sum(probability) == 0:
fuel_type = []
else:
try:
fuel_type = random_generator.choice(
list(ship.keys()), p = probability)
except ValueError:
                raise Exception('Error: fraction of fuel transfers '
                                f'for {ship_type} '
                                f'does not sum to 1 in {yaml_file}')
return fuel_type
def get_montecarlo_oil_byregion(monte_carlo_csv, oil_attribution_file, fac_xls,
direction = 'export', vessel='tanker'):
"""
PURPOSE: Return dataframe of monte carlo attributions to facilities by
import, export, combined and vessel-type
INPUTS:
directions['import','export','combined']
vessel['tanker','atb','barge']
OUTPUT:
capacities DataFrame. For import or export, this dataframe has a Region
attribution based on the location of the facility. For combined, the
Region attribution is based on the location of the spill (as a US facility
can be both an origin or a destination with conflicting region).
TODO:
- Update method for attributing spill region so it's by mask
rather than by latitude
"""
# open montecarlo spills file
mcdf = get_montecarlo_df(monte_carlo_csv)
# Load oil Attribution File
with open(oil_attribution_file) as file:
oil_attrs = yaml.load(file, Loader=yaml.Loader)
# Read in facility names
facility_names_mc = oil_attrs['categories']['US_origin_destination']
# Load facility information
facdf = assign_facility_region(fac_xls)
# Add region based on spill location
mcdf = assign_spill_region(mcdf)
    # Query the dataframe for cargo capacities and oil types by vessel,
    # split by import, export, or combined transfers
if direction == 'import':
capacities = mcdf.loc[
(mcdf.fuel_cargo == 'cargo') &
(mcdf.vessel_type == vessel) &
(mcdf.vessel_dest.isin(facility_names_mc)),
['cargo_capacity', 'vessel_dest', 'oil_type']
]
# Create a new "Regions" column to assing region tag, using
# 'not attributed' to define transfers at locations not included
# in our evaluation
capacities['ImportRegion'] = 'not attributed'
# Find locations with transfers in our facility list and
# assign region tag.
for idx,facility in enumerate(facdf['FacilityName']):
capacities['ImportRegion'] = numpy.where(
(capacities['vessel_dest'] == facility), # ID transfer location
facdf['Region'][idx], # assign region, or
capacities['ImportRegion']# keep NA attribution
)
elif direction == 'export':
capacities = mcdf.loc[
(mcdf.fuel_cargo == 'cargo') &
(mcdf.vessel_type == vessel) &
(mcdf.vessel_origin.isin(facility_names_mc)),
['cargo_capacity', 'vessel_origin','oil_type', 'SpillRegion']
]
# Create a new "Regions" column to assing region tag, using
# 'not attributed' to define transfers at locations not included
# in our evaluation
capacities['ExportRegion'] = 'not attributed'
# Find locations with transfers in our facility list and
# assign region tag.
for idx,facility in enumerate(facdf['FacilityName']):
capacities['ExportRegion'] = numpy.where(
(capacities['vessel_origin'] == facility), # ID transfer location
facdf['Region'][idx], # assign region, or
capacities['ExportRegion']# keep NA attribution
)
elif direction == 'combined':
capacities = mcdf.loc[
(mcdf.fuel_cargo == 'cargo') &
(mcdf.vessel_type == vessel) &
(mcdf.vessel_dest.isin(facility_names_mc) |
mcdf.vessel_origin.isin(facility_names_mc)),
['cargo_capacity', 'vessel_dest', 'vessel_origin',
'oil_type', 'SpillRegion']
]
else:
        raise ValueError("get_montecarlo_oil_byregion: direction can only be "
                         "import, export, or combined.")
return capacities
def assign_spill_region(mc_df):
"""
Reads in a monte-carlo spills DataFrame (from on file or combination of files)
and creates a Region attribution based on oil spill region
TODO: Update to use region masks (Ask Ben or Tereza)
"""
# define latitude bins
lat_partition = [46.9, 48.3, 48.7]
# define conditions used to bin facilities by latitude
conditions = [
(mc_df.spill_lat < lat_partition[0]),
(mc_df.spill_lat >= lat_partition[0]) &
(mc_df.spill_lat < lat_partition[1]),
(mc_df.spill_lat >= lat_partition[1]) &
(mc_df.spill_lat < lat_partition[2]),
(mc_df.spill_lat >= lat_partition[2])
]
# regional tags
values = ['Columbia River','Puget Sound','Anacortes','Whatcom']
# create a new column and assign values to it using
# defined conditions on latitudes
mc_df['SpillRegion'] = numpy.select(conditions, values)
return mc_df
def assign_facility_region(facilities_xlsx):
"""
Loads the facilities excel spreadsheet and returns a dataframe with
that identifies the region the facility is in
"""
# Facility information
facdf = pandas.read_excel(
facilities_xlsx,
sheet_name = 'Washington',
usecols="B,D,J,K"
)
# define latitude bins
lat_partition = [46.9, 48.3, 48.7]
# define conditions used to bin facilities by latitude
conditions = [
(facdf.DockLatNumber < lat_partition[0]),
(facdf.DockLatNumber >= lat_partition[0]) &
(facdf.DockLatNumber < lat_partition[1]),
(facdf.DockLatNumber >= lat_partition[1]) &
(facdf.DockLatNumber < lat_partition[2]),
(facdf.DockLatNumber >= lat_partition[2])
]
# regional tags
values = ['Columbia River','Puget Sound','Anacortes','Whatcom County']
# create a new column and assign values to it using
# defined conditions on latitudes
facdf['Region'] = numpy.select(conditions, values)
return facdf
def get_voyage_transfers(voyage_xls, fac_xls):
"""
PURPOSE: Read in voyage transfers for tankers, atbs, and barges
and assign region attribution to voyages
INPUT:
voyage_xls: Path to Origin_Destination_Analysis_updated.xlsx
fac_xls: Path to Oil_Transfer_Facilities.xlsx
OUTPUT:
dataframe with columns for atbs, tankers, barges and region
"""
# create dataframe for voyage transfers (From DOE_transfers.ipynb)
# read in data
tankers_df = pandas.read_excel(
voyage_xls,
sheet_name="VoyageCountsbyFacility_MR",
usecols="M,N,O",
skiprows = 1
)
barge_atb_df = pandas.read_excel(
voyage_xls,
sheet_name="VoyageCountsbyFacility_MR",
usecols="E,F,G,J",
skiprows = 1
)
# tidy-up column names
tankers_df=tankers_df.rename(
columns={"LOCATION.3":"LOCATION",
"TRANSFERS.3":"tanker_transfers",
"FACILITY CATEGORY.3":"CATEGORY"
}
)
barge_atb_df=barge_atb_df.rename(
columns={"LOCATION.1":"LOCATION",
"TRANSFERS.1":"atb_transfers",
"FACILITY CATEGORY.1":"CATEGORY",
"TRANSFERS.2":"barge_transfers"}
)
# extract voyage transfers from WA
tankers_df = tankers_df.loc[
tankers_df.CATEGORY == 'WA',
['LOCATION','tanker_transfers']
]
barges_df = barge_atb_df.loc[
barge_atb_df.CATEGORY == 'WA',
['LOCATION','barge_transfers']
]
atbs_df = barge_atb_df.loc[
barge_atb_df.CATEGORY == 'WA',
['LOCATION','atb_transfers']
]
# combine into one dataframe
voyages = pandas.merge(
left=tankers_df,
right=barges_df,
on='LOCATION',
how='left'
)
voyages = pandas.merge(
left=voyages,
right=atbs_df,
on='LOCATION',
how='left'
)
# Create a new "Regions" column to assing region tag, using
# 'not attributed' to define transfers at locations not included
# in our evaluation
voyages['Region'] = 'not attributed'
# Load facility information
facdf = assign_facility_region(fac_xls)
# Find locations with transfers in our facility list and
# assign region tag.
for idx,facility in enumerate(facdf['FacilityName']):
voyages['Region'] = numpy.where(
(voyages['LOCATION'] == facility), # identify transfer location
facdf['Region'][idx], # assign region to transfer
voyages['Region'] # or keep the NA attribution
)
voyages=voyages.set_index('LOCATION')
return voyages
def get_doe_transfers(doe_xls, fac_xls):
"""
PURPOSE: Tally transfers to/from marine terminals used in our study.
Currently this just tallies cargo transfers but could easily be
modified to tally fuel or cargo + fuel
INPUTS:
- doe_xls: Path to Dept. of Ecology data file, 'MuellerTrans4-30-20.xlsx'
- fac_xls: Path to facilities data (simply because it's used by
get_DOE_df()), Oil_Transfer_Facilities.xlsx
OUTPUTS:
- Dataframe with total number of import, export and combined (import
+ export) Cargo transfers for each marine terminal, sorted by vessel
type receiving or delivering product. Format: df[vessel_type].
"""
# Facility information for assinging regions
facdf = assign_facility_region(fac_xls)
facility_names = facdf['FacilityName']
# Load DOE data
DOEdf = get_DOE_df(
doe_xls,
fac_xls,
group = 'no'
)
# Tally transfers for imports and exports, combined below
imports = {}
exports = {}
#~~~~~~~ TANKERS ~~~~~~~~~~~~~~~~~~~~~~~
vessel_type = 'tanker'
transfer_type = 'Cargo'
type_description = ['TANK SHIP']
imports[vessel_type] = DOEdf.loc[
(DOEdf.TransferType == transfer_type) &
(DOEdf.DelivererTypeDescription.isin(type_description)) &
(DOEdf.Receiver.isin(facility_names)),
['Receiver', 'TransferType']
].groupby('Receiver').count().rename(columns={'TransferType':'imports'})
imports[vessel_type].index.names=['LOCATIONS']
exports[vessel_type] = DOEdf.loc[
(DOEdf.TransferType == transfer_type) &
(DOEdf.ReceiverTypeDescription.isin(type_description)) &
(DOEdf.Deliverer.isin(facility_names)),
['Deliverer', 'TransferType']
].groupby('Deliverer').count().rename(columns={'TransferType':'exports'})
exports[vessel_type].index.names=['LOCATIONS']
#~~~~~~~ ATBs ~~~~~~~~~~~~~~~~~~~~~~~
vessel_type = 'atb'
transfer_type = 'Cargo'
imports[vessel_type] = DOEdf.loc[
(DOEdf.TransferType == transfer_type) &
(DOEdf.Receiver.isin(facility_names)) &
(DOEdf.Deliverer.str.contains('ITB') |
DOEdf.Deliverer.str.contains('ATB')),
['Receiver', 'TransferType']
].groupby('Receiver').count().rename(columns={'TransferType':'imports'})
imports[vessel_type].index.names=['LOCATIONS']
exports[vessel_type] = DOEdf.loc[
(DOEdf.TransferType == transfer_type) &
(DOEdf.Deliverer.isin(facility_names)) &
(DOEdf.Receiver.str.contains('ITB') |
DOEdf.Receiver.str.contains('ATB')),
['Deliverer', 'TransferType']
].groupby('Deliverer').count().rename(columns={'TransferType':'exports'})
exports[vessel_type].index.names=['LOCATIONS']
#~~~~~~~ BARGES ~~~~~~~~~~~~~~~~~~~~~~~
vessel_type = 'barge'
transfer_type = 'Cargo'
type_description = ['TANK BARGE','TUGBOAT']
imports[vessel_type] = DOEdf.loc[
(DOEdf.TransferType == transfer_type) &
(DOEdf.DelivererTypeDescription.isin(type_description)) &
(~DOEdf.Deliverer.str.contains('ITB')) &
(~DOEdf.Deliverer.str.contains('ATB')) &
(DOEdf.Receiver.isin(facility_names)),
['Receiver', 'TransferType']
].groupby('Receiver').count().rename(columns={'TransferType':'imports'})
imports[vessel_type].index.names=['LOCATIONS']
exports[vessel_type] = DOEdf.loc[
(DOEdf.TransferType == transfer_type) &
(DOEdf.ReceiverTypeDescription.isin(type_description)) &
(~DOEdf.Receiver.str.contains('ITB')) &
(~DOEdf.Receiver.str.contains('ATB')) &
(DOEdf.Deliverer.isin(facility_names)),
['Deliverer', 'TransferType']
].groupby('Deliverer').count().rename(columns={'TransferType':'exports'})
exports[vessel_type].index.names=['LOCATIONS']
doe={}
for vessel in ['tanker','atb','barge']:
doe[vessel] = pandas.DataFrame(0,index=facility_names, columns={'combined'})
doe[vessel].index.name='LOCATIONS'
doe[vessel] = pandas.merge(
left=doe[vessel],
right=exports[vessel],
left_index = True,
right_index=True,
how='left'
).fillna(0)
doe[vessel] = pandas.merge(
left=doe[vessel],
right=imports[vessel],
left_index = True,
right_index=True,
how='left'
).fillna(0)
doe[vessel]['combined'] = (doe[vessel]['imports'] +
doe[vessel]['exports'])
# Now assign regions to dataframe for each vessel "spreadsheet"
for vessel in ['tanker','atb','barge']:
doe[vessel]['Region'] = 'not attributed'
for idx,facility in enumerate(facdf['FacilityName']):
doe[vessel]['Region'] = numpy.where(
(doe[vessel].index == facility), # identify transfer location
facdf['Region'][idx], # assign region to transfer
doe[vessel]['Region'] # or keep the NA attribution
)
return doe
def get_montecarlo_df(MC_csv):
"""
PURPOSE: Read in monte-carlo csv file and re-name Lagrangian_template to
oil_type with Lagrangian file names changed to oil-type name
INPUT:
MC_csv[Path(to-mc-file)]
"""
# define names used for Lagrangian files
oil_template_names = [
'Lagrangian_akns.dat','Lagrangian_bunker.dat',
'Lagrangian_diesel.dat','Lagrangian_gas.dat',
'Lagrangian_jet.dat','Lagrangian_dilbit.dat',
'Lagrangian_other.dat'
]
# define desired, end-product names for oil-types
oil_types = [
'ANS','Bunker-C',
'Diesel','Gasoline',
'Jet Fuel', 'Dilbit',
'Other'
]
# open montecarlo spills file
mc_df = pandas.read_csv(MC_csv)
# replace Lagrangian template file names with oil type tags
mc_df['oil_type'] = mc_df['Lagrangian_template'].replace(
oil_template_names,
oil_types
)
# remove Lagrangian_template column
mc_df = mc_df.drop(columns='Lagrangian_template')
return mc_df
def get_DOE_atb(DOE_xls, fac_xls, transfer_type = 'cargo', facilities='selected'):
"""
Returns transfer data for ATBs.
DOE_xls[Path obj. or string]: Path(to Dept. of Ecology transfer dataset)
facilities_xls[Path obj. or string]: Path(to spreadsheet with facilities information)
transfer_type [string]: 'fuel', 'cargo', 'cargo_fuel'
facilities [string]: 'all' or 'selected',
"""
# load DOE data
DOE_df = get_DOE_df(
DOE_xls,
fac_xls,
group = 'yes'
)
# convert inputs to lower-case
transfer_type = transfer_type.lower()
facilities = facilities.lower()
if transfer_type not in ['fuel', 'cargo', 'cargo_fuel']:
        raise ValueError('transfer_type options: fuel, cargo, or cargo_fuel.')
# SELECTED FACILITIES
if facilities == 'selected':
# Facility information
facdf = pandas.read_excel(
fac_xls,
sheet_name = 'Washington',
usecols="D"
)
# This list was copied from oil_attribution.yaml on 07/02/21
# Eventually will update to read in from oil_attribution
facility_names = facdf['FacilityDOEName']
if transfer_type == 'cargo':
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.TransferType == 'Cargo') &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.TransferType == 'Cargo') &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'fuel':
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.TransferType == 'Fueling') &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.TransferType == 'Fueling') &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
elif facilities == 'all':
if transfer_type == 'cargo':
import_df = DOE_df.loc[
(DOE_df.TransferType == 'Cargo') &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
export_df = DOE_df.loc[
(DOE_df.TransferType == 'Cargo') &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'fuel':
import_df = DOE_df.loc[
(DOE_df.TransferType == 'Fueling') &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
export_df = DOE_df.loc[
(DOE_df.TransferType == 'Fueling') &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
import_df = DOE_df.loc[
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
export_df = DOE_df.loc[
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
return import_df, export_df
def get_DOE_df(DOE_xls, fac_xls, group='no'):
"""
group['yes','no']: Specificies whether or not terminals ought to be re-named to
the names used in our monte carlo grouping
"""
    # Import columns are: (A) AntID, (E) StartDateTime, (G) Deliverer,
    # (H) Receiver, (O) Region,
    # (P) Product, (Q) Quantity in Gallons, (R) Transfer Type
    # (Fueling, Cargo, or Other), (W) DelivererTypeDescription,
    # (X) ReceiverTypeDescription
    # define floating point precision for transfer quantities
precision = 5
# read in data
df = pandas.read_excel(
DOE_xls,
sheet_name='Vessel Oil Transfer',
usecols="A,E,G,H,P,Q,R,W,X"
)
# convert to float (though I'm not sure if this is still needed)
df.TransferQtyInGallon = (
df.TransferQtyInGallon.astype(float).round(precision)
)
# Housekeeping: Force one name per marine transfer site
df = df.replace(
to_replace = "US Oil Tacoma ",
value = "U.S. Oil & Refining"
)
df = df.replace(
to_replace = "TLP",
value = "TLP Management Services LLC (TMS)"
)
# Housekeeping: Convert DOE terminal names to the names
# used in our monte-carlo, if different.
df = df.replace(
to_replace = "Maxum (<NAME>)",
value = "Maxum Petroleum - Harbor Island Terminal"
)
df = df.replace(
to_replace = "<NAME> (formerly Tesoro)",
value = "<NAME> (formerly Tesoro)"
)
# Consolidate (if selected by group='yes'):
# Apply terminal groupings used in our monte-carlo by
# renaming terminals to the names used in our
# origin-destination attribution
if group == 'yes':
df = df.replace(
to_replace = "Maxum Petroleum - Harbor Island Terminal",
value = "Kinder Morgan Liquids Terminal - Harbor Island"
)
df = df.replace(
to_replace = "Shell Oil LP Seattle Distribution Terminal",
value = "Kinder Morgan Liquids Terminal - Harbor Island"
)
df = df.replace(
to_replace = "Nustar Energy Tacoma",
value = "Phillips 66 Tacoma Terminal"
)
# Create a new "Regions" column to assing region tag, using
# 'not attributed' to define transfers at locations not included
# in our evaluation
df['ImportRegion'] = 'not attributed'
df['ExportRegion'] = 'not attributed'
# Load facility information
facdf = assign_facility_region(fac_xls)
# Find locations with transfers in our facility list and
# assign region tag.
for idx,facility in enumerate(facdf['FacilityName']):
df['ImportRegion'] = numpy.where(
(df['Receiver'] == facility), # identify transfer location
facdf['Region'][idx], # assign region to transfer
df['ImportRegion'] # or keep the NA attribution
)
df['ExportRegion'] = numpy.where(
(df['Deliverer'] == facility), # identify transfer location
facdf['Region'][idx], # assign region to transfer
df['ExportRegion'] # or keep the NA attribution
)
return df
def rename_DOE_df_oils(DOE_df, DOE_xls):
"""
Reads in DOE dataframe with original 'Product' names and converts
them to the names we use in our monte-carlo
    DOE_df: Department of Ecology data in DataFrame format,
as in output from get_DOE_df
DOE_xls: The original DOE oil transfer spreadsheet, the same as is
read into get_DOE_df
"""
    # I'm sure there is a better way of allowing name flexibility
    # and preventing unnecessary memory hogging, but I'm choosing
    # ease and efficiency right now....
df = DOE_df.copy()
# read in monte-carlo oil classifications
oil_classification = get_DOE_oilclassification(DOE_xls)
# Rename oil types to match our in-house naming convention
for oil_mc in oil_classification.keys():
for oil_doe in oil_classification[oil_mc]:
df['Product'] = df['Product'].replace(oil_doe, oil_mc)
# Now convert from our in-house names to our presentation names
conditions = [
(df['Product']=='akns'),
(df['Product']=='bunker'),
(df['Product']=='dilbit'),
(df['Product']=='jet'),
(df['Product']=='diesel'),
(df['Product']=='gas'),
(df['Product']=='other')
]
    # presentation names for oil types
new_values = [
'ANS' ,'Bunker-C','Dilbit','Jet Fuel','Diesel','Gasoline','Other'
]
# create a new column and assign values to it using
# defined conditions on oil types
df['Product'] = numpy.select(conditions, new_values)
return df
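# Hedged usage sketch (illustrative only): chain get_DOE_df and rename_DOE_df_oils
# to get presentation oil names.  Paths are placeholders, and this assumes the
# get_DOE_oilclassification helper used inside rename_DOE_df_oils resolves
# elsewhere in the module.
def _example_rename_oils(DOE_xls='DOE_2018_transfers.xlsx', fac_xls='facilities.xlsx'):
    doe_df = get_DOE_df(DOE_xls, fac_xls, group='no')
    renamed = rename_DOE_df_oils(doe_df, DOE_xls)
    # Count transfers per presentation oil name (ANS, Bunker-C, Dilbit, ...)
    return renamed['Product'].value_counts()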
def get_oil_classification(DOE_transfer_xlsx):
""" Returns the list of all the names in the DOE database that are
attributed to our oil types.
INPUT['string' or Path]:
location/name of 2018 DOE oil transfer excel spreadsheet
OUTPUT[dictionary]:
Oil types attributed in our study to: AKNS, Bunker, Dilbit,
Diesel, Gas, Jet and Other.
"""
# Import columns are:
# (G) Deliverer, (H) Receiver, (O) Region, (P) Product,
    # (Q) Quantity in Gallons, (R) Transfer Type (Fueling, Cargo, or Other),
    # (W) DelivererTypeDescription, (X) ReceiverTypeDescription
#2018
df = pandas.read_excel(
DOE_transfer_xlsx,
sheet_name='Vessel Oil Transfer',
usecols="G,H,P,Q,R,W,X"
)
# oil types used in our study
oil_types = [
'akns', 'bunker', 'dilbit', 'jet', 'diesel', 'gas', 'other'
]
# initialize oil dictionary
oil_classification = {}
for oil in oil_types:
oil_classification[oil] = []
[nrows,ncols] = df.shape
for row in range(nrows):
if ('CRUDE' in df.Product[row] and
df.Product[row] not in
oil_classification['akns']
):
oil_classification['akns'].append(df.Product[row])
elif ('BAKKEN' in df.Product[row] and
df.Product[row] not in oil_classification['akns']
):
oil_classification['akns'].append(df.Product[row])
elif ('BUNKER' in df.Product[row] and
df.Product[row] not in oil_classification['bunker']
):
oil_classification['bunker'].append(df.Product[row])
elif ('BITUMEN' in df.Product[row] and
df.Product[row] not in oil_classification['dilbit']
):
oil_classification['dilbit'].append(df.Product[row])
elif ('DIESEL' in df.Product[row] and
df.Product[row] not in oil_classification['diesel']
):
oil_classification['diesel'].append(df.Product[row])
elif ('GASOLINE' in df.Product[row] and
df.Product[row] not in oil_classification['gas']
):
oil_classification['gas'].append(df.Product[row])
elif ('JET' in df.Product[row] and df.Product[row] not in
oil_classification['jet']
):
oil_classification['jet'].append(df.Product[row])
elif ('CRUDE' not in df.Product[row] and
'BAKKEN' not in df.Product[row] and
'BUNKER' not in df.Product[row] and
'BITUMEN' not in df.Product[row] and
'DIESEL' not in df.Product[row] and
'GASOLINE' not in df.Product[row] and
'JET' not in df.Product[row] and
df.Product[row] not in oil_classification['other']):
oil_classification['other'].append(df.Product[row])
return oil_classification
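# Hedged usage sketch (illustrative only): build the DOE-name -> study-oil-type
# lookup and invert it to a flat mapping.  The workbook path is a placeholder.
def _example_oil_classification(DOE_transfer_xlsx='DOE_2018_transfers.xlsx'):
    oil_classification = get_oil_classification(DOE_transfer_xlsx)
    # Invert to {DOE product name: study oil type} for quick lookups
    name_to_type = {
        doe_name: oil_type
        for oil_type, doe_names in oil_classification.items()
        for doe_name in doe_names
    }
    return name_to_type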
def get_DOE_barges(DOE_xls,fac_xls, direction='combined',facilities='selected',transfer_type = 'cargo_fuel'):
"""
THIS CODE HAS A LOT OF REDUNDANCY. I PLAN TO UPDATE BY USING
COMBINED INPUT/OUTPUT TO RETURN EITHER IMPORT OR OUTPUT, IF SELECTED
ALSO CHANGE NAME TO GET_DOE_BARGES_TRANSFERS TO MATCH ATB FUNCTION
Returns number of transfers to/from WA marine terminals used in our study
DOE_xls[Path obj. or string]: Path(to Dept. of Ecology transfer dataset)
    fac_xls [Path obj. or string]: Path(to facility information spreadsheet)
direction [string]: 'import','export','combined', where:
'import' means from vessel to marine terminal
'export' means from marine terminal to vessel
'combined' means both import and export transfers
facilities [string]: 'all' or 'selected',
transfer_type [string]: 'fuel','cargo','cargo_fuel'
"""
print('get_DOE_barges: not yet tested with fac_xls as input')
# load DOE data
DOE_df = get_DOE_df(
DOE_xls,
fac_xls,
group = 'yes'
)
# convert inputs to lower-case
direction = direction.lower()
transfer_type = transfer_type.lower()
facilities = facilities.lower()
#
if transfer_type not in ['fuel', 'cargo', 'cargo_fuel']:
raise ValueError('transfer_type options: fuel,cargo or cargo_fuel.')
if direction not in ['import', 'export', 'combined']:
raise ValueError('direction options: import, export or combined.')
# SELECTED FACILITIES
if facilities == 'selected':
# This list was copied from oil_attribution.yaml on 07/02/21
# Eventually will update to read in from oil_attribution
facility_names = [
'BP Cherry Point Refinery',
'Shell Puget Sound Refinery',
'Tidewater Snake River Terminal',
'SeaPort Sound Terminal',
'Tesoro Vancouver Terminal',
'Phillips 66 Ferndale Refinery',
'Phillips 66 Tacoma Terminal',
'Marathon Anacortes Refinery (formerly Tesoro)',
'Tesoro Port Angeles Terminal',
'U.S. Oil & Refining',
'Naval Air Station Whidbey Island (NASWI)',
'NAVSUP Manchester',
'Alon Asphalt Company (Paramount Petroleum)',
'Kinder Morgan Liquids Terminal - Harbor Island',
'Nustar Energy Vancouver',
'Tesoro Pasco Terminal',
'REG Grays Harbor, LLC',
'Tidewater Vancouver Terminal',
'TLP Management Services LLC (TMS)'
]
# get transfer records for imports, exports and both imports and exports
# imports
if direction == 'import':
print('import')
if transfer_type == 'cargo':
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.TransferType == 'Cargo') &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'fuel':
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.TransferType == 'Fueling') &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
return import_df
if direction == 'export':
print('export')
#exports
if transfer_type == 'cargo':
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.TransferType == 'Cargo') &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'fuel':
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.TransferType == 'Fueling') &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
return export_df
        # Now combine both imports and exports for the combined case
if direction == 'combined':
print('combined')
#import
if transfer_type == 'cargo':
print('cargo')
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.TransferType == 'Cargo') &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'fuel':
print('fuel')
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.TransferType == 'Fueling') &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
print('cargo_fuel')
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
#export
if transfer_type == 'cargo':
print('cargo')
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.TransferType == 'Cargo') &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'fuel':
print('fuel')
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.TransferType == 'Fueling') &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
print('cargo_fuel')
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
#combine import and export
importexport_df = import_df.append(export_df)
importexport_df.reset_index(inplace=True)
return importexport_df
elif facilities == 'all':
# get transfer records for imports, exports and both imports and exports
# imports
if direction == 'import':
print('import')
if transfer_type == 'cargo':
import_df = DOE_df.loc[
(DOE_df.TransferType == 'Cargo') &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'fuel':
import_df = DOE_df.loc[
(DOE_df.TransferType == 'Fueling') &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
import_df = DOE_df.loc[
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
return import_df
if direction == 'export':
print('export')
#exports
if transfer_type == 'cargo':
export_df = DOE_df.loc[
(DOE_df.TransferType == 'Cargo') &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'fuel':
export_df = DOE_df.loc[
(DOE_df.TransferType == 'Fueling') &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
export_df = DOE_df.loc[
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
return export_df
        # Now combine both imports and exports for the combined case
if direction == 'combined':
print('combined')
#import
if transfer_type == 'cargo':
print('cargo')
import_df = DOE_df.loc[
(DOE_df.TransferType == 'Cargo') &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'fuel':
print('fuel')
import_df = DOE_df.loc[
(DOE_df.TransferType == 'Fueling') &
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
print('cargo_fuel')
import_df = DOE_df.loc[
(DOE_df.DelivererTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Deliverer.str.contains('ITB')) &
(~DOE_df.Deliverer.str.contains('ATB')),
]
#export
if transfer_type == 'cargo':
print('cargo')
export_df = DOE_df.loc[
(DOE_df.TransferType == 'Cargo') &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'fuel':
print('fuel')
export_df = DOE_df.loc[
(DOE_df.TransferType == 'Fueling') &
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
print('cargo_fuel')
export_df = DOE_df.loc[
(DOE_df.ReceiverTypeDescription.isin(
['TANK BARGE','TUGBOAT'])) &
(~DOE_df.Receiver.str.contains('ITB')) &
(~DOE_df.Receiver.str.contains('ATB')),
]
#combine import and export
importexport_df = import_df.append(export_df)
importexport_df.reset_index(inplace=True)
return importexport_df
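# Hedged usage sketch (illustrative only): tally barge/tug cargo transfers at the
# selected terminals.  Paths are placeholders/assumptions.
def _example_barge_transfers(DOE_xls='DOE_2018_transfers.xlsx', fac_xls='facilities.xlsx'):
    combined_df = get_DOE_barges(
        DOE_xls, fac_xls,
        direction='combined',
        facilities='selected',
        transfer_type='cargo'
    )
    return combined_df['Deliverer'].count()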
def get_DOE_atb_transfers(DOE_xls,fac_xls,transfer_type = 'cargo',facilities='selected'):
"""
Returns number of transfers to/from WA marine terminals used in our study
DOE_xls[Path obj. or string]: Path(to Dept. of Ecology transfer dataset)
direction [string]: 'import','export','combined', where:
'import' means from vessel to marine terminal
'export' means from marine terminal to vessel
'combined' means both import and export transfers
facilities [string]: 'all' or 'selected',
TO-DO: Update to count transfers with the same AntID as one
"""
print('this code not yet tested with fac_xls as input')
# load DOE data
DOE_df = get_DOE_df(
DOE_xls,
fac_xls,
group = 'yes'
)
# convert inputs to lower-case
transfer_type = transfer_type.lower()
facilities = facilities.lower()
if transfer_type not in ['fuel', 'cargo', 'cargo_fuel']:
raise ValueError('transfer_type options: fuel,cargo or cargo_fuel.')
# SELECTED FACILITIES
if facilities == 'selected':
# This list was copied from oil_attribution.yaml on 07/02/21
# Eventually will update to read in from oil_attribution
facility_names = [
'BP Cherry Point Refinery',
'Shell Puget Sound Refinery',
'Tidewater Snake River Terminal',
'SeaPort Sound Terminal',
'Tesoro Vancouver Terminal',
'Phillips 66 Ferndale Refinery',
'Phillips 66 Tacoma Terminal',
'Marathon Anacortes Refinery (formerly Tesoro)',
'Tesoro Port Angeles Terminal',
'U.S. Oil & Refining',
'Naval Air Station Whidbey Island (NASWI)',
'NAVSUP Manchester',
'Alon Asphalt Company (Paramount Petroleum)',
'Kinder Morgan Liquids Terminal - Harbor Island',
'Nustar Energy Vancouver',
'Tesoro Pasco Terminal',
'REG Grays Harbor, LLC',
'Tidewater Vancouver Terminal',
'TLP Management Services LLC (TMS)'
]
#import
if transfer_type == 'cargo':
print('cargo')
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.TransferType == 'Cargo') &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'fuel':
print('fuel')
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.TransferType == 'Fueling') &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
print('cargo_fuel')
import_df = DOE_df.loc[
(DOE_df.Receiver.isin(facility_names)) &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
import_count = import_df['Deliverer'].count()
print(f'{import_count} {transfer_type}'
' transfers to monte carlo terminals')
#export
if transfer_type == 'cargo':
print('cargo')
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.TransferType == 'Cargo') &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'fuel':
print('fuel')
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.TransferType == 'Fueling') &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
print('cargo_fuel')
export_df = DOE_df.loc[
(DOE_df.Deliverer.isin(facility_names)) &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
export_count = export_df['Deliverer'].count()
print(f'{export_count} {transfer_type}'
' transfers from monte carlo terminals')
#combine import and export
importexport_df = import_df.append(export_df)
importexport_df.reset_index(inplace=True)
count = importexport_df['Deliverer'].count()
return count
elif facilities == 'all':
#import
if transfer_type == 'cargo':
print('cargo')
import_df = DOE_df.loc[
(DOE_df.TransferType == 'Cargo') &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'fuel':
print('fuel')
import_df = DOE_df.loc[
(DOE_df.TransferType == 'Fueling') &
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
print('cargo_fuel')
import_df = DOE_df.loc[
(DOE_df.Deliverer.str.contains('ITB') |
DOE_df.Deliverer.str.contains('ATB')),
]
import_count = import_df['Deliverer'].count()
print(f'{import_count} {transfer_type}'
' transfers from all sources')
#export
if transfer_type == 'cargo':
print('cargo')
export_df = DOE_df.loc[
(DOE_df.TransferType == 'Cargo') &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'fuel':
print('fuel')
export_df = DOE_df.loc[
(DOE_df.TransferType == 'Fueling') &
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
elif transfer_type == 'cargo_fuel':
print('cargo_fuel')
export_df = DOE_df.loc[
(DOE_df.Receiver.str.contains('ITB') |
DOE_df.Receiver.str.contains('ATB')),
]
export_count = export_df['Deliverer'].count()
print(f'{export_count} {transfer_type}'
' transfers from all sources')
#combine import and export
importexport_df = import_df.append(export_df)
importexport_df.reset_index(inplace=True)
count = importexport_df['Deliverer'].count()
return count
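# Hedged usage sketch (illustrative only): count ATB/ITB cargo transfers at the
# monte-carlo terminals.  Paths are placeholders/assumptions.
def _example_atb_transfers(DOE_xls='DOE_2018_transfers.xlsx', fac_xls='facilities.xlsx'):
    return get_DOE_atb_transfers(
        DOE_xls, fac_xls,
        transfer_type='cargo',
        facilities='selected'
    )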
def get_montecarlo_oil_byvessel(vessel, monte_carlo_csv):
# Currently use hard-coded file location for oil_attribution.yaml
# This won't work for distribution and will need to be fixed.
# Oil Attribution file location
oil_attribution_file = (
'/Users/rmueller/Data/MIDOSS/marine_transport_data/'
'oil_attribution.yaml'
)
# Load oil Attribution File
with open(oil_attribution_file) as file:
oil_attrs = yaml.load(file, Loader=yaml.Loader)
# Read in facility names
facility_names_mc = oil_attrs['categories']['US_origin_destination']
oil_template_names = [
'Lagrangian_akns.dat','Lagrangian_bunker.dat',
'Lagrangian_diesel.dat','Lagrangian_gas.dat',
'Lagrangian_jet.dat','Lagrangian_dilbit.dat',
'Lagrangian_other.dat'
]
oil_types = [
'ANS','Bunker-C',
'Diesel','Gasoline',
'Jet Fuel', 'Dilbit',
'Other'
]
# open montecarlo spills file
mcdf = pandas.read_csv(monte_carlo_csv)
# replace Lagrangian template file names with oil type tags
mcdf['Lagrangian_template'] = mcdf['Lagrangian_template'].replace(
oil_template_names,
oil_types
)
# ~~~~~ EXPORTS ~~~~~
# query dataframe for information on oil export types by vessel
export_capacity = mcdf.loc[
(mcdf.vessel_type == vessel) &
(mcdf.fuel_cargo == 'cargo') &
(mcdf.vessel_origin.isin(facility_names_mc)),
['cargo_capacity', 'vessel_origin', 'Lagrangian_template']
]
# add up oil capacities by vessel and oil types
montecarlo_export_byoil = (
export_capacity.groupby(
'Lagrangian_template'
).cargo_capacity.sum()
)
# ~~~~~ IMPORTS ~~~~~
# query dataframe for information on oil export types by vessel
import_capacity = mcdf.loc[
(mcdf.vessel_type == vessel) &
(mcdf.fuel_cargo == 'cargo') &
(mcdf.vessel_dest.isin(facility_names_mc)),
['cargo_capacity', 'vessel_dest', 'Lagrangian_template']
]
# add up oil capacities by vessel and oil types
montecarlo_import_byoil = (
import_capacity.groupby(
'Lagrangian_template'
).cargo_capacity.sum()
)
# ~~~~~ COMBINED ~~~~~
# query dataframe for information on imports & exports by vessel
# and oil types
net_capacity = mcdf.loc[
(mcdf.vessel_type == vessel) &
(mcdf.fuel_cargo == 'cargo') &
(mcdf.vessel_dest.isin(facility_names_mc) |
mcdf.vessel_origin.isin(facility_names_mc)),
['cargo_capacity', 'vessel_dest', 'Lagrangian_template']
]
# add up oil capacities by vessel and oil types
montecarlo_byoil = (
net_capacity.groupby(
'Lagrangian_template'
).cargo_capacity.sum()
)
return montecarlo_export_byoil, montecarlo_import_byoil, montecarlo_byoil
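# Hedged usage sketch (illustrative only): summarise monte-carlo cargo capacity by
# oil type for tankers.  The csv path is a placeholder, and the hard-coded
# oil_attribution.yaml path above still has to resolve for this to run.
def _example_montecarlo_byvessel(monte_carlo_csv='monte_carlo_spills.csv'):
    exports, imports, combined = get_montecarlo_oil_byvessel('tanker', monte_carlo_csv)
    return combined.sort_values(ascending=False)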
def get_montecarlo_oil(vessel, monte_carlo_csv):
"""
Same as get_montecarlo_oil_byfac but generalized to return quantities of
oil by oil type for all US attribution (inclusive of US general and facilities)
INPUTS:
- vessel ['string']: ['atb','barge','tanker']
- monte_carlo_csv['Path' or 'string']: Path and file name for csv file
"""
# VERIFY THIS LIST IS SAME AS IN OIL_ATTRIBUTION.YAML
# AND GET FACILITY NAMES FROM THERE
# list of facility names to query monte-carlo csv file, with:
# 1) <NAME>ortes Refinery (formerly Tesoro) instead of Andeavor
# Anacortes Refinery (formerly Tesoro)
# 2) <NAME> - Harbor Island Terminal instead of
# Maxum (<NAME>um)
origin_dest_names = [
'BP Cherry Point Refinery', 'Shell Puget Sound Refinery',
'Tidewater Snake River Terminal',
'SeaPort Sound Terminal', 'Tesoro Vancouver Terminal',
'Phillips 66 Ferndale Refinery', 'Phillips 66 Tacoma Terminal',
'<NAME>ortes Refinery (formerly Tesoro)',
'Tesoro Port Angeles Terminal','U.S. Oil & Refining',
'Naval Air Station Whidbey Island (NASWI)',
'NAVSUP Manchester', 'Alon Asphalt Company (Paramount Petroleum)',
'Kinder Morgan Liquids Terminal - Harbor Island',
'Tesoro Pasco Terminal', 'REG Grays Harbor, LLC',
'Tidewater Vancouver Terminal',
'TLP Management Services LLC (TMS)',
'US'
]
oil_template_names = [
'Lagrangian_akns.dat','Lagrangian_bunker.dat',
'Lagrangian_diesel.dat','Lagrangian_gas.dat',
'Lagrangian_jet.dat','Lagrangian_dilbit.dat',
'Lagrangian_other.dat'
]
oil_types = [
'ANS','Bunker-C',
'Diesel','Gasoline',
'Jet Fuel', 'Dilbit',
'Other'
]
# open montecarlo spills file
mcdf = | pandas.read_csv(monte_carlo_csv) | pandas.read_csv |
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
import numpy
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
import datetime
import calendar
class RelativeFrequencyChart:
# returns coordinates for each chart column
def get_coordinates(self, data, bins): # bins - chart columns count
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, weights=numpy.zeros_like(self.btt) + 1. / self.btt.size, bins=bins)
return self.x, self.y
class FrequencyDensityChart:
def get_coordinates_histogram(self, data, bins):
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, bins=bins, density=True)
return self.x, self.y
def get_coordinates_line(self, data):
try:
self.btt = numpy.array(list(data))
self.density = stats.kde.gaussian_kde(list(data))
self.x_den = numpy.linspace(0, data.max(), data.count())
self.density = self.density(self.x_den)
return self.x_den, self.density
except numpy.linalg.linalg.LinAlgError:
return [-1], [-1]
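# Hedged usage sketch (illustrative only, not part of the original charting code):
# feed a small made-up pandas Series through the two chart helpers above.
def _example_chart_coordinates():
    data = pandas.Series([1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.0, 5.0])
    rel_x, rel_y = RelativeFrequencyChart().get_coordinates(data, bins=5)
    den_x, den_y = FrequencyDensityChart().get_coordinates_line(data)
    return rel_x, rel_y, den_x, den_y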
class DynamicChart:
def get_coordinates(self, frame, step_size):
self.plot = {} # chart coordinates
self.dynamic_bugs = []
self.x = []
self.y = []
self.plot['period'] = step_size
if step_size == 'W-SUN':
self.periods = DynamicChart.get_periods(self, frame, step_size) # separates DataFrame to the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0 # cumulative total of defect submission for specific period
for self.period in self.periods:
# checks whether the first day of period is Monday (if not then we change first day to Monday)
if pandas.to_datetime(self.period[0]) < pandas.to_datetime(frame['Created_tr']).min():
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min()) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min())))
self.y.append(self.cumulative)
else:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.period[0]))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str((self.period[0])))
self.y.append(self.cumulative)
# check whether the date from new DataFrame is greater than date which is specified in settings
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(self.periods[-1][1]):
# processing of days which are out of full period set
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) > pandas.to_datetime(self.periods[-1][1]))
& (pandas.to_datetime(frame['Created_tr']) <=
pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(self.periods[-1][1], format='%Y-%m-%d')) + datetime.timedelta(days=1)))
self.y.append(self.cumulative)
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
self.cumulative = 0
return self.plot
if step_size in ['7D', '10D', '3M', '6M', 'A-DEC']:
self.count0 = 0
self.count1 = 1
self.periods = DynamicChart.get_periods(self, frame, step_size) # DataFrame separation by the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0
self.countPeriodsList = len(self.periods) # count of calculated periods
self.count = 1
if self.countPeriodsList == 1:
if step_size == '7D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr'])
< pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())
+datetime.timedelta(days=7)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date( | pandas.to_datetime(frame['Created_tr']) | pandas.to_datetime |
"""Summarise per-hazard total intersections (for the whole system)
Purpose
-------
Collect network-hazard intersection attributes
- Combine with boundary Polygons to collect network-boundary intersection attributes
- Write final results to an Excel sheet
Input data requirements
-----------------------
1. Correct paths to all files and correct input parameters
2. Shapefiles of network-hazard intersections results with attributes:
- edge_id or node_id - String/Integer/Float Edge ID or Node ID of network
- length - Float length of edge intersecting with hazards
- geometry - Shapely geometry of edges as LineString or nodes as Points
3. Shapefile of administrative boundaries of Argentina with attributes:
- province_i - String/Integer ID of Province
- pro_name_e - String name of Province in English
- district_i - String/Integer ID of District
- dis_name_e - String name of District in English
- commune_id - String/Integer ID of Commune
- name_eng - String name of Commune in English
- geometry - Shapely geometry of boundary Polygon
Results
-------
1. Excel sheet of network-hazard-boundary intersection with attributes:
- edge_id/node_id - String name of intersecting edge ID or node ID
- length - Float length of intersection of edge LineString and hazard Polygon: Only for edges
- province_id - String/Integer ID of Province
- province_name - String name of Province in English
- district_id - String/Integer ID of District
- district_name - String name of District in English
- commune_id - String/Integer ID of Commune
- commune_name - String name of Commune in English
"""
import itertools
import os
import sys
import geopandas as gpd
import pandas as pd
from shapely.geometry import Polygon
from atra.utils import *
from atra.transport_flow_and_failure_functions import *
def hazard_data_summary(hazard_network_dataframe,network_dataframe):
df = pd.merge(network_dataframe,hazard_network_dataframe,how='left',on=['edge_id']).fillna(0)
df['min_exposure_length'] = 0.001*df['min_exposure_length']
df['max_exposure_length'] = 0.001*df['max_exposure_length']
    hazard_totals = df.groupby(['hazard_type','model','climate_scenario','year'])[['min_exposure_length','max_exposure_length']].sum().reset_index()
hazard_totals_min = hazard_totals.groupby(['hazard_type','climate_scenario','year'])['min_exposure_length'].min().reset_index()
hazard_totals_min['Percentage (min)'] = hazard_totals_min['min_exposure_length']/df['length'].sum()
hazard_totals_max = hazard_totals.groupby(['hazard_type','climate_scenario','year'])['max_exposure_length'].max().reset_index()
hazard_totals_max['Percentage (max)'] = hazard_totals_max['max_exposure_length']/df['length'].sum()
hazards = pd.merge(hazard_totals_min,hazard_totals_max,how='left',on=['hazard_type','climate_scenario','year'])
hazards.rename(columns={'hazard_type':'Hazard Type','climate_scenario':'Climate Scenario','year':'Year'},inplace=True)
return hazards
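# Hedged usage sketch (illustrative only): tiny made-up frames showing the columns
# hazard_data_summary expects; values are not real data.
def _example_hazard_data_summary():
    exposures = pd.DataFrame({
        'edge_id': ['e1', 'e1'],
        'hazard_type': ['fluvial flooding', 'fluvial flooding'],
        'model': ['m1', 'm2'],
        'climate_scenario': ['Baseline', 'Future_Med'],
        'year': [2016, 2050],
        'min_exposure_length': [500.0, 800.0],
        'max_exposure_length': [1500.0, 2000.0],
    })
    network = pd.DataFrame({'edge_id': ['e1', 'e2'], 'length': [10.0, 5.0]})
    return hazard_data_summary(exposures, network)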
def main():
"""Summarise
1. Specify the paths from where you to read and write:
- Input data
        - Intermediate calculations data
- Output results
2. Supply input data and parameters
- Names of the three Provinces - List of string types
- Names of modes - List of strings
- Names of output modes - List of strings
- Names of hazard bands - List of integers
- Names of hazard thresholds - List of integers
        - Condition 'Yes' or 'No' for whether the user wants to process results
3. Give the paths to the input data files:
- Commune boundary and stats data shapefile
- Hazard datasets description Excel file
- String name of sheet in hazard datasets description Excel file
4. Specify the output files and paths to be created
"""
data_path, calc_path, output_path = load_config()['paths']['data'], load_config()[
'paths']['calc'], load_config()['paths']['output']
# Supply input data and parameters
modes = ['road','rail','bridge','port','air']
boundary_cols = ['department_id','department_name','province_id','province_name']
hazard_cols = ['climate_scenario','hazard_type','model','probability','year']
flood_types = ['fluvial flooding','pluvial flooding']
climate_scenarios = ['Baseline','Future_Med','Future_High']
# Give the paths to the input data files
hazard_path = os.path.join(output_path, 'hazard_scenarios')
# Give the paths to the input data files
national_file = os.path.join(output_path,
'network_stats',
'national_scale_boundary_stats.xlsx')
national_hazard_file = os.path.join(output_path,
'hazard_scenarios')
# Specify the output files and paths to be created
output_dir = os.path.join(output_path, 'network_stats')
if os.path.exists(output_dir) == False:
os.mkdir(output_dir)
data_excel = os.path.join(
output_dir,'national_scale_hazard_intersections_summary.xlsx')
nat_excel_writer = pd.ExcelWriter(data_excel)
data_excel = os.path.join(
output_dir,'national_scale_hazard_intersections_boundary_summary.xlsx')
bd_excel_writer = pd.ExcelWriter(data_excel)
'''Flood stats
'''
for m in range(len(modes)):
flood_df = pd.read_csv(os.path.join(national_hazard_file,
'{}_hazard_intersections.csv'.format(modes[m])),
encoding='utf-8-sig')
network_stats = pd.read_excel(national_file,sheet_name=modes[m],encoding='utf-8-sig')
if modes[m] in ['road','rail']:
edges = pd.read_csv(os.path.join(data_path,'network','{}_edges.csv'.format(modes[m])),encoding='utf-8-sig')
if modes[m] == 'road':
edges = edges[(edges['road_type'] == 'national') | (edges['road_type'] == 'province') | (edges['road_type'] == 'rural')]
else:
flow_df = pd.read_csv(os.path.join(output_path,'flow_mapping_combined','weighted_flows_rail_100_percent.csv'))
edges = pd.merge(edges,flow_df,how='left',on=['edge_id'])
edges = edges[edges['max_total_tons'] > 0]
del flow_df
flood_df = flood_df[flood_df['edge_id'].isin(edges['edge_id'].values.tolist())]
network_stats = network_stats[network_stats['edge_id'].isin(edges['edge_id'].values.tolist())]
network_stats = network_stats.groupby(boundary_cols)['length'].sum().reset_index()
network_stats.rename(columns={'length':'total_length_m'},inplace=True)
hazard_stats = flood_df.groupby(boundary_cols+hazard_cols)['length'].sum().reset_index()
hazard_stats.rename(columns={'length':'exposure_length_m'},inplace=True)
hazard_stats = pd.merge(hazard_stats,network_stats,how='left', on=boundary_cols).fillna(0)
hazard_stats['percentage'] = 100.0*hazard_stats['exposure_length_m']/hazard_stats['total_length_m']
hazard_stats.to_excel(bd_excel_writer, modes[m], index=False,encoding='utf-8-sig')
bd_excel_writer.save()
total_length = edges['length'].values.sum()
flood_df = flood_df[['hazard_type','climate_scenario','probability','length']].groupby(['hazard_type','climate_scenario','probability'])['length'].sum().reset_index()
flood_df['length'] = 0.001*flood_df['length']
flood_df.rename(columns={'length':'exposure_length_km'},inplace=True)
flood_df['return period'] = 1/flood_df['probability']
return_periods = list(set(flood_df['return period'].values.tolist()))
f_df = | pd.DataFrame(return_periods,columns=['return period']) | pandas.DataFrame |
import numpy as np
import rasterio as rio
import geopandas as gpd
import pandas as pd
import random
#from osgeo import gdal, ogr, osr
from rasterio.mask import mask
from shapely.geometry import mapping, Polygon
from skimage.util import img_as_float
import os as os
os.chdir('E:/SLICUAV_manuscript_code/3_Landscape_mapping/2019_10_23_1_compute_superpixel_features')
# import machinery for this
from trees.clusterfeatures import ClusterFeatures
grid_shps = gpd.read_file('E:/SLICUAV_manuscript_data/3_Clipped_OMs/'+
'2019_08_30_basecamp_grid/'+
'2019_08_30_basecamp_50m_grid.shp')
ftprnt_shp = gpd.read_file('E:/SLICUAV_manuscript_data/7_Harapan_shapefiles/'+
'2019_09_19_basecamp_footprint_both_years_latlong.shp')
flag = True
#for shp_i in range(all_shps.shape[0]):
for i in range(50):
shp_flag = True
random.seed(42)
shp_i = i + 450
# Get unique tag for this block
ths_id = grid_shps['id'][shp_i]
# load images
rgbtif = rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_RGB.tif')
rgbimg = rgbtif.read()
# Reorder correctly as first dimension is bands
rgbimg = np.swapaxes(rgbimg,0,2)
rgbimg = np.swapaxes(rgbimg,0,1)
rgbtif.close()
mstif = rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_MS.tif')
msimg = mstif.read()
# Reorder correctly as first dimension is bands
msimg = np.swapaxes(msimg,0,2)
msimg = np.swapaxes(msimg,0,1)
mstif.close()
dsmtif = rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_DSM.tif')
dsmimg = dsmtif.read()
# Reorder correctly as first dimension is bands
dsmimg = np.swapaxes(dsmimg,0,2)
dsmimg = np.swapaxes(dsmimg,0,1)
dsmtif.close()
# Remove redundant third axis
dsmimg = np.squeeze(dsmimg)
# Deal with any missing value set to arbitrary negative number
dsmimg[dsmimg<-1000]=0
### scale both actual images to 0-1
rgbimg = img_as_float(rgbimg)
msimg = msimg/65535
# read in the segmented shapes
cur_segs = gpd.read_file('E:/SLICUAV_manuscript_data/5_Landscape_superpixels/'+\
str(ths_id) +'_SLIC_5000.shp')
seg_flag = True
ticker = 0
for seg_i in range(cur_segs.shape[0]):
ths_shp = []
# check if it's in the area for which we collected data
if not ftprnt_shp.intersects(Polygon(cur_segs['geometry'][seg_i]))[0]:
ticker += 1
continue
tmp_gjson = mapping(cur_segs['geometry'][seg_i])
ths_shp.append(tmp_gjson)
del tmp_gjson
# Get RGB mask
with rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_RGB.tif') as gtif:
rgb_clip, clip_affine = mask(gtif,ths_shp,crop=False,all_touched=True)
rgb_clip = np.swapaxes(rgb_clip,0,2)
rgb_clip = np.swapaxes(rgb_clip,0,1)
rgb_mask = np.nonzero(rgb_clip.sum(axis=2))
del rgb_clip, clip_affine
# Get MS mask
with rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_MS.tif') as gtif:
ms_clip, clip_affine = mask(gtif,ths_shp,crop=False,all_touched=True)
ms_clip = np.swapaxes(ms_clip,0,2)
ms_clip = np.swapaxes(ms_clip,0,1)
ms_clip[ms_clip>65535]=0
ms_mask = np.nonzero(ms_clip.sum(axis=2))
del ms_clip, clip_affine
# Get DSM mask
with rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_DSM.tif') as gtif:
dsm_clip, clip_affine = mask(gtif,ths_shp,crop=False,all_touched=True)
dsm_clip = np.swapaxes(dsm_clip,0,2)
dsm_clip = np.swapaxes(dsm_clip,0,1)
dsm_mask = np.nonzero(dsm_clip.sum(axis=2))
del dsm_clip, clip_affine
feat_struct = ClusterFeatures(shp_i,'NA',rgbimg,rgb_mask,msimg,ms_mask,dsmimg,dsm_mask)
feat_struct.runFeaturePipeline(thresh=0.5,glcm_steps=3,acor_steps=3,mode=False,HSV=True)
feat_vec = feat_struct.featStack
del rgb_mask, ms_mask, dsm_mask, ths_shp
if flag:
| pd.DataFrame(feat_struct.featList) | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..kabam_exe import Kabam
test = {}
class TestKabam(unittest.TestCase):
"""
Unit tests for Kabam model.
: unittest will
: 1) call the setup method,
: 2) then call every method starting with "test",
: 3) then the teardown method
"""
print("kabam unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for Kabam unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open Kabam qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for Kabam unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_kabam_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty kabam object
kabam_empty = Kabam(df_empty, df_empty)
return kabam_empty
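    # Hedged illustration (not one of the original unit tests): the pattern used
    # throughout this suite is to build an empty Kabam object, attach pandas Series
    # inputs, call the method under test, and compare against expected values.
    def _example_test_pattern(self):
        kabam_empty = self.create_kabam_object()
        kabam_empty.log_kow = pd.Series([4., 5., 6.], dtype='float')
        kabam_empty.kow = 10. ** kabam_empty.log_kow
        # phytoplankton_k1_calc is exercised for real in test_phytoplankton_k1_calc below
        return kabam_empty.phytoplankton_k1_calc(kabam_empty.kow)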
def test_ventilation_rate(self):
"""
:description Ventilation rate of aquatic animal
:unit L/d
:expression Kabam Eq. A5.2b (Gv)
:param zoo_wb: wet weight of animal (kg)
:param conc_do: concentration of dissolved oxygen (mg O2/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
try:
#use the zooplankton variables/values for the test
kabam_empty.zoo_wb = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
kabam_empty.conc_do = pd.Series([5.0, 10.0, 7.5], dtype='float')
result = kabam_empty.ventilation_rate(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_uptake_eff_gills(self):
"""
:description Pesticide uptake efficiency by gills
:unit fraction
"expresssion Kabam Eq. A5.2a (Ew)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.540088, 0.540495], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series(['nan', 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.pest_uptake_eff_bygills()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_phytoplankton_k1_calc(self):
"""
:description Uptake rate constant through respiratory area for phytoplankton
:unit: L/kg*d
:expression Kabam Eq. A5.1 (K1:unique to phytoplankton)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series([4., 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.phytoplankton_k1_calc(kabam_empty.kow)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_aq_animal_k1_calc(self):
"""
        :description Uptake rate constant through respiratory area for aquatic animals
:unit: L/kg*d
:expression Kabam Eq. A5.2 (K1)
:param pest_uptake_eff_bygills: Pesticide uptake efficiency by gills of aquatic animals (fraction)
:param vent_rate: Ventilation rate of aquatic animal (L/d)
:param wet_wgt: wet weight of animal (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 1201.13849, 169.37439], dtype = 'float')
try:
pest_uptake_eff_bygills = pd.Series(['nan', 0.0304414, 0.0361228], dtype = 'float')
vent_rate = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
wet_wgt = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
result = kabam_empty.aq_animal_k1_calc(pest_uptake_eff_bygills, vent_rate, wet_wgt)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_water_part_coef(self):
"""
:description Organism-Water partition coefficient (based on organism wet weight)
:unit ()
:expression Kabam Eq. A6a (Kbw)
:param zoo_lipid: lipid fraction of organism (kg lipid/kg organism wet weight)
:param zoo_nlom: non-lipid organic matter (NLOM) fraction of organism (kg NLOM/kg organism wet weight)
:param zoo_water: water content of organism (kg water/kg organism wet weight)
:param kow: octanol-water partition coefficient ()
:param beta: proportionality constant expressing the sorption capacity of NLOM or NLOC to
that of octanol
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_lipid_frac = pd.Series([0.03, 0.04, 0.06], dtype = 'float')
kabam_empty.zoo_nlom_frac = pd.Series([0.10, 0.20, 0.30,], dtype = 'float')
kabam_empty.zoo_water_frac = pd.Series([0.87, 0.76, 0.64], dtype = 'float')
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
beta = 0.35
result = kabam_empty.animal_water_part_coef(kabam_empty.zoo_lipid_frac,
kabam_empty.zoo_nlom_frac,
kabam_empty.zoo_water_frac, beta)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_aq_animal_k2_calc(self):
"""
:description Elimination rate constant through the respiratory area
:unit (per day)
:expression Kabam Eq. A6 (K2)
:param zoo_k1: Uptake rate constant through respiratory area for aquatic animals
:param k_bw_zoo (Kbw): Organism-Water partition coefficient (based on organism wet weight ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([2.5186969, 0.79045921, 0.09252798], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_k1 = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float')
kabam_empty.k_bw_zoo = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float')
result = kabam_empty.aq_animal_k2_calc(kabam_empty.zoo_k1, kabam_empty.k_bw_zoo)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_grow_rate_const(self):
"""
:description Aquatic animal/organism growth rate constant
:unit (per day)
:expression Kabam Eq. A7.1 & A7.2
:param zoo_wb: wet weight of animal/organism (kg)
:param water_temp: water temperature (degrees C)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.01255943, 0.00125594, 0.00251], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.0], dtype = 'float')
kabam_empty.water_temp = pd.Series([10., 15., 20.], dtype = 'float')
result = kabam_empty.animal_grow_rate_const(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_dietary_trans_eff(self):
"""
        :description Aquatic animal/organism dietary pesticide transfer efficiency
:unit fraction
:expression Kabam Eq. A8a (Ed)
:param kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.499251, 0.492611, 0.434783], dtype = 'float')
try:
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
result = kabam_empty.dietary_trans_eff()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_aq_animal_feeding_rate(self):
"""
:description Aquatic animal feeding rate (except filterfeeders)
:unit kg/d
:expression Kabam Eq. A8b1 (Gd)
:param wet_wgt: wet weight of animal/organism (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([4.497792e-08, 1.0796617e-3, 0.073042572], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.], dtype = 'float')
kabam_empty.water_temp = pd.Series([10., 15., 20.])
result = kabam_empty.aq_animal_feeding_rate(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filterfeeder_feeding_rate(self):
"""
:description Filter feeder feeding rate
:unit kg/d
:expression Kabam Eq. A8b2 (Gd)
:param self.gv_filterfeeders: filterfeeder ventilation rate (L/d)
:param self.conc_ss: Concentration of Suspended Solids (Css - kg/L)
:param particle_scav_eff: efficiency of scavenging of particles absorbed from water (fraction)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 1.97287e-7, 0.03282195], dtype = 'float')
try:
kabam_empty.gv_filterfeeders = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
kabam_empty.conc_ss = pd.Series([0.00005, 0.00005, 0.07], dtype = 'float')
kabam_empty.particle_scav_eff = 1.0
result = kabam_empty.filterfeeders_feeding_rate()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_diet_uptake_rate_const(self):
"""
        :description Pesticide uptake rate constant for uptake through ingestion of food
:unit kg food/kg organism - day
:expression Kabam Eq. A8 (kD)
:param dietary_trans_eff: dietary pesticide transfer efficiency (fraction)
:param feeding rate: animal/organism feeding rate (kg/d)
:param wet weight of aquatic animal/organism (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.22455272, 0.05318532, 0.031755767 ], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.ed_zoo = pd.Series([0.499251, 0.492611, 0.434783], dtype = 'float')
kabam_empty.gd_zoo = pd.Series([4.497792e-08, 1.0796617e-3, 0.073042572], dtype = 'float')
kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.0])
result = kabam_empty.diet_uptake_rate_const(kabam_empty.ed_zoo, \
kabam_empty.gd_zoo, kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_overall_diet_content(self):
"""
:description Overall fraction of aquatic animal/organism diet attributed to diet food component
(i.e., lipids or NLOM or water)
:unit kg/kg
:expression not shown in Kabam documentation: it is associated with Kabam Eq. A9
overall_diet_content is equal to the sum over dietary elements
        : of (fraction of diet) * (content in diet element); for example zooplankton ingest sediment and
: phytoplankton, thus the overall lipid content of the zooplankton diet equals
: (fraction of sediment in zooplankton diet) * (fraction of lipids in sediment) +
: (fraction of phytoplankton in zooplankton diet) * (fraction of lipids in phytoplankton)
        :param diet_fraction: list of values representing fractions of aquatic animal/organism diet attributed
to each element of diet
:param content_fraction: list of values representing fraction of diet element attributed to a specific
component of that diet element (e.g., lipid, NLOM, or water)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.025, 0.03355, 0.0465], dtype = 'float')
try:
#For test purposes we'll use the small fish diet variables/values
kabam_empty.sfish_diet_sediment = pd.Series([0.0, 0.01, 0.05], dtype = 'float')
kabam_empty.sfish_diet_phytoplankton = pd.Series([0.0, 0.01, 0.05], dtype = 'float')
kabam_empty.sfish_diet_zooplankton = pd.Series([0.5, 0.4, 0.5], dtype = 'float')
kabam_empty.sfish_diet_benthic_invertebrates = pd.Series([0.5, 0.57, 0.35], dtype = 'float')
kabam_empty.sfish_diet_filterfeeders = pd.Series([0.0, 0.01, 0.05], dtype = 'float')
kabam_empty.sediment_lipid = pd.Series([0.0, 0.01, 0.0], dtype = 'float')
kabam_empty.phytoplankton_lipid = pd.Series([0.02, 0.015, 0.03], dtype = 'float')
kabam_empty.zoo_lipid = pd.Series([0.03, 0.04, 0.05], dtype = 'float')
kabam_empty.beninv_lipid = pd.Series([0.02, 0.03, 0.05], dtype = 'float')
kabam_empty.filterfeeders_lipid = pd.Series([0.01, 0.02, 0.05], dtype = 'float')
diet_elements = pd.Series([], dtype = 'float')
content_fracs = pd.Series([], dtype = 'float')
for i in range(len(kabam_empty.sfish_diet_sediment)):
diet_elements = [kabam_empty.sfish_diet_sediment[i],
kabam_empty.sfish_diet_phytoplankton[i],
kabam_empty.sfish_diet_zooplankton[i],
kabam_empty.sfish_diet_benthic_invertebrates[i],
kabam_empty.sfish_diet_filterfeeders[i]]
content_fracs = [kabam_empty.sediment_lipid[i],
kabam_empty.phytoplankton_lipid[i],
kabam_empty.zoo_lipid[i],
kabam_empty.beninv_lipid[i],
kabam_empty.filterfeeders_lipid[i]]
result[i] = kabam_empty.overall_diet_content(diet_elements, content_fracs)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fecal_egestion_rate_factor(self):
"""
Aquatic animal/organism egestion rate of fecal matter factor (to be multiplied by the
feeding rate to calculate egestion rate of fecal matter)
:unit (kg feces)/[(kg organism) - day]
:expression Kabam Eq. A9 (GF)
:param epsilonL: dietary assimilation rate of lipids (fraction)
:param epsilonN: dietary assimilation rate of NLOM (fraction)
:param epsilonW: dietary assimilation rate of water (fraction)
        :param diet_lipid: lipid content of aquatic animal/organism diet (fraction)
        :param diet_nlom: NLOM content of aquatic animal/organism diet (fraction)
        :param diet_water: water content of aquatic animal/organism diet (fraction)
:param feeding_rate: aquatic animal/organism feeding rate (kg/d)
:return:
"""
#this test includes two results; 'result1' represents the overall assimilation rate of the
#aquatic animal/organism diet; and 'result' represents the product of this assimilation rate
#and the feeding rate (this multiplication will be done in the main model routine
#as opposed to within a method -- the method here is limited to the assimilation factor
#because this factor is used elsewhere as well
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = | pd.Series([], dtype='float') | pandas.Series |
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import modelling as ml
import os
import altair as alt
import altair
import itertools
import statsmodels.api as sm
from scipy import stats
import sys
from streamlit import caching
import SessionState
import platform
import base64
from io import BytesIO
from pygam import LinearGAM, LogisticGAM, s
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from factor_analyzer import FactorAnalyzer
from factor_analyzer.factor_analyzer import calculate_bartlett_sphericity
from factor_analyzer.factor_analyzer import calculate_kmo
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
#sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
#Session state
session_state = SessionState.get(id = 0)
# Analysis type
analysis_type = st.selectbox("What kind of analysis would you like to conduct?", ["Regression", "Multi-class classification", "Data decomposition"], key = session_state.id)
st.header("**Multivariate data**")
if analysis_type == "Regression":
st.markdown("Get your data ready for powerfull methods: Artificial Neural Networks, Boosted Regression Trees, Random Forest, Generalized Additive Models, Multiple Linear Regression, and Logistic Regression! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Multi-class classification":
st.markdown("Get your data ready for powerfull multi-class classification methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Data decomposition":
st.markdown("Decompose your data with Principal Component Analysis or Factor Analysis! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.beta_expander('Upload settings')
with separator_expander:
a4,a5=st.beta_columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = session_state.id)
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = session_state.id)
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = session_state.id)
a4,a5=st.beta_columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = session_state.id)
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = session_state.id)
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = session_state.id)
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = session_state.id)
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
else:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.beta_expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4)
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False)
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False)
sett_theme = st.selectbox('Theme', ["Light", "Dark"])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
session_state.id = session_state.id + 1
st.sidebar.markdown("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
# Check if enough data is available
if n_rows > 0 and n_cols > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
data_exploration_container = st.beta_container()
with data_exploration_container:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.beta_expander("Explore raw data info and stats ", expanded = False)
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from the Gallup World Poll surveys from 2018 to 2020. For more details see the [World Happiness Report 2021] (https://worldhappiness.report/).")
st.markdown("**Citation:**")
st.markdown("Helliwell, <NAME>., <NAME>, <NAME>, and <NAME>, eds. 2021. World Happiness Report 2021. New York: Sustainable Development Solutions Network.")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("Country")
col2.write("country name")
col1,col2=st.beta_columns(2)
col1.write("Year ")
col2.write("year ranging from 2005 to 2020")
col1,col2=st.beta_columns(2)
col1.write("Ladder")
col2.write("happiness score or subjective well-being with the best possible life being a 10, and the worst possible life being a 0")
col1,col2=st.beta_columns(2)
col1.write("Log GDP per capita")
col2.write("in purchasing power parity at constant 2017 international dollar prices")
col1,col2=st.beta_columns(2)
col1.write("Social support")
col2.write("the national average of the binary responses (either 0 or 1) to the question regarding relatives or friends to count on")
col1,col2=st.beta_columns(2)
col1.write("Healthy life expectancy at birth")
col2.write("based on the data extracted from the World Health Organization’s Global Health Observatory data repository")
col1,col2=st.beta_columns(2)
col1.write("Freedom to make life choices")
col2.write("national average of responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Generosity")
col2.write("residual of regressing national average of response to the question regarding money donations in the past month on GDP per capita")
col1,col2=st.beta_columns(2)
col1.write("Perceptions of corruption")
col2.write("the national average of the survey responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Positive affect")
col2.write("the average of three positive affect measures (happiness, laugh and enjoyment)")
col1,col2=st.beta_columns(2)
col1.write("Negative affect (worry, sadness and anger)")
col2.write("the average of three negative affect measures (worry, sadness and anger)")
st.markdown("")
if analysis_type == "Multi-class classification":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from Fisher's Iris data set. See [here] (https://archive.ics.uci.edu/ml/datasets/iris) for more information.")
st.markdown("**Citation:**")
st.markdown("<NAME>. (1936). The use of multiple measurements in taxonomic problems. Annals of Eugenics, 7(2): 179–188. doi: [10.1111/j.1469-1809.1936.tb02137.x] (https://doi.org/10.1111%2Fj.1469-1809.1936.tb02137.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("class_category")
col2.write("Numerical category for 'class': Iris Setosa (0), Iris Versicolour (1), and Iris Virginica (2)")
col1,col2=st.beta_columns(2)
col1.write("class")
col2.write("Iris Setosa, Iris Versicolour, and Iris Virginica")
col1,col2=st.beta_columns(2)
col1.write("sepal length")
col2.write("sepal length in cm")
col1,col2=st.beta_columns(2)
col1.write("sepal width")
col2.write("sepal width in cm")
col1,col2=st.beta_columns(2)
col1.write("petal length")
col2.write("petal length in cm")
col1,col2=st.beta_columns(2)
col1.write("petal width")
col2.write("petal width in cm")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data ", value = False, key = session_state.id):
st.write(df)
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info ", value = False, key = session_state.id)
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info ', value = False, key = session_state.id):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data) ', value = False, key = session_state.id):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
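                # The workbook is written to an in-memory buffer (BytesIO + xlsxwriter),
                # base64-encoded, and embedded as a data-URI download link rendered via
                # st.markdown with unsafe_allow_html=True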
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
#++++++++++++++++++++++
# DATA PROCESSING
# Settings for data processing
#-------------------------------------
#st.subheader("Data processing")
dev_expander_dm_sb = st.beta_expander("Specify data processing preferences", expanded = False)
with dev_expander_dm_sb:
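            # count rows that contain at least one NA: np.where(df.isnull())[0] returns the row
            # index of every NA cell, and pd.unique removes repeated row indices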
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
n_rows_wNAs_pre_processing = "No"
if n_rows_wNAs > 0:
n_rows_wNAs_pre_processing = "Yes"
a1, a2, a3 = st.beta_columns(3)
else: a1, a3 = st.beta_columns(2)
sb_DM_dImp_num = None
sb_DM_dImp_other = None
sb_DM_delRows=None
sb_DM_keepRows=None
with a1:
#--------------------------------------------------------------------------------------
# DATA CLEANING
st.markdown("**Data cleaning**")
# Delete rows
delRows =st.selectbox('Delete rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if delRows!='-':
if delRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_delRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif delRows=='equal':
sb_DM_delRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if delRows=='greater':
sb_DM_delRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.warning("WARNING: No row is deleted!")
elif delRows=='greater or equal':
sb_DM_delRows=df.index[df.index >= row_1]
if row_1 == 0:
st.error("ERROR: All rows are deleted!")
return
elif delRows=='smaller':
sb_DM_delRows=df.index[df.index < row_1]
if row_1 == 0:
st.warning("WARNING: No row is deleted!")
elif delRows=='smaller or equal':
sb_DM_delRows=df.index[df.index <= row_1]
if row_1 == len(df)-1:
st.error("ERROR: All rows are deleted!")
return
if sb_DM_delRows is not None:
df = df.loc[~df.index.isin(sb_DM_delRows)]
no_delRows=n_rows-df.shape[0]
# Keep rows
keepRows =st.selectbox('Keep rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if keepRows!='-':
if keepRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_keepRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif keepRows=='equal':
sb_DM_keepRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if keepRows=='greater':
sb_DM_keepRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.error("ERROR: No row is kept!")
return
elif keepRows=='greater or equal':
sb_DM_keepRows=df.index[df.index >= row_1]
if row_1 == 0:
st.warning("WARNING: All rows are kept!")
elif keepRows=='smaller':
sb_DM_keepRows=df.index[df.index < row_1]
if row_1 == 0:
st.error("ERROR: No row is kept!")
return
elif keepRows=='smaller or equal':
sb_DM_keepRows=df.index[df.index <= row_1]
if sb_DM_keepRows is not None:
df = df.loc[df.index.isin(sb_DM_keepRows)]
no_keptRows=df.shape[0]
# Delete columns
sb_DM_delCols = st.multiselect("Select columns to delete ", df.columns, key = session_state.id)
df = df.loc[:,~df.columns.isin(sb_DM_delCols)]
# Keep columns
sb_DM_keepCols = st.multiselect("Select columns to keep", df.columns, key = session_state.id)
if len(sb_DM_keepCols) > 0:
df = df.loc[:,df.columns.isin(sb_DM_keepCols)]
# Delete duplicates if any exist
if df[df.duplicated()].shape[0] > 0:
sb_DM_delDup = st.selectbox("Delete duplicate rows ", ["No", "Yes"], key = session_state.id)
if sb_DM_delDup == "Yes":
n_rows_dup = df[df.duplicated()].shape[0]
df = df.drop_duplicates()
elif df[df.duplicated()].shape[0] == 0:
sb_DM_delDup = "No"
# Delete rows with NA if any exist
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
if n_rows_wNAs > 0:
sb_DM_delRows_wNA = st.selectbox("Delete rows with NAs ", ["No", "Yes"], key = session_state.id)
if sb_DM_delRows_wNA == "Yes":
df = df.dropna()
elif n_rows_wNAs == 0:
sb_DM_delRows_wNA = "No"
# Filter data
st.markdown("**Data filtering**")
filter_var = st.selectbox('Filter your data by a variable...', list('-')+ list(df.columns), key = session_state.id)
if filter_var !='-':
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if df[filter_var].dtypes=="float64":
filter_format="%.8f"
else:
filter_format=None
user_filter=st.selectbox('Select values that are ...', options=['greater','greater or equal','smaller','smaller or equal', 'equal','between'], key = session_state.id)
if user_filter=='between':
filter_1=st.number_input('Lower limit is', format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
filter_2=st.number_input('Upper limit is', format=filter_format, value=df[filter_var].max(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if filter_1 < filter_2 :
df = df[(df[filter_var] > filter_1) & (df[filter_var] < filter_2)]
if len(df) == 0:
st.error("ERROR: No data available for the selected limits!")
return
elif filter_1 >= filter_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif user_filter=='equal':
filter_1=st.multiselect('to... ', options=df[filter_var].values, key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
else:
filter_1=st.number_input('than... ',format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if user_filter=='greater':
df = df[df[filter_var] > filter_1]
elif user_filter=='greater or equal':
df = df[df[filter_var] >= filter_1]
elif user_filter=='smaller':
df= df[df[filter_var]< filter_1]
elif user_filter=='smaller or equal':
df = df[df[filter_var] <= filter_1]
if len(df) == 0:
st.error("ERROR: No data available for the selected value!")
return
elif len(df) == n_rows:
st.warning("WARNING: Data are not filtered for this value!")
else:
filter_1=st.multiselect('Filter your data by a value...', (df[filter_var]).unique(), key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
if n_rows_wNAs_pre_processing == "Yes":
with a2:
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
# Select data imputation method (only if rows with NA not deleted)
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.markdown("**Data imputation**")
sb_DM_dImp_choice = st.selectbox("Replace entries with NA ", ["No", "Yes"], key = session_state.id)
if sb_DM_dImp_choice == "Yes":
# Numeric variables
sb_DM_dImp_num = st.selectbox("Imputation method for numeric variables ", ["Mean", "Median", "Random value"], key = session_state.id)
# Other variables
sb_DM_dImp_other = st.selectbox("Imputation method for other variables ", ["Mode", "Random value"], key = session_state.id)
df = fc.data_impute(df, sb_DM_dImp_num, sb_DM_dImp_other)
else:
st.markdown("**Data imputation**")
st.write("")
st.info("No NAs in data set!")
with a3:
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
st.markdown("**Data transformation**")
# Select columns for different transformation types
transform_options = df.select_dtypes([np.number]).columns
numCat_options = df.columns
sb_DM_dTrans_log = st.multiselect("Select columns to transform with log ", transform_options, key = session_state.id)
if sb_DM_dTrans_log is not None:
df = fc.var_transform_log(df, sb_DM_dTrans_log)
sb_DM_dTrans_sqrt = st.multiselect("Select columns to transform with sqrt ", transform_options, key = session_state.id)
if sb_DM_dTrans_sqrt is not None:
df = fc.var_transform_sqrt(df, sb_DM_dTrans_sqrt)
sb_DM_dTrans_square = st.multiselect("Select columns for squaring ", transform_options, key = session_state.id)
if sb_DM_dTrans_square is not None:
df = fc.var_transform_square(df, sb_DM_dTrans_square)
sb_DM_dTrans_cent = st.multiselect("Select columns for centering ", transform_options, key = session_state.id)
if sb_DM_dTrans_cent is not None:
df = fc.var_transform_cent(df, sb_DM_dTrans_cent)
sb_DM_dTrans_stand = st.multiselect("Select columns for standardization ", transform_options, key = session_state.id)
if sb_DM_dTrans_stand is not None:
df = fc.var_transform_stand(df, sb_DM_dTrans_stand)
sb_DM_dTrans_norm = st.multiselect("Select columns for normalization ", transform_options, key = session_state.id)
if sb_DM_dTrans_norm is not None:
df = fc.var_transform_norm(df, sb_DM_dTrans_norm)
sb_DM_dTrans_numCat = st.multiselect("Select columns for numeric categorization ", numCat_options, key = session_state.id)
if sb_DM_dTrans_numCat:
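                    # numeric categorization is only offered if none of the selected columns contain NAs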
if not df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist():
sb_DM_dTrans_numCat_sel = st.multiselect("Select variables for manual categorization ", sb_DM_dTrans_numCat, key = session_state.id)
if sb_DM_dTrans_numCat_sel:
for var in sb_DM_dTrans_numCat_sel:
if df[var].unique().size > 5:
st.error("ERROR: Selected variable has too many categories (>5): " + str(var))
return
else:
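                                # build a lookup table that maps every unique value of the variable to a
                                # user-specified integer category, then write the mapped values to a new
                                # column named "numCat_" + var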
manual_cats = pd.DataFrame(index = range(0, df[var].unique().size), columns=["Value", "Cat"])
text = "Category for "
# Save manually selected categories
for i in range(0, df[var].unique().size):
text1 = text + str(var) + ": " + str(sorted(df[var].unique())[i])
man_cat = st.number_input(text1, value = 0, min_value=0, key = session_state.id)
manual_cats.loc[i]["Value"] = sorted(df[var].unique())[i]
manual_cats.loc[i]["Cat"] = man_cat
new_var_name = "numCat_" + var
new_var = pd.DataFrame(index = df.index, columns = [new_var_name])
for c in df[var].index:
if pd.isnull(df[var][c]) == True:
new_var.loc[c, new_var_name] = np.nan
elif pd.isnull(df[var][c]) == False:
new_var.loc[c, new_var_name] = int(manual_cats[manual_cats["Value"] == df[var][c]]["Cat"])
df[new_var_name] = new_var.astype('int64')
# Exclude columns with manual categorization from standard categorization
numCat_wo_manCat = [var for var in sb_DM_dTrans_numCat if var not in sb_DM_dTrans_numCat_sel]
df = fc.var_transform_numCat(df, numCat_wo_manCat)
else:
df = fc.var_transform_numCat(df, sb_DM_dTrans_numCat)
else:
col_with_na = df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist()
st.error("ERROR: Please select columns without NAs: " + ', '.join(map(str,col_with_na)))
return
else:
sb_DM_dTrans_numCat = None
sb_DM_dTrans_mult = st.number_input("Number of variable multiplications ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_mult != 0:
multiplication_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_mult), columns=["Var1", "Var2"])
text = "Multiplication pair"
for i in range(0, sb_DM_dTrans_mult):
text1 = text + " " + str(i+1)
text2 = text + " " + str(i+1) + " "
mult_var1 = st.selectbox(text1, transform_options, key = session_state.id)
mult_var2 = st.selectbox(text2, transform_options, key = session_state.id)
multiplication_pairs.loc[i]["Var1"] = mult_var1
multiplication_pairs.loc[i]["Var2"] = mult_var2
fc.var_transform_mult(df, mult_var1, mult_var2)
sb_DM_dTrans_div = st.number_input("Number of variable divisions ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_div != 0:
division_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_div), columns=["Var1", "Var2"])
text = "Division pair"
for i in range(0, sb_DM_dTrans_div):
text1 = text + " " + str(i+1) + " (numerator)"
text2 = text + " " + str(i+1) + " (denominator)"
div_var1 = st.selectbox(text1, transform_options, key = session_state.id)
div_var2 = st.selectbox(text2, transform_options, key = session_state.id)
division_pairs.loc[i]["Var1"] = div_var1
division_pairs.loc[i]["Var2"] = div_var2
fc.var_transform_div(df, div_var1, div_var2)
data_transform=st.checkbox("Transform data in Excel?", value=False)
if data_transform==True:
st.info("Press the button to open your data in Excel. Don't forget to save your result as a csv or a txt file!")
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="data",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Data_transformation__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Transform your data in Excel</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# PROCESSING SUMMARY
if st.checkbox('Show a summary of my data processing preferences ', value = False, key = session_state.id):
st.markdown("Summary of data changes:")
#--------------------------------------------------------------------------------------
# DATA CLEANING
# Rows
if sb_DM_delRows is not None and delRows!='-' :
if no_delRows > 1:
st.write("-", no_delRows, " rows were deleted!")
elif no_delRows == 1:
st.write("-",no_delRows, " row was deleted!")
elif no_delRows == 0:
st.write("- No row was deleted!")
else:
st.write("- No row was deleted!")
if sb_DM_keepRows is not None and keepRows!='-' :
if no_keptRows > 1:
st.write("-", no_keptRows, " rows are kept!")
elif no_keptRows == 1:
st.write("-",no_keptRows, " row is kept!")
elif no_keptRows == 0:
st.write("- All rows are kept!")
else:
st.write("- All rows are kept!")
# Columns
if len(sb_DM_delCols) > 1:
st.write("-", len(sb_DM_delCols), " columns were deleted:", ', '.join(sb_DM_delCols))
elif len(sb_DM_delCols) == 1:
st.write("-",len(sb_DM_delCols), " column was deleted:", str(sb_DM_delCols[0]))
elif len(sb_DM_delCols) == 0:
st.write("- No column was deleted!")
if len(sb_DM_keepCols) > 1:
st.write("-", len(sb_DM_keepCols), " columns are kept:", ', '.join(sb_DM_keepCols))
elif len(sb_DM_keepCols) == 1:
st.write("-",len(sb_DM_keepCols), " column is kept:", str(sb_DM_keepCols[0]))
elif len(sb_DM_keepCols) == 0:
st.write("- All columns are kept!")
# Duplicates
if sb_DM_delDup == "Yes":
if n_rows_dup > 1:
st.write("-", n_rows_dup, " duplicate rows were deleted!")
elif n_rows_dup == 1:
st.write("-", n_rows_dup, "duplicate row was deleted!")
else:
st.write("- No duplicate row was deleted!")
# NAs
if sb_DM_delRows_wNA == "Yes":
if n_rows_wNAs > 1:
st.write("-", n_rows_wNAs, "rows with NAs were deleted!")
elif n_rows_wNAs == 1:
st.write("-", n_rows - n_rows_wNAs, "row with NAs was deleted!")
else:
st.write("- No row with NAs was deleted!")
# Filter
if filter_var != "-":
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if isinstance(filter_1, list):
if len(filter_1) == 0:
st.write("-", " Data was not filtered!")
elif len(filter_1) > 0:
st.write("-", " Data filtered by:", str(filter_var))
elif filter_1 is not None:
st.write("-", " Data filtered by:", str(filter_var))
else:
st.write("-", " Data was not filtered!")
elif len(filter_1)>0:
st.write("-", " Data filtered by:", str(filter_var))
elif len(filter_1) == 0:
st.write("-", " Data was not filtered!")
else:
st.write("-", " Data was not filtered!")
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.write("- Data imputation method for numeric variables:", sb_DM_dImp_num)
st.write("- Data imputation method for other variable types:", sb_DM_dImp_other)
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
# log
if len(sb_DM_dTrans_log) > 1:
st.write("-", len(sb_DM_dTrans_log), " columns were log-transformed:", ', '.join(sb_DM_dTrans_log))
elif len(sb_DM_dTrans_log) == 1:
st.write("-",len(sb_DM_dTrans_log), " column was log-transformed:", sb_DM_dTrans_log[0])
elif len(sb_DM_dTrans_log) == 0:
st.write("- No column was log-transformed!")
# sqrt
if len(sb_DM_dTrans_sqrt) > 1:
st.write("-", len(sb_DM_dTrans_sqrt), " columns were sqrt-transformed:", ', '.join(sb_DM_dTrans_sqrt))
elif len(sb_DM_dTrans_sqrt) == 1:
st.write("-",len(sb_DM_dTrans_sqrt), " column was sqrt-transformed:", sb_DM_dTrans_sqrt[0])
elif len(sb_DM_dTrans_sqrt) == 0:
st.write("- No column was sqrt-transformed!")
# square
if len(sb_DM_dTrans_square) > 1:
st.write("-", len(sb_DM_dTrans_square), " columns were squared:", ', '.join(sb_DM_dTrans_square))
elif len(sb_DM_dTrans_square) == 1:
st.write("-",len(sb_DM_dTrans_square), " column was squared:", sb_DM_dTrans_square[0])
elif len(sb_DM_dTrans_square) == 0:
st.write("- No column was squared!")
# centering
if len(sb_DM_dTrans_cent) > 1:
st.write("-", len(sb_DM_dTrans_cent), " columns were centered:", ', '.join(sb_DM_dTrans_cent))
elif len(sb_DM_dTrans_cent) == 1:
st.write("-",len(sb_DM_dTrans_cent), " column was centered:", sb_DM_dTrans_cent[0])
elif len(sb_DM_dTrans_cent) == 0:
st.write("- No column was centered!")
# standardize
if len(sb_DM_dTrans_stand) > 1:
st.write("-", len(sb_DM_dTrans_stand), " columns were standardized:", ', '.join(sb_DM_dTrans_stand))
elif len(sb_DM_dTrans_stand) == 1:
st.write("-",len(sb_DM_dTrans_stand), " column was standardized:", sb_DM_dTrans_stand[0])
elif len(sb_DM_dTrans_stand) == 0:
st.write("- No column was standardized!")
# normalize
if len(sb_DM_dTrans_norm) > 1:
st.write("-", len(sb_DM_dTrans_norm), " columns were normalized:", ', '.join(sb_DM_dTrans_norm))
elif len(sb_DM_dTrans_norm) == 1:
st.write("-",len(sb_DM_dTrans_norm), " column was normalized:", sb_DM_dTrans_norm[0])
elif len(sb_DM_dTrans_norm) == 0:
st.write("- No column was normalized!")
# numeric category
if sb_DM_dTrans_numCat is not None:
if len(sb_DM_dTrans_numCat) > 1:
st.write("-", len(sb_DM_dTrans_numCat), " columns were transformed to numeric categories:", ', '.join(sb_DM_dTrans_numCat))
elif len(sb_DM_dTrans_numCat) == 1:
st.write("-",len(sb_DM_dTrans_numCat), " column was transformed to numeric categories:", sb_DM_dTrans_numCat[0])
elif sb_DM_dTrans_numCat is None:
st.write("- No column was transformed to numeric categories!")
# multiplication
if sb_DM_dTrans_mult != 0:
st.write("-", "Number of variable multiplications: ", sb_DM_dTrans_mult)
elif sb_DM_dTrans_mult == 0:
st.write("- No variables were multiplied!")
# division
if sb_DM_dTrans_div != 0:
st.write("-", "Number of variable divisions: ", sb_DM_dTrans_div)
elif sb_DM_dTrans_div == 0:
st.write("- No variables were divided!")
st.write("")
st.write("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# UPDATED DATA SUMMARY
# Show only if changes were made
if any(v for v in [sb_DM_delCols, sb_DM_dImp_num, sb_DM_dImp_other, sb_DM_dTrans_log, sb_DM_dTrans_sqrt, sb_DM_dTrans_square, sb_DM_dTrans_cent, sb_DM_dTrans_stand, sb_DM_dTrans_norm, sb_DM_dTrans_numCat ] if v is not None) or sb_DM_delDup == "Yes" or sb_DM_delRows_wNA == "Yes" or sb_DM_dTrans_mult != 0 or sb_DM_dTrans_div != 0 or filter_var != "-" or delRows!='-' or keepRows!='-' or len(sb_DM_keepCols) > 0:
dev_expander_dsPost = st.beta_expander("Explore cleaned and transformed data info and stats ", expanded = False)
with dev_expander_dsPost:
if df.shape[1] > 0 and df.shape[0] > 0:
# Show cleaned and transformed data & data info
df_summary_post = fc.data_summary(df)
if st.checkbox("Show cleaned and transformed data ", value = False, key = session_state.id):
n_rows_post = df.shape[0]
n_cols_post = df.shape[1]
st.dataframe(df)
st.write("Data shape: ", n_rows_post, "rows and ", n_cols_post, "columns")
# Download transformed data:
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="Clean. and transf. data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "CleanedTransfData__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned and transformed data</a>
""",
unsafe_allow_html=True)
st.write("")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl2 = st.checkbox("Show duplicates and NAs info (processed) ", value = False, key = session_state.id)
if check_nasAnddupl2:
index_c = []
for c in df.columns:
for r in df.index:
if pd.isnull(df[c][r]):
index_c.append(r)
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", len(pd.unique(sorted(index_c))))
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(sorted(index_c))))))
# Show cleaned and transformed variable info
if st.checkbox("Show cleaned and transformed variable info ", value = False, key = session_state.id):
st.write(df_summary_post["Variable types"])
# Show summary statistics (cleaned and transformed data)
if st.checkbox('Show summary statistics (cleaned and transformed data) ', value = False, key = session_state.id):
st.write(df_summary_post["ALL"].style.set_precision(user_precision))
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="cleaned_data")
df_summary_post["Variable types"].to_excel(excel_file, sheet_name="cleaned_variable_info")
df_summary_post["ALL"].to_excel(excel_file, sheet_name="cleaned_summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned data summary statistics_multi_" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned data summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
else:
st.error("ERROR: No data available for preprocessing!")
return
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA VISUALIZATION
data_visualization_container = st.beta_container()
with data_visualization_container:
st.write("")
st.write("")
st.header("**Data visualization**")
dev_expander_dv = st.beta_expander("Explore visualization types ", expanded = False)
with dev_expander_dv:
if df.shape[1] > 0 and df.shape[0] > 0:
st.write('**Variable selection**')
varl_sel_options = df.columns
var_sel = st.selectbox('Select variable for visualizations', varl_sel_options, key = session_state.id)
if df[var_sel].dtypes == "float64" or df[var_sel].dtypes == "float32" or df[var_sel].dtypes == "int64" or df[var_sel].dtypes == "int32":
a4, a5 = st.beta_columns(2)
with a4:
st.write('**Scatterplot with LOESS line**')
yy_options = df.columns
yy = st.selectbox('Select variable for y-axis', yy_options, key = session_state.id)
if df[yy].dtypes == "float64" or df[yy].dtypes == "float32" or df[yy].dtypes == "int64" or df[yy].dtypes == "int32":
fig_data = pd.DataFrame()
fig_data[yy] = df[yy]
fig_data[var_sel] = df[var_sel]
fig_data["Index"] = df.index
fig = alt.Chart(fig_data).mark_circle().encode(
x = alt.X(var_sel, scale = alt.Scale(domain = [min(fig_data[var_sel]), max(fig_data[var_sel])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(yy, scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [yy, var_sel, "Index"]
)
st.altair_chart(fig + fig.transform_loess(var_sel, yy).mark_line(size = 2, color = "darkred"), use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_scatterplot")))
else: st.error("ERROR: Please select a numeric variable for the y-axis!")
with a5:
st.write('**Histogram**')
binNo = st.slider("Select maximum number of bins", 5, 100, 25, key = session_state.id)
fig2 = alt.Chart(df).mark_bar().encode(
x = alt.X(var_sel, title = var_sel + " (binned)", bin = alt.BinParams(maxbins = binNo), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip(var_sel, bin = alt.BinParams(maxbins = binNo))]
)
st.altair_chart(fig2, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_histogram")))
a6, a7 = st.beta_columns(2)
with a6:
st.write('**Boxplot**')
# Boxplot
boxplot_data = pd.DataFrame()
boxplot_data[var_sel] = df[var_sel]
boxplot_data["Index"] = df.index
boxplot = alt.Chart(boxplot_data).mark_boxplot(size = 100, color = "#1f77b4", median = dict(color = "darkred"),).encode(
y = alt.Y(var_sel, scale = alt.Scale(zero = False)),
tooltip = [var_sel, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(boxplot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_boxplot")))
with a7:
st.write("**QQ-plot**")
var_values = df[var_sel]
qqplot_data = pd.DataFrame()
qqplot_data[var_sel] = var_values
qqplot_data["Index"] = df.index
qqplot_data = qqplot_data.sort_values(by = [var_sel])
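                        # stats.probplot returns ((theoretical quantiles, ordered values), fit results);
                        # [0][0] extracts the theoretical normal quantiles for the sorted sample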
qqplot_data["Theoretical quantiles"] = stats.probplot(var_values, dist="norm")[0][0]
qqplot = alt.Chart(qqplot_data).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qqplot_data["Theoretical quantiles"]), max(qqplot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(var_sel, title = str(var_sel), scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [var_sel, "Theoretical quantiles", "Index"]
)
st.altair_chart(qqplot + qqplot.transform_regression('Theoretical quantiles', var_sel).mark_line(size = 2, color = "darkred"), use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("dv_qqplot")))
else: st.error("ERROR: Please select a numeric variable!")
else: st.error("ERROR: No data available for Data Visualization!")
# scatter matrix
#Check If variables are numeric
num_cols=[]
for column in df:
if df[column].dtypes in ('float', 'float64', 'int','int64'):
num_cols.append(column)
if len(num_cols)>1:
show_scatter_matrix=st.checkbox('Show scatter matrix',value=False,key= session_state.id)
if show_scatter_matrix==True:
multi_var_sel = st.multiselect('Select variables for scatter matrix', num_cols, num_cols, key = session_state.id)
if len(multi_var_sel)<2:
st.error("ERROR: Please choose at least two variables fro a scatterplot")
else:
#Plot scatter matrix:
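                        # alt.repeat builds a grid of pairwise scatterplots by repeating the x/y encoding
                        # over the selected variables for both rows and columns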
scatter_matrix=alt.Chart(df[multi_var_sel]).mark_circle().encode(
x=alt.X(alt.repeat("column"), type='quantitative'),
y=alt.Y(alt.repeat("row"), type='quantitative')
).properties(
width=150,
height=150
).repeat(
row=multi_var_sel,
column=multi_var_sel
).interactive()
st.altair_chart(scatter_matrix, use_container_width=True)
#------------------------------------------------------------------------------------------
# REGRESSION
if analysis_type == "Regression":
#++++++++++++++++++++++++++++++++++++++++++++
# MACHINE LEARNING (PREDICTIVE DATA ANALYSIS)
st.write("")
st.write("")
data_machinelearning_container = st.beta_container()
with data_machinelearning_container:
st.header("**Multivariate data modelling**")
st.markdown("Go for creating predictive models of your data using classical and machine learning techniques! STATY will take care of the modelling for you, so you can put your focus on results interpretation and communication! ")
ml_settings = st.beta_expander("Specify models ", expanded = False)
with ml_settings:
# Initial status for running models
run_models = False
sb_ML_alg = "NA"
do_hypTune = "No"
do_modval = "No"
do_hypTune_no = "No hyperparameter tuning"
final_hyPara_values="None"
model_val_results = None
model_full_results = None
gam_finalPara = None
brt_finalPara = None
brt_tuning_results = None
rf_finalPara = None
rf_tuning_results = None
ann_finalPara = None
ann_tuning_results = None
MLR_intercept = None
MLR_cov_type = None
MLR_finalPara = None
MLR_model = "OLS"
LR_cov_type = None
                LR_finalPara = None
if df.shape[1] > 0 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Variable categories
df_summary_model = fc.data_summary(df)
var_cat = df_summary_model["Variable types"].loc["category"]
# Response variable
response_var_options = df.columns
response_var = st.selectbox("Select response variable", response_var_options, key = session_state.id)
# Check if response variable is numeric and has no NAs
response_var_message_num = False
response_var_message_na = False
response_var_message_cat = False
if var_cat.loc[response_var] == "string/binary" or var_cat.loc[response_var] == "bool/binary":
response_var_message_num = "ERROR: Please transform the binary response variable into a numeric binary categorization in data processing preferences!"
elif var_cat.loc[response_var] == "string/categorical" or var_cat.loc[response_var] == "other" or var_cat.loc[response_var] == "string/single":
response_var_message_num = "ERROR: Please select a numeric or binary response variable!"
elif var_cat.loc[response_var] == "categorical":
response_var_message_cat = "WARNING: Non-continuous variables are treated as continuous!"
if response_var_message_num != False:
st.error(response_var_message_num)
if response_var_message_cat != False:
st.warning(response_var_message_cat)
# Continue if everything is clean for response variable
if response_var_message_num == False and response_var_message_na == False:
# Select explanatory variables
expl_var_options = df.columns
expl_var_options = expl_var_options[expl_var_options.isin(df.drop(response_var, axis = 1).columns)]
expl_var = st.multiselect("Select explanatory variables", expl_var_options, key = session_state.id)
var_list = list([response_var]) + list(expl_var)
# Check if explanatory variables are numeric
expl_var_message_num = False
expl_var_message_na = False
                        if any(a for a in df[expl_var].dtypes if a != "float64" and a != "float32" and a != "int64" and a != "int32"):
expl_var_not_num = df[expl_var].select_dtypes(exclude=["int64", "int32", "float64", "float32"]).columns
expl_var_message_num = "ERROR: Please exclude non-numeric variables: " + ', '.join(map(str,list(expl_var_not_num)))
# Check if NAs are present and delete them automatically (delete before run models button)
if np.where(df[var_list].isnull())[0].size > 0:
st.warning("WARNING: Your modelling data set includes NAs. Rows with NAs are automatically deleted!")
if expl_var_message_num != False:
st.error(expl_var_message_num)
elif expl_var_message_na != False:
st.error(expl_var_message_na)
# Continue if everything is clean for explanatory variables and at least one was selected
elif expl_var_message_num == False and expl_var_message_na == False and len(expl_var) > 0:
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify modelling algorithms**")
# Select algorithms based on chosen response variable
# Binary (has to be integer or float)
if var_cat.loc[response_var] == "binary":
algorithms = ["Multiple Linear Regression", "Logistic Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "binary"
# Multi-class (has to be integer, currently treated as continuous response)
elif var_cat.loc[response_var] == "categorical":
algorithms = ["Multiple Linear Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "continuous"
# Continuous
elif var_cat.loc[response_var] == "numeric":
algorithms = ["Multiple Linear Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "continuous"
alg_list = list(algorithms)
sb_ML_alg = st.multiselect("Select modelling techniques", alg_list, alg_list)
# MLR + binary info message
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression") and response_var_type == "binary":
st.warning("WARNING: For Multiple Linear Regression only the full model output will be determined.")
st.markdown("**Model-specific settings**")
# Multiple Linear Regression settings
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
MLR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
MLR_intercept = "Yes"
MLR_cov_type = "non-robust"
MLR_finalPara["intercept"] = MLR_intercept
MLR_finalPara["covType"] = MLR_cov_type
if st.checkbox("Adjust settings for Multiple Linear Regression"):
col1, col2 = st.beta_columns(2)
with col1:
MLR_intercept = st.selectbox("Include intercept", ["Yes", "No"])
with col2:
MLR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0", "HC1", "HC2", "HC3"])
MLR_finalPara["intercept"] = MLR_intercept
MLR_finalPara["covType"] = MLR_cov_type
st.write("")
# Logistic Regression settings
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
LR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
LR_intercept = "Yes"
LR_cov_type = "non-robust"
LR_finalPara["intercept"] = LR_intercept
LR_finalPara["covType"] = LR_cov_type
if st.checkbox("Adjust settings for Logistic Regression"):
col1, col2 = st.beta_columns(2)
with col1:
LR_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
with col2:
LR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0"])
LR_finalPara["intercept"] = LR_intercept
LR_finalPara["covType"] = LR_cov_type
st.write("")
# Generalized Additive Models settings
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
gam_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "number of splines", "spline order", "lambda"])
gam_finalPara["intercept"] = "Yes"
gam_finalPara["number of splines"] = 20
gam_finalPara["spline order"] = 3
gam_finalPara["lambda"] = 0.6
gam_lam_search = "No"
if st.checkbox("Adjust settings for Generalized Additive Models"):
gam_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "number of splines", "spline order", "lambda"])
col1, col2 = st.beta_columns(2)
with col1:
gam_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
gam_finalPara["intercept"] = gam_intercept
with col2:
gam_lam_search = st.selectbox("Search for lambda ", ["No", "Yes"])
if gam_lam_search == "Yes":
ls_col1, ls_col2, ls_col3 = st.beta_columns(3)
with ls_col1:
ls_min = st.number_input("Minimum lambda value", value=0.001, step=1e-3, min_value=0.001, format="%.3f")
with ls_col2:
ls_max = st.number_input("Maximum lambda value", value=100.000, step=1e-3, min_value=0.002, format="%.3f")
with ls_col3:
ls_number = st.number_input("Lambda values per variable", value=50, min_value=2)
if ls_number**len(expl_var) > 10000:
st.warning("WARNING: Your grid has " + str(ls_number**len(expl_var)) + " combinations. Please note that searching for lambda will take a lot of time!")
else:
st.info("Your grid has " + str(ls_number**len(expl_var)) + " combinations.")
if gam_lam_search == "No":
gam_col1, gam_col2, gam_col3 = st.beta_columns(3)
if gam_lam_search == "Yes":
gam_col1, gam_col2= st.beta_columns(2)
gam_nos_values = []
gam_so_values = []
gam_lam_values = []
for gset in range(0,len(expl_var)):
var_name = expl_var[gset]
with gam_col1:
nos = st.number_input("Number of splines (" + var_name + ")", value = 20, min_value=1)
gam_nos_values.append(nos)
with gam_col2:
so = st.number_input("Spline order (" + var_name + ")", value = 3, min_value=3)
gam_so_values.append(so)
if gam_lam_search == "No":
with gam_col3:
lam = st.number_input("Lambda (" + var_name + ")", value = 0.6, min_value=0.001, step=1e-3, format="%.3f")
gam_lam_values.append(lam)
if nos <= so:
st.error("ERROR: Please make sure that the number of splines is greater than the spline order for "+ str(expl_var[gset]) + "!")
return
if gam_lam_search == "Yes":
lam = np.round(np.linspace(ls_min, ls_max, ls_number),3)
if len(expl_var) == 1:
gam_lam_values = lam
else:
gam_lam_values = [lam] * len(expl_var)
gam_finalPara.at["value", "number of splines"] = gam_nos_values
gam_finalPara.at["value","spline order"] = gam_so_values
gam_finalPara.at["value","lambda"] = gam_lam_values
st.write("")
# Save hyperparameter values for machine learning methods
final_hyPara_values = {}
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [100]
rf_finalPara["maximum tree depth"] = [None]
rf_finalPara["maximum number of features"] = [len(expl_var)]
rf_finalPara["sample rate"] = [0.99]
final_hyPara_values["rf"] = rf_finalPara
if st.checkbox("Adjust settings for Random Forest "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_finalPara["number of trees"] = st.number_input("Number of trees", value=100, step=1, min_value=1)
with col3:
rf_mtd_sel = st.selectbox("Specify maximum tree depth ", ["No", "Yes"])
if rf_mtd_sel == "No":
rf_finalPara["maximum tree depth"] = [None]
if rf_mtd_sel == "Yes":
rf_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=20, step=1, min_value=1, max_value=50)
if len(expl_var) >1:
with col4:
rf_finalPara["maximum number of features"] = st.slider("Maximum number of features ", value=len(expl_var), step=1, min_value=1, max_value=len(expl_var))
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
else:
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
final_hyPara_values["rf"] = rf_finalPara
st.write("")
# Boosted Regression Trees settings
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_finalPara["number of trees"] = [100]
brt_finalPara["learning rate"] = [0.1]
brt_finalPara["maximum tree depth"] = [3]
brt_finalPara["sample rate"] = [1]
final_hyPara_values["brt"] = brt_finalPara
if st.checkbox("Adjust settings for Boosted Regression Trees "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
brt_finalPara["number of trees"] = st.number_input("Number of trees ", value=100, step=1, min_value=1)
with col2:
brt_finalPara["learning rate"] = st.slider("Learning rate ", value=0.1, min_value=0.001, max_value=0.1 , step=1e-3, format="%.3f")
with col3:
brt_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=3, step=1, min_value=1, max_value=30)
with col4:
brt_finalPara["sample rate"] = st.slider("Sample rate ", value=1.0, step=0.01, min_value=0.5, max_value=1.0)
final_hyPara_values["brt"] = brt_finalPara
st.write("")
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"])
ann_finalPara["weight optimization solver"] = ["adam"]
ann_finalPara["maximum number of iterations"] = [200]
ann_finalPara["activation function"] = ["relu"]
ann_finalPara["hidden layer sizes"] = [(100,)]
ann_finalPara["learning rate"] = [0.001]
ann_finalPara["L² regularization"] = [0.0001]
final_hyPara_values["ann"] = ann_finalPara
if st.checkbox("Adjust settings for Artificial Neural Networks "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
ann_finalPara["weight optimization solver"] = st.selectbox("Weight optimization solver ", ["adam"])
with col2:
ann_finalPara["activation function"] = st.selectbox("Activation function ", ["relu", "identity", "logistic", "tanh"])
with col3:
ann_finalPara["maximum number of iterations"] = st.slider("Maximum number of iterations ", value=200, step=1, min_value=10, max_value=1000)
with col4:
ann_finalPara["learning rate"] = st.slider("Learning rate ", min_value=0.0001, max_value=0.01, value=0.001, step=1e-4, format="%.4f")
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers", [1, 2, 3])
if number_hidden_layers == 1:
number_nodes1 = st.slider("Number of nodes in hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,)]
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,)]
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
number_nodes3 = st.slider("Number of neurons in third hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,number_nodes3,)]
with col6:
ann_finalPara["L² regularization"] = st.slider("L² regularization ", min_value=0.00001, max_value=0.001, value=0.0001, step=1e-5, format="%.5f")
#--------------------------------------------------------------------------------------
# HYPERPARAMETER TUNING SETTINGS
if len(sb_ML_alg) >= 1:
# Depending on algorithm selection different hyperparameter settings are shown
if any(a for a in sb_ML_alg if a == "Random Forest") or any(a for a in sb_ML_alg if a == "Boosted Regression Trees") or any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
# General settings
st.markdown("**Hyperparameter-tuning settings**")
do_hypTune = st.selectbox("Use hyperparameter-tuning", ["No", "Yes"])
# Save hyperparameter values for all algorithms
hyPara_values = {}
# No hyperparameter-tuning
if do_hypTune == "No":
do_hypTune_no = "Default hyperparameter values are used!"
# Hyperparameter-tuning
elif do_hypTune == "Yes":
st.warning("WARNING: Hyperparameter-tuning can take a lot of time! For tips, please [contact us](mailto:<EMAIL>?subject=Staty-App).")
# Further general settings
hypTune_method = st.selectbox("Hyperparameter-search method", ["random grid-search", "grid-search", "Bayes optimization", "sequential model-based optimization"])
col1, col2 = st.beta_columns(2)
with col1:
hypTune_nCV = st.slider("Select number for n-fold cross-validation", 2, 10, 5)
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
with col2:
hypTune_iter = st.slider("Select number of iterations for search", 20, 1000, 20)
else:
hypTune_iter = False
st.markdown("**Model-specific tuning settings**")
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_tunePara["number of trees"] = [50, 500]
rf_tunePara["maximum tree depth"] = [None, None]
rf_tunePara["maximum number of features"] = [1, len(expl_var)]
rf_tunePara["sample rate"] = [0.8, 0.99]
hyPara_values["rf"] = rf_tunePara
if st.checkbox("Adjust tuning settings for Random Forest"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_tunePara["number of trees"] = st.slider("Range for number of trees ", 50, 1000, [50, 500])
with col3:
rf_mtd_choice = st.selectbox("Specify maximum tree depth", ["No", "Yes"])
if rf_mtd_choice == "Yes":
rf_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth ", 1, 50, [2, 10])
else:
rf_tunePara["maximum tree depth"] = [None, None]
with col4:
if len(expl_var) > 1:
rf_tunePara["maximum number of features"] = st.slider("Range for maximum number of features", 1, len(expl_var), [1, len(expl_var)])
else:
rf_tunePara["maximum number of features"] = [1,1]
with col2:
rf_tunePara["sample rate"] = st.slider("Range for sample rate ", 0.5, 0.99, [0.8, 0.99])
hyPara_values["rf"] = rf_tunePara
# Boosted Regression Trees settings
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_tunePara["number of trees"] = [50, 500]
brt_tunePara["learning rate"] = [0.001, 0.010]
brt_tunePara["maximum tree depth"] = [2, 10]
brt_tunePara["sample rate"] = [0.8, 1.0]
hyPara_values["brt"] = brt_tunePara
if st.checkbox("Adjust tuning settings for Boosted Regression Trees"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
brt_tunePara["number of trees"] = st.slider("Range for number of trees", 50, 1000, [50, 500])
with col2:
brt_tunePara["learning rate"] = st.slider("Range for learning rate", 0.001, 0.1, [0.001, 0.02], step=1e-3, format="%.3f")
with col3:
brt_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth", 1, 30, [2, 10])
with col4:
brt_tunePara["sample rate"] = st.slider("Range for sample rate", 0.5, 1.0, [0.8, 1.0])
hyPara_values["brt"] = brt_tunePara
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "number of hidden layers", "nodes per hidden layer", "learning rate","L² regularization"])# "learning rate schedule", "momentum", "epsilon"])
ann_tunePara["weight optimization solver"] = list([["adam"], "NA"])
ann_tunePara["maximum number of iterations"] = [100, 200]
ann_tunePara["activation function"] = list([["relu"], "NA"])
ann_tunePara["number of hidden layers"] = list([1, "NA"])
ann_tunePara["nodes per hidden layer"] = [50, 100]
ann_tunePara["learning rate"] = [0.0001, 0.002]
ann_tunePara["L² regularization"] = [0.00001, 0.0002]
hyPara_values["ann"] = ann_tunePara
if st.checkbox("Adjust tuning settings for Artificial Neural Networks"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
weight_opt_list = st.selectbox("Weight optimization solver ", ["adam"])
if len(weight_opt_list) == 0:
weight_opt_list = ["adam"]
st.warning("WARNING: Default value used 'adam'")
ann_tunePara["weight optimization solver"] = list([[weight_opt_list], "NA"])
with col2:
ann_tunePara["maximum number of iterations"] = st.slider("Maximum number of iterations (epochs) ", 10, 1000, [100, 200])
with col3:
act_func_list = st.multiselect("Activation function ", ["identity", "logistic", "tanh", "relu"], ["relu"])
if len(act_func_list) == 0:
act_func_list = ["relu"]
st.warning("WARNING: Default value used 'relu'")
ann_tunePara["activation function"] = list([act_func_list, "NA"])
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers ", [1, 2, 3])
ann_tunePara["number of hidden layers"] = list([number_hidden_layers, "NA"])
# Cases for hidden layers
if number_hidden_layers == 1:
ann_tunePara["nodes per hidden layer"] = st.slider("Number of nodes in hidden layer ", 5, 500, [50, 100])
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
number_nodes3 = st.slider("Number of neurons in third hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0], number_nodes3[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1], number_nodes3[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
with col6:
if weight_opt_list == "adam":
ann_tunePara["learning rate"] = st.slider("Range for learning rate ", 0.0001, 0.01, [0.0001, 0.002], step=1e-4, format="%.4f")
with col4:
ann_tunePara["L² regularization"] = st.slider("L² regularization parameter ", 0.0, 0.001, [0.00001, 0.0002], step=1e-5, format="%.5f")
hyPara_values["ann"] = ann_tunePara
#--------------------------------------------------------------------------------------
# VALIDATION SETTINGS
st.markdown("**Validation settings**")
do_modval= st.selectbox("Use model validation", ["No", "Yes"])
if do_modval == "Yes":
col1, col2 = st.beta_columns(2)
                        # Select training/test ratio
with col1:
train_frac = st.slider("Select training data size", 0.5, 0.95, 0.8)
# Select number for validation runs
with col2:
val_runs = st.slider("Select number for validation runs", 5, 100, 10)
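                    # train_frac and val_runs are passed to ml.model_val() below, which presumably repeats the
                    # train/test split and evaluation val_runs times with a training share of train_frac.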
#--------------------------------------------------------------------------------------
# PREDICTION SETTINGS
st.markdown("**Model predictions**")
do_modprednew = st.selectbox("Use model prediction for new data", ["No", "Yes"])
if do_modprednew == "Yes":
# Upload new data
new_data_pred = st.file_uploader(" ", type=["csv", "txt"])
if new_data_pred is not None:
# Read data
if uploaded_data is not None:
df_new = pd.read_csv(new_data_pred, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
else:
df_new = pd.read_csv(new_data_pred, sep = ";|,|\t",engine='python')
st.success('Loading data... done!')
# Transform columns if any were transformed
# Log-transformation
if sb_DM_dTrans_log is not None:
# List of log-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_log:
if "log_"+tv in expl_var:
tv_list.append(tv)
# Check if log-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for log-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_log(df_new, tv_list)
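                            # fc.var_transform_log() is assumed to append the transformed columns using the same
                            # "log_<variable>" naming that is used for the modelling data.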
# Sqrt-transformation
if sb_DM_dTrans_sqrt is not None:
# List of sqrt-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_sqrt:
if "sqrt_"+tv in expl_var:
tv_list.append(tv)
# Check if sqrt-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for sqrt-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_sqrt(df_new, tv_list)
# Square-transformation
if sb_DM_dTrans_square is not None:
# List of square-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_square:
if "square_"+tv in expl_var:
tv_list.append(tv)
# Check if square-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for square-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_square(df_new, tv_list)
# Standardization
if sb_DM_dTrans_stand is not None:
# List of standardized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_stand:
if "stand_"+tv in expl_var:
tv_list.append(tv)
# Check if standardized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for standardization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use mean and standard deviation of original data for standardization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if df[tv].std() != 0:
new_var_name = "stand_" + tv
new_var = (df_new[tv] - df[tv].mean())/df[tv].std()
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be standardized!")
return
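                            # Standardization of the new data reuses the modelling data's mean and standard deviation,
                            # i.e. stand_x_new = (x_new - mean(x_orig)) / sd(x_orig), so both data sets are on the same scale.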
# Normalization
if sb_DM_dTrans_norm is not None:
# List of normalized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_norm:
if "norm_"+tv in expl_var:
tv_list.append(tv)
# Check if normalized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for normalization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use min and max of original data for normalization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if (df[tv].max()-df[tv].min()) != 0:
new_var_name = "norm_" + tv
new_var = (df_new[tv] - df[tv].min())/(df[tv].max()-df[tv].min())
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be normalized!")
return
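                            # Normalization likewise reuses the modelling data's min and max,
                            # i.e. norm_x_new = (x_new - min(x_orig)) / (max(x_orig) - min(x_orig));
                            # new values outside the original range therefore fall outside [0, 1].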
# Categorization
if sb_DM_dTrans_numCat is not None:
# List of categorized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_numCat:
if "numCat_"+tv in expl_var:
tv_list.append(tv)
# Check if categorized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for categorization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use same categories as for original data
for tv in tv_list:
new_var_name = "numCat_" + tv
new_var = pd.DataFrame(index = df_new.index, columns = [new_var_name])
for r in df_new.index:
if df.loc[df[tv] == df_new[tv][r]].empty == False:
new_var.loc[r, new_var_name] = df["numCat_" + tv][df.loc[df[tv] == df_new[tv][r]].index[0]]
else:
st.error("ERROR: Category is missing for the value in row: "+ str(r) + ", variable: " + str(tv))
return
df_new[new_var_name] = new_var.astype('int64')
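                            # Each new value is mapped to the category code of the first row in the original data with
                            # the same value of the untransformed variable; unseen values abort with the error above.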
# Multiplication
if sb_DM_dTrans_mult != 0:
# List of multiplied variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_mult):
mult_name = "mult_" + str(multiplication_pairs.loc[tv]["Var1"]) + "_" + str(multiplication_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(multiplication_pairs.loc[tv]["Var1"]))
tv_list.append(str(multiplication_pairs.loc[tv]["Var2"]))
# Check if multiplied explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for multiplication in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_mult):
df_new = fc.var_transform_mult(df_new, multiplication_pairs.loc[var]["Var1"], multiplication_pairs.loc[var]["Var2"])
# Division
if sb_DM_dTrans_div != 0:
# List of divided variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_div):
mult_name = "div_" + str(division_pairs.loc[tv]["Var1"]) + "_" + str(division_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(division_pairs.loc[tv]["Var1"]))
tv_list.append(str(division_pairs.loc[tv]["Var2"]))
                                # Check if divided explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for division in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_div):
df_new = fc.var_transform_div(df_new, division_pairs.loc[var]["Var1"], division_pairs.loc[var]["Var2"])
# Check if explanatory variables are available as columns
expl_list = []
for expl_incl in expl_var:
if expl_incl not in df_new.columns:
expl_list.append(expl_incl)
if expl_list:
st.error("ERROR: Some variables are missing in new data: "+ ', '.join(expl_list))
return
else:
st.info("All variables are available for predictions!")
# Check if NAs are present and delete them automatically
if df_new.iloc[list(pd.unique(np.where(df_new.isnull())[0]))].shape[0] == 0:
st.empty()
else:
df_new = df_new[expl_var].dropna()
st.warning("WARNING: Your new data set includes NAs. Rows with NAs are automatically deleted!")
df_new = df_new[expl_var]
# Modelling data set
df = df[var_list]
# Check if NAs are present and delete them automatically
if np.where(df[var_list].isnull())[0].size > 0:
df = df.dropna()
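                # At this point df is restricted to the selected variables (var_list) and rows with
                # missing values have been removed list-wise.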
#--------------------------------------------------------------------------------------
# SETTINGS SUMMARY
st.write("")
# Show modelling data
if st.checkbox("Show modelling data"):
st.write(df)
st.write("Data shape: ", df.shape[0], " rows and ", df.shape[1], " columns")
# Download link for modelling data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="modelling_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Modelling data__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download modelling data</a>
""",
unsafe_allow_html=True)
st.write("")
# Show prediction data
if do_modprednew == "Yes":
if new_data_pred is not None:
if st.checkbox("Show new data for predictions"):
st.write(df_new)
st.write("Data shape: ", df_new.shape[0], " rows and ", df_new.shape[1], " columns")
# Download link for forecast data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_new.to_excel(excel_file, sheet_name="new_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "New data for predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download new data for predictions</a>
""",
unsafe_allow_html=True)
st.write("")
# Show machine learning summary
if st.checkbox('Show a summary of machine learning settings', value = False):
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.write("Algorithms summary:")
st.write("- Models:", ', '.join(sb_ML_alg))
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
# st.write("- Multiple Linear Regression model: ", MLR_model)
st.write("- Multiple Linear Regression including intercept: ", MLR_intercept)
st.write("- Multiple Linear Regression covariance type: ", MLR_cov_type)
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
st.write("- Logistic Regression including intercept: ", LR_intercept)
st.write("- Logistic Regression covariance type: ", LR_cov_type)
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.write("- Generalized Additive Models parameters: ")
st.write(gam_finalPara)
if any(a for a in sb_ML_alg if a == "Random Forest") and do_hypTune == "No":
st.write("- Random Forest parameters: ")
st.write(rf_finalPara)
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees") and do_hypTune == "No":
st.write("- Boosted Regression Trees parameters: ")
st.write(brt_finalPara)
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks") and do_hypTune == "No":
st.write("- Artificial Neural Networks parameters: ")
st.write(ann_finalPara)
st.write("")
#--------------------------------------------------------------------------------------
# SETTINGS
# Hyperparameter settings summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks" or a == "Boosted Regression Trees" or a == "Random Forest"):
st.write("Hyperparameter-tuning settings summary:")
if do_hypTune == "No":
st.write("- ", do_hypTune_no)
st.write("")
if do_hypTune == "Yes":
st.write("- Search method:", hypTune_method)
st.write("- ", hypTune_nCV, "-fold cross-validation")
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
st.write("- ", hypTune_iter, "iterations in search")
st.write("")
# Random Forest summary
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.write("Random Forest tuning settings summary:")
st.write(rf_tunePara)
# Boosted Regression Trees summary
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.write("Boosted Regression Trees tuning settings summary:")
st.write(brt_tunePara)
# Artificial Neural Networks summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.write("Artificial Neural Networks tuning settings summary:")
st.write(ann_tunePara.style.format({"L² regularization": "{:.5}"}))
#st.caption("** Learning rate is only used in adam")
st.write("")
# General settings summary
st.write("General settings summary:")
st.write("- Response variable type: ", response_var_type)
# Modelling formula
if expl_var != False:
st.write("- Modelling formula:", response_var, "~", ' + '.join(expl_var))
if do_modval == "Yes":
                            # Train/test ratio
if train_frac != False:
st.write("- Train/ test ratio:", str(round(train_frac*100)), "% / ", str(round(100-train_frac*100)), "%")
# Validation runs
if val_runs != False:
st.write("- Validation runs:", str(val_runs))
st.write("")
#--------------------------------------------------------------------------------------
# RUN MODELS
# Models are run on button click
st.write("")
run_models = st.button("Run models")
st.write("")
if run_models:
# Check if new data available
if do_modprednew == "Yes":
if new_data_pred is None:
st.error("ERROR: Please upload new data for additional model predictions or select 'No'!")
return
                        # Hyperparameter-tuning
if do_hypTune == "Yes":
# Tuning
model_tuning_results = ml.model_tuning(df, sb_ML_alg, hypTune_method, hypTune_iter, hypTune_nCV, hyPara_values, response_var_type, response_var, expl_var)
# Save final hyperparameters
# Random Forest
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tuning_results = model_tuning_results["rf tuning"]
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [rf_tuning_results.loc["value"]["number of trees"]]
if [rf_tuning_results.loc["value"]["maximum tree depth"]][0] == "None":
rf_finalPara["maximum tree depth"] = None
else:
rf_finalPara["maximum tree depth"] = [rf_tuning_results.loc["value"]["maximum tree depth"]]
rf_finalPara["maximum number of features"] = [rf_tuning_results.loc["value"]["maximum number of features"]]
rf_finalPara["sample rate"] = [rf_tuning_results.loc["value"]["sample rate"]]
final_hyPara_values["rf"] = rf_finalPara
# Boosted Regression Trees
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_tuning_results = model_tuning_results["brt tuning"]
brt_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_finalPara["number of trees"] = [brt_tuning_results.loc["value"]["number of trees"]]
brt_finalPara["learning rate"] = [brt_tuning_results.loc["value"]["learning rate"]]
brt_finalPara["maximum tree depth"] = [brt_tuning_results.loc["value"]["maximum tree depth"]]
brt_finalPara["sample rate"] = [brt_tuning_results.loc["value"]["sample rate"]]
final_hyPara_values["brt"] = brt_finalPara
# Artificial Neural Networks
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tuning_results = model_tuning_results["ann tuning"]
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"]) #"learning rate schedule", "momentum", "epsilon"])
ann_finalPara["weight optimization solver"] = [ann_tuning_results.loc["value"]["weight optimization solver"]]
ann_finalPara["maximum number of iterations"] = [ann_tuning_results.loc["value"]["maximum number of iterations"]]
ann_finalPara["activation function"] = [ann_tuning_results.loc["value"]["activation function"]]
ann_finalPara["hidden layer sizes"] = [ann_tuning_results.loc["value"]["hidden layer sizes"]]
ann_finalPara["learning rate"] = [ann_tuning_results.loc["value"]["learning rate"]]
#ann_finalPara["learning rate schedule"] = [ann_tuning_results.loc["value"]["learning rate schedule"]]
#ann_finalPara["momentum"] = [ann_tuning_results.loc["value"]["momentum"]]
ann_finalPara["L² regularization"] = [ann_tuning_results.loc["value"]["L² regularization"]]
#ann_finalPara["epsilon"] = [ann_tuning_results.loc["value"]["epsilon"]]
final_hyPara_values["ann"] = ann_finalPara
# Lambda search for GAM
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
if gam_lam_search == "Yes":
st.info("Lambda search")
my_bar = st.progress(0.0)
progress = 0
Y_data_gam = df[response_var]
X_data_gam = df[expl_var]
nos = gam_finalPara["number of splines"][0]
so = gam_finalPara["spline order"][0]
lams = gam_lam_values
if response_var_type == "continuous":
if gam_finalPara["intercept"][0] == "Yes":
gam_grid = LinearGAM(n_splines = nos, spline_order = so, fit_intercept = True).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if gam_finalPara["intercept"][0] == "No":
gam_grid = LinearGAM(n_splines = nos, spline_order = so, fit_intercept = False).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if response_var_type == "binary":
if gam_finalPara["intercept"][0] == "Yes":
gam_grid = LogisticGAM(n_splines = nos, spline_order = so, fit_intercept = True).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if gam_finalPara["intercept"][0] == "No":
gam_grid = LogisticGAM(n_splines = nos, spline_order = so, fit_intercept = False).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
progress += 1
my_bar.progress(progress/1)
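                                # pygam's gridsearch() keeps the lambda with the best default objective
                                # (GCV for LinearGAM, UBRE for LogisticGAM); the result is stored in gam_finalPara above.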
# Model validation
if do_modval == "Yes":
model_val_results = ml.model_val(df, sb_ML_alg, MLR_model, train_frac, val_runs, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara, MLR_finalPara, LR_finalPara)
# Full model (depending on prediction for new data)
if do_modprednew == "Yes":
if new_data_pred is not None:
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
if do_modprednew == "No":
df_new = pd.DataFrame()
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
# Success message
st.success('Models run successfully!')
else: st.error("ERROR: No data available for Modelling!")
#++++++++++++++++++++++
# ML OUTPUT
    # Show output only if models were run (adding further widgets after 'Run models' would trigger a full page reload)
if run_models == True:
st.write("")
st.write("")
st.header("**Model outputs**")
#--------------------------------------------------------------------------------------
# FULL MODEL OUTPUT
full_output = st.beta_expander("Full model output", expanded = False)
with full_output:
if model_full_results is not None:
st.markdown("**Correlation Matrix & 2D-Histogram**")
# Define variable selector
var_sel_cor = alt.selection_single(fields=['variable', 'variable2'], clear=False,
init={'variable': response_var, 'variable2': response_var})
# Calculate correlation data
corr_data = df[[response_var] + expl_var].corr().stack().reset_index().rename(columns={0: "correlation", 'level_0': "variable", 'level_1': "variable2"})
corr_data["correlation_label"] = corr_data["correlation"].map('{:.2f}'.format)
# Basic plot
base = alt.Chart(corr_data).encode(
x = alt.X('variable2:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12)),
y = alt.Y('variable:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12))
)
# Correlation values to insert
text = base.mark_text().encode(
text='correlation_label',
color = alt.condition(
alt.datum.correlation > 0.5,
alt.value('white'),
alt.value('black')
)
)
# Correlation plot
corr_plot = base.mark_rect().encode(
color = alt.condition(var_sel_cor, alt.value('#86c29c'), 'correlation:Q', legend = alt.Legend(title = "Bravais-Pearson correlation coefficient", orient = "top", gradientLength = 350), scale = alt.Scale(scheme='redblue', reverse = True, domain = [-1,1]))
).add_selection(var_sel_cor)
# Calculate values for 2d histogram
value_columns = df[[response_var] + expl_var]
df_2dbinned = pd.concat([fc.compute_2d_histogram(var1, var2, df) for var1 in value_columns for var2 in value_columns])
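                # fc.compute_2d_histogram() is assumed to return binned counts per variable pair; the selection
                # defined above links each cell of the correlation matrix to its corresponding 2D histogram.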
# 2d binned histogram plot
scat_plot = alt.Chart(df_2dbinned).transform_filter(
var_sel_cor
).mark_rect().encode(
alt.X('value2:N', sort = alt.EncodingSortField(field='raw_left_value2'), axis = alt.Axis(title = "Horizontal variable", labelFontSize = 12)),
alt.Y('value:N', axis = alt.Axis(title = "Vertical variable", labelFontSize = 12), sort = alt.EncodingSortField(field='raw_left_value', order = 'descending')),
alt.Color('count:Q', scale = alt.Scale(scheme='reds'), legend = alt.Legend(title = "Count", orient = "top", gradientLength = 350))
)
# Combine all plots
correlation_plot = alt.vconcat((corr_plot + text).properties(width = 400, height = 400), scat_plot.properties(width = 400, height = 400)).resolve_scale(color = 'independent')
corr_plot1 = (corr_plot + text).properties(width = 400, height = 400)
correlation_plot = correlation_plot.properties(padding = {"left": 50, "top": 5, "right": 5, "bottom": 50})
# hist_2d_plot = scat_plot.properties(height = 350)
if response_var_type == "continuous":
st.altair_chart(correlation_plot, use_container_width = True)
if response_var_type == "binary":
st.altair_chart(correlation_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_cor")))
st.write("")
#-------------------------------------------------------------
# Continuous response variable
if response_var_type == "continuous":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
# Regression information
fm_mlr_reg_col1, fm_mlr_reg_col2 = st.beta_columns(2)
with fm_mlr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["MLR information"].style.set_precision(user_precision))
# Regression statistics
with fm_mlr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["MLR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["MLR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_coef")))
st.write("")
# ANOVA
st.write("ANOVA:")
st.table(model_full_results["MLR ANOVA"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_ANOVA")))
st.write("")
# Heteroskedasticity tests
if MLR_intercept == "Yes":
st.write("Heteroskedasticity tests:")
st.table(model_full_results["MLR hetTest"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_hetTest")))
st.write("")
# Variable importance (via permutation)
fm_mlr_reg2_col1, fm_mlr_reg2_col2 = st.beta_columns(2)
with fm_mlr_reg2_col1:
st.write("Variable importance (via permutation):")
mlr_varImp_table = model_full_results["MLR variable importance"]
st.table(mlr_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_mlr_reg2_col2:
st.write("")
st.write("")
st.write("")
mlr_varImp_plot_data = model_full_results["MLR variable importance"]
mlr_varImp_plot_data["Variable"] = mlr_varImp_plot_data.index
mlr_varImp = alt.Chart(mlr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(mlr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_varImp")))
st.write("")
# Graphical output
fm_mlr_figs_col1, fm_mlr_figs_col2 = st.beta_columns(2)
with fm_mlr_figs_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["MLR fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_mlr_figs_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_fitted_data["Fitted"] = model_full_results["MLR fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_obsResVsFit")))
st.write("")
fm_mlr_figs1_col1, fm_mlr_figs1_col2 = st.beta_columns(2)
with fm_mlr_figs1_col1:
st.write("Normal QQ-plot:")
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_qqplot")))
with fm_mlr_figs1_col2:
st.write("Scale-Location:")
scale_location_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
scale_location_data["SqrtStandResiduals"] = np.sqrt(abs((residuals - residuals.mean())/residuals.std()))
scale_location_data["Fitted"] = model_full_results["MLR fitted"]
scale_location_data["Index"] = df.index
scale_location = alt.Chart(scale_location_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(scale_location_data["Fitted"]), max(scale_location_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("SqrtStandResiduals", title = "sqrt(|stand. residuals|)", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["SqrtStandResiduals", "Fitted", "Index"]
)
scale_location_plot = scale_location + scale_location.transform_loess("Fitted", "SqrtStandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(scale_location_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_scaleLoc")))
st.write("")
fm_mlr_figs2_col1, fm_mlr_figs2_col2 = st.beta_columns(2)
with fm_mlr_figs2_col1:
st.write("Residuals vs Leverage:")
residuals_leverage_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_leverage_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
residuals_leverage_data["Leverage"] = model_full_results["MLR leverage"]
residuals_leverage_data["Index"] = df.index
residuals_leverage = alt.Chart(residuals_leverage_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Leverage", title = "leverage", scale = alt.Scale(domain = [min(residuals_leverage_data["Leverage"]), max(residuals_leverage_data["Leverage"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals","Leverage", "Index"]
)
residuals_leverage_plot = residuals_leverage + residuals_leverage.transform_loess("Leverage", "StandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_leverage_plot, use_container_width = True)
with fm_mlr_figs2_col2:
st.write("Cook's distance:")
cooksD_data = pd.DataFrame()
cooksD_data["CooksD"] = model_full_results["MLR Cooks distance"]
cooksD_data["Index"] = df.index
cooksD = alt.Chart(cooksD_data, height = 200).mark_bar(size = 2).encode(
x = alt.X("Index", title = "index", scale = alt.Scale(domain = [-1, max(cooksD_data["Index"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("CooksD", title = "Cook's distance", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["CooksD", "Index"]
)
st.altair_chart(cooksD, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_resVsLev_cooksD")))
# Download link for MLR output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["MLR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["MLR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["MLR coefficients"].to_excel(excel_file, sheet_name="coefficients")
model_full_results["MLR ANOVA"].to_excel(excel_file, sheet_name="ANOVA")
model_full_results["MLR hetTest"].to_excel(excel_file, sheet_name="heteroskedasticity_tests")
mlr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "MLR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Multiple Linear Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
fm_gam_reg_col1, fm_gam_reg_col2 = st.beta_columns(2)
# Regression information
with fm_gam_reg_col1:
st.write("Regression information:")
st.table(model_full_results["GAM information"].style.set_precision(user_precision))
# Regression statistics
with fm_gam_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["GAM statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_regStat")))
st.write("")
# Feature significance
st.write("Feature significance:")
st.table(model_full_results["GAM feature significance"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_featSig")))
st.write("")
# Variable importance (via permutation)
fm_gam_figs1_col1, fm_gam_figs1_col2 = st.beta_columns(2)
with fm_gam_figs1_col1:
st.write("Variable importance (via permutation):")
gam_varImp_table = model_full_results["GAM variable importance"]
st.table(gam_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_gam_figs1_col2:
st.write("")
st.write("")
st.write("")
gam_varImp_plot_data = model_full_results["GAM variable importance"]
gam_varImp_plot_data["Variable"] = gam_varImp_plot_data.index
gam_varImp = alt.Chart(gam_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(gam_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_gam_figs3_col1, fm_gam_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_gam = pd.DataFrame(columns = [pd_var])
pd_data_gam[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam["Partial dependence"] = model_full_results["GAM partial dependence"][pd_var]["pd_values"]
pd_data_gam["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_data_gam["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam = alt.Chart(pd_data_gam, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%", "Partial dependence", "Lower 95%"] + [pd_var]
)
pd_data_ticks_gam = pd.DataFrame(columns = [pd_var])
pd_data_ticks_gam[pd_var] = df[pd_var]
pd_data_ticks_gam["y"] = [model_full_results["GAM partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_gam = alt.Chart(pd_data_ticks_gam, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_gam[pd_var].min(), pd_data_ticks_gam[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
pd_data_gam_lower = pd.DataFrame(columns = [pd_var])
pd_data_gam_lower[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_lower["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_chart_gam_lower = alt.Chart(pd_data_gam_lower, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Lower 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Lower 95%"] + [pd_var]
)
pd_data_gam_upper = pd.DataFrame(columns = [pd_var])
pd_data_gam_upper[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_upper["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam_upper = alt.Chart(pd_data_gam_upper, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Upper 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%"] + [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_gam_figs3_col1:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_gam_figs3_col2:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_partDep")))
st.write("")
# Further graphical output
fm_gam_figs4_col1, fm_gam_figs4_col2 = st.beta_columns(2)
with fm_gam_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["GAM fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_gam_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Generalized Additive Models"]
residuals_fitted_data["Fitted"] = model_full_results["GAM fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_obsResVsFit")))
# Download link for GAM output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["GAM information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["GAM statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["GAM feature significance"].to_excel(excel_file, sheet_name="feature_significance")
gam_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "GAM full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Generalized Additive Models full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
fm_rf_reg_col1, fm_rf_reg_col2 = st.beta_columns(2)
# Regression information
with fm_rf_reg_col1:
st.write("Regression information:")
st.table(model_full_results["RF information"].style.set_precision(user_precision))
# Regression statistics
with fm_rf_reg_col2:
st.write("Regression statistics:")
rf_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE"], columns = ["Value"])
rf_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Random Forest"]
rf_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Random Forest"]
rf_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Random Forest"]
rf_error_est.loc["Residual SE"] = model_full_results["RF Residual SE"]
st.table(rf_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_regStat")))
st.write("")
# Variable importance (via permutation)
fm_rf_figs1_col1, fm_rf_figs1_col2 = st.beta_columns(2)
with fm_rf_figs1_col1:
st.write("Variable importance (via permutation):")
rf_varImp_table = model_full_results["RF variable importance"]
st.table(rf_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs1_col2:
st.write("")
st.write("")
st.write("")
rf_varImp_plot_data = model_full_results["RF variable importance"]
rf_varImp_plot_data["Variable"] = rf_varImp_plot_data.index
rf_varImp = alt.Chart(rf_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(rf_varImp, use_container_width = True)
st.write("")
fm_rf_figs2_col1, fm_rf_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_rf_figs2_col1:
st.write("Feature importance (impurity-based):")
rf_featImp_table = model_full_results["RF feature importance"]
st.table(rf_featImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs2_col2:
st.write("")
st.write("")
st.write("")
rf_featImp_plot_data = model_full_results["RF feature importance"]
rf_featImp_plot_data["Variable"] = rf_featImp_plot_data.index
rf_featImp = alt.Chart(rf_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(rf_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_rf_figs3_col1, fm_rf_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_rf = pd.DataFrame(columns = [pd_var])
pd_data_rf[pd_var] = model_full_results["RF partial dependence"][pd_var][1][0]
pd_data_rf["Partial dependence"] = model_full_results["RF partial dependence"][pd_var][0][0]
pd_chart_rf = alt.Chart(pd_data_rf, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_rf = pd.DataFrame(columns = [pd_var])
pd_data_ticks_rf[pd_var] = df[pd_var]
pd_data_ticks_rf["y"] = [model_full_results["RF partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_rf = alt.Chart(pd_data_ticks_rf, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_rf[pd_var].min(), pd_data_ticks_rf[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_rf_figs3_col1:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_rf_figs3_col2:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_partDep")))
st.write("")
# Further graphical output
fm_rf_figs4_col1, fm_rf_figs4_col2 = st.beta_columns(2)
with fm_rf_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["RF fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_rf_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Random Forest"]
residuals_fitted_data["Fitted"] = model_full_results["RF fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_obsResVsFit")))
# Download link for RF output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["RF information"].to_excel(excel_file, sheet_name="regression_information")
rf_error_est.to_excel(excel_file, sheet_name="regression_statistics")
rf_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
rf_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "RF full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Random Forest full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
fm_brt_reg_col1, fm_brt_reg_col2 = st.beta_columns(2)
# Regression information
with fm_brt_reg_col1:
st.write("Regression information:")
st.table(model_full_results["BRT information"].style.set_precision(user_precision))
# Regression statistics
with fm_brt_reg_col2:
st.write("Regression statistics:")
brt_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE"], columns = ["Value"])
brt_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Boosted Regression Trees"]
brt_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Boosted Regression Trees"]
brt_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Boosted Regression Trees"]
brt_error_est.loc["Residual SE"] = model_full_results["BRT Residual SE"]
st.table(brt_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_regStat")))
st.write("")
# Training score (MSE vs. number of trees)
st.write("Training score:")
train_score = pd.DataFrame(index = range(model_full_results["BRT train score"].shape[0]), columns = ["Training MSE"])
train_score["Training MSE"] = model_full_results["BRT train score"]
train_score["Trees"] = train_score.index+1
train_score_plot = alt.Chart(train_score, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Trees", title = "trees", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [train_score["Trees"].min(), train_score["Trees"].max()])),
y = alt.Y("Training MSE", title = "training MSE", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Training MSE", "Trees"]
)
st.altair_chart(train_score_plot, use_container_width = True)
st.write("")
# Variable importance (via permutation)
fm_brt_figs1_col1, fm_brt_figs1_col2 = st.beta_columns(2)
with fm_brt_figs1_col1:
st.write("Variable importance (via permutation):")
brt_varImp_table = model_full_results["BRT variable importance"]
st.table(brt_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs1_col2:
st.write("")
st.write("")
st.write("")
brt_varImp_plot_data = model_full_results["BRT variable importance"]
brt_varImp_plot_data["Variable"] = brt_varImp_plot_data.index
brt_varImp = alt.Chart(brt_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(brt_varImp, use_container_width = True)
st.write("")
fm_brt_figs2_col1, fm_brt_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_brt_figs2_col1:
st.write("Feature importance (impurity-based):")
brt_featImp_table = model_full_results["BRT feature importance"]
st.table(brt_featImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs2_col2:
st.write("")
st.write("")
st.write("")
brt_featImp_plot_data = model_full_results["BRT feature importance"]
brt_featImp_plot_data["Variable"] = brt_featImp_plot_data.index
brt_featImp = alt.Chart(brt_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(brt_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_brt_figs3_col1, fm_brt_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_brt = pd.DataFrame(columns = [pd_var])
pd_data_brt[pd_var] = model_full_results["BRT partial dependence"][pd_var][1][0]
pd_data_brt["Partial dependence"] = model_full_results["BRT partial dependence"][pd_var][0][0]
pd_chart_brt = alt.Chart(pd_data_brt, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_brt = pd.DataFrame(columns = [pd_var])
pd_data_ticks_brt[pd_var] = df[pd_var]
pd_data_ticks_brt["y"] = [model_full_results["BRT partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_brt = alt.Chart(pd_data_ticks_brt, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_brt[pd_var].min(), pd_data_ticks_brt[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_brt_figs3_col1:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_brt_figs3_col2:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_partDep")))
st.write("")
# Further graphical output
fm_brt_figs4_col1, fm_brt_figs4_col2 = st.beta_columns(2)
with fm_brt_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["BRT fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_brt_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Boosted Regression Trees"]
residuals_fitted_data["Fitted"] = model_full_results["BRT fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_obsResVsFit")))
# Download link for BRT output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["BRT information"].to_excel(excel_file, sheet_name="regression_information")
brt_error_est.to_excel(excel_file, sheet_name="regression_statistics")
brt_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
brt_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "BRT full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Boosted Regression Trees full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
fm_ann_reg_col1, fm_ann_reg_col2 = st.beta_columns(2)
# Regression information
with fm_ann_reg_col1:
st.write("Regression information:")
st.table(model_full_results["ANN information"].style.set_precision(user_precision))
# Regression statistics
with fm_ann_reg_col2:
st.write("Regression statistics:")
ann_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE", "Best loss"], columns = ["Value"])
ann_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Artificial Neural Networks"]
ann_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Artificial Neural Networks"]
ann_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Artificial Neural Networks"]
ann_error_est.loc["Residual SE"] = model_full_results["ANN Residual SE"]
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
ann_error_est.loc["Best loss"] = model_full_results["ANN loss"]
st.table(ann_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_regStat")))
st.write("")
# Loss curve (loss vs. number of iterations (epochs))
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
st.write("Loss curve:")
loss_curve = pd.DataFrame(index = range(len(model_full_results["ANN loss curve"])), columns = ["Loss"])
loss_curve["Loss"] = model_full_results["ANN loss curve"]
loss_curve["Iterations"] = loss_curve.index+1
loss_curve_plot = alt.Chart(loss_curve, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Iterations", title = "iterations (epochs)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [loss_curve["Iterations"].min(), loss_curve["Iterations"].max()])),
y = alt.Y("Loss", title = "loss", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Loss", "Iterations"]
)
st.altair_chart(loss_curve_plot, use_container_width = True)
st.write("")
fm_ann_figs1_col1, fm_ann_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_ann_figs1_col1:
st.write("Variable importance (via permutation):")
ann_varImp_table = model_full_results["ANN variable importance"]
st.table(ann_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_ann_figs1_col2:
st.write("")
st.write("")
st.write("")
ann_varImp_plot_data = model_full_results["ANN variable importance"]
ann_varImp_plot_data["Variable"] = ann_varImp_plot_data.index
ann_varImp = alt.Chart(ann_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(ann_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_ann_figs2_col1, fm_ann_figs2_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_ann = pd.DataFrame(columns = [pd_var])
pd_data_ann[pd_var] = (model_full_results["ANN partial dependence"][pd_var][1][0]*(df[pd_var].std()))+df[pd_var].mean()
pd_data_ann["Partial dependence"] = model_full_results["ANN partial dependence"][pd_var][0][0]
pd_chart_ann = alt.Chart(pd_data_ann, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_ann = pd.DataFrame(columns = [pd_var])
pd_data_ticks_ann[pd_var] = df[pd_var]
pd_data_ticks_ann["y"] = [model_full_results["ANN partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_ann = alt.Chart(pd_data_ticks_ann, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_ann[pd_var].min(), pd_data_ticks_ann[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_ann_figs2_col1:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_ann_figs2_col2:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_partDep")))
st.write("")
# Further graphical output
fm_ann_figs3_col1, fm_ann_figs3_col2 = st.beta_columns(2)
with fm_ann_figs3_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["ANN fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_ann_figs3_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Artificial Neural Networks"]
residuals_fitted_data["Fitted"] = model_full_results["ANN fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_obsResVsFit")))
# Download link for ANN output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["ANN information"].to_excel(excel_file, sheet_name="regression_information")
ann_error_est.to_excel(excel_file, sheet_name="regression_statistics")
ann_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANN full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Artificial Neural Networks full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# Performance metrics across all models
st.markdown("**Model comparison**")
st.write("Performance metrics:")
model_comp_sort_enable = (model_full_results["model comparison"]).transpose()
st.write(model_comp_sort_enable.style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompPerf")))
st.write("")
model_full_res = pd.DataFrame(index = ["min", "25%-Q", "median", "75%-Q", "max"], columns = sb_ML_alg)
for m in sb_ML_alg:
model_full_res.loc["min"][m] = model_full_results["residuals"][m].min()
model_full_res.loc["25%-Q"][m] = model_full_results["residuals"][m].quantile(q = 0.25)
model_full_res.loc["median"][m] = model_full_results["residuals"][m].quantile(q = 0.5)
model_full_res.loc["75%-Q"][m] = model_full_results["residuals"][m].quantile(q = 0.75)
model_full_res.loc["max"][m] = model_full_results["residuals"][m].max()
st.write("Residuals distribution:")
st.write((model_full_res).transpose().style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompRes")))
st.write("")
# Download link for model comparison output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_comp_sort_enable.to_excel(excel_file, sheet_name="performance_metrics")
model_full_res.transpose().to_excel(excel_file, sheet_name="residuals_distribution")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Model comparison full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download model comparison output</a>
""",
unsafe_allow_html=True)
st.write("")
#-------------------------------------------------------------
# Binary response variable
if response_var_type == "binary":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
# Regression information
fm_mlr_reg_col1, fm_mlr_reg_col2 = st.beta_columns(2)
with fm_mlr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["MLR information"].style.set_precision(user_precision))
# Regression statistics
with fm_mlr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["MLR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["MLR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_coef")))
st.write("")
# ANOVA
st.write("ANOVA:")
st.table(model_full_results["MLR ANOVA"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_ANOVA")))
st.write("")
# Heteroskedasticity tests
if MLR_intercept == "Yes":
st.write("Heteroskedasticity tests:")
st.table(model_full_results["MLR hetTest"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_hetTest")))
st.write("")
# Variable importance (via permutation)
fm_mlr_reg2_col1, fm_mlr_reg2_col2 = st.beta_columns(2)
with fm_mlr_reg2_col1:
st.write("Variable importance (via permutation):")
mlr_varImp_table = model_full_results["MLR variable importance"]
st.table(mlr_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_mlr_reg2_col2:
st.write("")
st.write("")
st.write("")
mlr_varImp_plot_data = model_full_results["MLR variable importance"]
mlr_varImp_plot_data["Variable"] = mlr_varImp_plot_data.index
mlr_varImp = alt.Chart(mlr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(mlr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_varImp")))
st.write("")
# Graphical output
fm_mlr_figs_col1, fm_mlr_figs_col2 = st.beta_columns(2)
with fm_mlr_figs_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["MLR fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_mlr_figs_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_fitted_data["Fitted"] = model_full_results["MLR fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_obsResVsFit")))
st.write("")
fm_mlr_figs1_col1, fm_mlr_figs1_col2 = st.beta_columns(2)
with fm_mlr_figs1_col1:
st.write("Normal QQ-plot:")
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_qqplot")))
with fm_mlr_figs1_col2:
st.write("Scale-Location:")
scale_location_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
scale_location_data["SqrtStandResiduals"] = np.sqrt(abs((residuals - residuals.mean())/residuals.std()))
scale_location_data["Fitted"] = model_full_results["MLR fitted"]
scale_location_data["Index"] = df.index
scale_location = alt.Chart(scale_location_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(scale_location_data["Fitted"]), max(scale_location_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("SqrtStandResiduals", title = "sqrt(|stand. residuals|)", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["SqrtStandResiduals", "Fitted", "Index"]
)
scale_location_plot = scale_location + scale_location.transform_loess("Fitted", "SqrtStandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(scale_location_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_scaleLoc")))
st.write("")
fm_mlr_figs2_col1, fm_mlr_figs2_col2 = st.beta_columns(2)
with fm_mlr_figs2_col1:
st.write("Residuals vs Leverage:")
residuals_leverage_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_leverage_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
residuals_leverage_data["Leverage"] = model_full_results["MLR leverage"]
residuals_leverage_data["Index"] = df.index
residuals_leverage = alt.Chart(residuals_leverage_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Leverage", title = "leverage", scale = alt.Scale(domain = [min(residuals_leverage_data["Leverage"]), max(residuals_leverage_data["Leverage"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals","Leverage", "Index"]
)
residuals_leverage_plot = residuals_leverage + residuals_leverage.transform_loess("Leverage", "StandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_leverage_plot, use_container_width = True)
with fm_mlr_figs2_col2:
st.write("Cook's distance:")
cooksD_data = pd.DataFrame()
cooksD_data["CooksD"] = model_full_results["MLR Cooks distance"]
cooksD_data["Index"] = df.index
cooksD = alt.Chart(cooksD_data, height = 200).mark_bar(size = 2).encode(
x = alt.X("Index", title = "index", scale = alt.Scale(domain = [-1, max(cooksD_data["Index"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("CooksD", title = "Cook's distance", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["CooksD", "Index"]
)
st.altair_chart(cooksD, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_resVsLev_cooksD")))
# Download link for MLR output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["MLR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["MLR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["MLR coefficients"].to_excel(excel_file, sheet_name="coefficients")
model_full_results["MLR ANOVA"].to_excel(excel_file, sheet_name="ANOVA")
model_full_results["MLR hetTest"].to_excel(excel_file, sheet_name="heteroskedasticity_tests")
mlr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "MLR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Multiple Linear Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# LR specific output
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
st.markdown("**Logistic Regression**")
# Regression information
fm_lr_reg_col1, fm_lr_reg_col2 = st.beta_columns(2)
with fm_lr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["LR information"].style.set_precision(user_precision))
# Regression statistics
with fm_lr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["LR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["LR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_coef")))
st.write("")
# Variable importance (via permutation)
fm_lr_fig1_col1, fm_lr_fig1_col2 = st.beta_columns(2)
with fm_lr_fig1_col1:
st.write("Variable importance (via permutation):")
lr_varImp_table = model_full_results["LR variable importance"]
st.table(lr_varImp_table.style.set_precision(user_precision))
with fm_lr_fig1_col2:
st.write("")
st.write("")
st.write("")
lr_varImp_plot_data = model_full_results["LR variable importance"]
lr_varImp_plot_data["Variable"] = lr_varImp_plot_data.index
lr_varImp = alt.Chart(lr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(lr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_varImp")))
st.write("")
fm_lr_fig_col1, fm_lr_fig_col2 = st.beta_columns(2)
# Observed vs. Probability of Occurrence
with fm_lr_fig_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["LR fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[1]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Logistic Regression"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 2, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_lr_fig_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["LR ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["LR ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Logistic Regression"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Logistic Regression"]
AUC_ROC_plot= alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_thresAUC")))
st.write("")
# Partial probabilities
st.write("Partial probability plots:")
fm_lr_figs2_col1, fm_lr_figs2_col2 = st.beta_columns(2)
for pp_var in expl_var:
pp_data = pd.DataFrame(columns = [pp_var])
pp_data[pp_var] = model_full_results["LR partial probabilities"][pp_var][pp_var]
pp_data["ProbabilityOfOccurrence"] = model_full_results["LR partial probabilities"][pp_var]["prediction"]
pp_data["Observed"] = df[response_var]
pp_chart = alt.Chart(pp_data, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pp_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("ProbabilityOfOccurrence", title = "probability of occurrence", scale = alt.Scale(domain = [0, 1]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["ProbabilityOfOccurrence"] + [pp_var]
)
obs_data_plot = alt.Chart(pp_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pp_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence"] + [pp_var]
)
if expl_var.index(pp_var)%2 == 0:
with fm_lr_figs2_col1:
st.altair_chart(pp_chart + obs_data_plot, use_container_width = True)
if expl_var.index(pp_var)%2 == 1:
with fm_lr_figs2_col2:
st.altair_chart(pp_chart + obs_data_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_partProb")))
# Download link for LR output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["LR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["LR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["LR coefficients"].to_excel(excel_file, sheet_name="coefficients")
lr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "LR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Logistic Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
fm_gam_reg_col1, fm_gam_reg_col2 = st.beta_columns(2)
# Regression information
with fm_gam_reg_col1:
st.write("Regression information:")
st.table(model_full_results["GAM information"].style.set_precision(user_precision))
# Regression statistics
with fm_gam_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["GAM statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_regStat_bin")))
st.write("")
# Feature significance
st.write("Feature significance:")
st.table(model_full_results["GAM feature significance"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_featSig_bin")))
st.write("")
# Variable importance (via permutation)
fm_gam_figs1_col1, fm_gam_figs1_col2 = st.beta_columns(2)
with fm_gam_figs1_col1:
st.write("Variable importance (via permutation):")
gam_varImp_table = model_full_results["GAM variable importance"]
st.table(gam_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_gam_figs1_col2:
st.write("")
st.write("")
st.write("")
gam_varImp_plot_data = model_full_results["GAM variable importance"]
gam_varImp_plot_data["Variable"] = gam_varImp_plot_data.index
gam_varImp = alt.Chart(gam_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(gam_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_varImp_bin")))
st.write("")
# Observed vs. Probability of Occurrence
fm_gam_figs5_col1, fm_gam_figs5_col2 = st.beta_columns(2)
with fm_gam_figs5_col1:
st.write("Observed vs. Probability of Occurrence:")
                        prob_data = pd.DataFrame(model_full_results["GAM fitted"])
# libraries
import numpy as np
import pandas as pd
from pyliftover import LiftOver
import io
import os
import pyBigWig
import pickle
import time
"""Function to read and format data for process
Args:
input_path (str): The path of the input data.
Chr_col_name (str): The name of the column in the input data representing chromosome name.
BP_col_name (str): The name of the column in the input data representing base pair position.
SNP_col_name (str): The name of the column in the input data representing rs ID.
A1_col_name (str): The name of the column in the input data representing effect allele.
A2_col_name (str): The name of the column in the input data representing non-effect allele.
        EAF_col_name (str): The name of the column in the input data representing effect allele frequency.
        Beta_col_name (str): The name of the column in the input data representing effect size (beta).
        Se_col_name (str): The name of the column in the input data representing standard error.
P_col_name (str): The name of the column in the input data representing p-value.
separate_by (str): How the input data is separated. Default to "\t" (tab separated).
Returns:
pandas.DataFrame: return formatted data in the form of pandas DataFrame
"""
def read_data( input_path, Chr_col_name, BP_col_name, SNP_col_name, A1_col_name, A2_col_name, EAF_col_name, Beta_col_name, Se_col_name, P_col_name, separate_by="\t"):
raw_df = pd.read_csv(input_path, compression='gzip', header=0, sep=separate_by,quotechar='"')
# print(raw_df)
result = raw_df.loc[:,[Chr_col_name, BP_col_name, SNP_col_name, A1_col_name, A2_col_name, EAF_col_name, Beta_col_name, Se_col_name, P_col_name]]
res = result.rename(
{
Chr_col_name:"Chr",
BP_col_name:"BP",
SNP_col_name:"SNP",
A1_col_name:"A1",
A2_col_name:"A2",
EAF_col_name:"EAF",
Beta_col_name:"Beta",
Se_col_name:"Se",
P_col_name:"P"
},axis="columns")
dtype = dict(Chr="string", BP='Int64', SNP="string", A1="string", A2="string", EAF=float, Beta=float, Se=float, P=float)
res = res.astype(dtype)
res["Chr"] = res["Chr"].str.upper()
res["Chr"] = res["Chr"].apply(lambda y: "X" if y=="23" else("Y" if y=="24" else y))
res["A1"] = res["A1"].str.upper()
res["A2"] = res["A2"].str.upper()
res["SNP"] = res["SNP"].str.lower()
dtype = dict(Chr="string", BP='Int64', SNP="string", A1="string", A2="string", EAF=float, Beta=float, Se=float, P=float)
res = res.astype(dtype)
return res
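# Example usage (illustrative sketch only; the file path and column names below are
# assumptions, not part of this module):
# df = read_data("sumstats.txt.gz", "CHR", "POS", "RSID", "EA", "NEA", "EAF", "BETA", "SE", "P")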
def read_formatted_data(input_path):
raw_df = pd.read_csv(input_path, compression='gzip', header=0, quotechar='"')
dtype = dict(Chr="string", BP='Int64', SNP="string", A1="string", A2="string", EAF=float, Beta=float, Se=float, P=float)
res = raw_df.astype(dtype)
return res
"""Function to filter only bi-allelic cases in the data
Args:
df (pandas.DataFrame): The data frame to be filtered.
        rest (boolean): if true, return only the non-bi-allelic cases instead of filtering them out. Default to False.
Returns:
pandas.DataFrame: return filtered data in the form of pandas DataFrame.
"""
def filter_bi_allelic(df, rest=False):
len_mask = (df['A1'].str.len() == 1) & (df['A2'].str.len() == 1)
val_mask = (df['A1'] != "I") & (df['A1'] != "D") & (df['A1'] != "R") & (df['A2'] != "I") & (df['A2'] != "D") & (df['A2'] != "R")
chr_mask = df['Chr'].str.isdigit()
mask = len_mask & val_mask & chr_mask
if not rest:
result = df[mask].reset_index(drop=True)
return result
else:
result = df[~mask].reset_index(drop=True)
return result
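# Example usage (sketch; `df` is assumed to be the formatted DataFrame returned by read_data):
# bi_allelic = filter_bi_allelic(df)            # keep only bi-allelic autosomal SNPs
# excluded = filter_bi_allelic(df, rest=True)   # inspect the rows that were filtered out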
"""Function to drop rows in data containing dduplicate keys (Chr + BP)
Args:
df (pandas.DataFrame): The data frame to be deduplicated.
Returns:
pandas.DataFrame: return filtered data in the form of pandas DataFrame.
"""
def deduplicate(df):
result = df.drop_duplicates(subset=['Chr', 'BP'], keep=False)
return result
"""Function to sort the data based on Chr and BP
Args:
df (pandas.DataFrame): the data to be sorted
Returns:
pandas.DataFrame: return the sorted data
"""
def sort_by_chr_bp(df):
def mixs(v):
try:
return int(v)
except ValueError:
return v
df = df.assign(chr_numeric = lambda x: x['Chr'].apply(lambda y: 23 if y=="X" else(24 if y=="Y" else int(y))))
result = df.sort_values(by=["chr_numeric", "BP"]).drop(['chr_numeric'], axis=1).reset_index(drop=True)
return result
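# Example usage (sketch; chaining the cleaning helpers defined above):
# df_clean = sort_by_chr_bp(deduplicate(filter_bi_allelic(df)))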
"""Function to query required data from dbSnp153
Args:
df (pandas.DataFrame): the data we want more info
link (str): path or link of the '.bb' file of dbSnp153
Returns:
pandas.DataFrame: return complete information from dbSnp153 as a python dictionary
"""
# link = "http://hgdownload.soe.ucsc.edu/gbdb/hg38/snp/dbSnp153.bb"
def query_data(df, link="http://hgdownload.soe.ucsc.edu/gbdb/hg19/snp/dbSnp153.bb", print_log = False):
bb = pyBigWig.open(link)
result = {}
set_list = []
chrom = ""
log = []
not_found = 0
for row in df.itertuples():
        if str(row.Chr) == "23":
            chrom = "chrX"
        elif str(row.Chr) == "24":
            chrom = "chrY"
else:
chrom = "chr" + str(row.Chr)
end_pos = row.BP
        start_pos = end_pos - 1
        # print(chrom, start_pos, end_pos)
        dat = None
        try:
            dat = bb.entries(chrom, start_pos, end_pos)
        except RuntimeError:
            log.append((chrom, start_pos, end_pos))
        if dat is not None:
for i in dat:
reference_start = i[0]
reference_end = i[1]
raw_string = i[2]
if reference_start == start_pos and reference_end == end_pos:
key = (str(row.Chr), reference_end)
result[key] = raw_string
else:
not_found += 1
    print(not_found, " positions could not be found in dbSnp153")
    if print_log:
        print("The following Chr + BP positions could not be matched with the current genome build version:")
        print(log)
    else:
        print(len(log), " positions could not be matched with the current genome build")
return result
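# Example usage (sketch; querying the remote dbSnp153 bigBed file can be slow, so the
# returned dictionary is typically cached on disk with save_obj/load_obj below):
# dbsnp_info = query_data(df_clean)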
"""Function to save python data structure on disk
Args:
obj (obj): the data structure/object to be saved on disk.
name (str): the name for the obj to be saved as.
Returns:
return nothing
"""
def save_obj(obj, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
"""Function to load saved python data structure from disk
Args:
name (str): the name of the saved obj on disk to be loaded
Returns:
return the loaded object/ data structure
"""
def load_obj(obj_path ):
with open(obj_path, 'rb') as f:
return pickle.load(f)
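# Example usage (sketch; the file name is an assumption):
# save_obj(dbsnp_info, "dbSnp153_subset")          # writes dbSnp153_subset.pkl
# dbsnp_info = load_obj("dbSnp153_subset.pkl")     # reload later without re-querying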
"""Function to create query data for lift over
Args:
input_version (str): the genome build of the original data.
output_version (str): the desired genome build you want to lift over to.
Returns:
python dictionary: return the input version, output version and the liftover chain file.
"""
def create_lo(input_version, output_version):
lo = LiftOver(input_version, output_version)
return {"input_version": input_version, "output_version":output_version, "lo":lo}
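# Example usage (sketch; genome build names follow the pyliftover naming convention):
# lo_dict = create_lo("hg19", "hg38")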
"""Function to lift over genome build
Args:
df (pandas.DataFrame): the data to be lifted over
lo_dict (python dictionary): the lift over dictionary return from the create_lo function
keep_all (boolean): if true, the function will keep and mark the rows that are not convertible. Default to False.
        inplace (boolean): if true, the lifted-over coordinates replace the original Chr + BP columns in the output. Default to True.
Returns:
pandas.DataFrame: return the data being lifted over to the desired genome build
"""
def lift_over(df, lo_dict, keep_all=False, inplace= True):
reference_table = _lift_over_basic(df, lo_dict)
result = _lift_over_merge(df, reference_table)
if not keep_all:
new_chr_name = reference_table.columns[2]
new_pos_name = reference_table.columns[3]
result = result.dropna(subset=[new_chr_name]).reset_index(drop=True)
if inplace:
new_chr_col_name = lo_dict['output_version']+"_chr"
new_pos_col_name = lo_dict['output_version']+"_pos"
result = result[[new_chr_col_name, new_pos_col_name, "SNP", "A1", "A2", "EAF", "Beta", "Se", "P"]].rename({new_chr_col_name:"Chr", new_pos_col_name:"BP"}, axis="columns")
return result
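# Example usage (sketch; `df_clean` and `lo_dict` come from the calls sketched above):
# df_hg38 = lift_over(df_clean, lo_dict)                                    # unconvertible rows dropped, Chr/BP replaced
# df_hg38_all = lift_over(df_clean, lo_dict, keep_all=True, inplace=False)  # keep everything for inspection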
"""Function to query and add rs ID for rows missing rsIDs.
Args:
df (pandas.DataFrame): the data to be added rs_ids
data (python dictionary): the dictionary containing required info from dbSnp153
Returns:
pandas.DataFrame: return the data being added rs_ids.
"""
def add_rsid(df, data, select_cols="drop_comments", filter_rows="drop"):
added_rsid = []
comment = []
for row in df.itertuples():
chrom = row.Chr
pos = row.BP
rs_id = row.SNP
key = (chrom, pos)
if key in data: # the row in the data can be found in dbSnp153
raw_string = data[key]
parsed_string = raw_string.split('\t')
data_rs_id = parsed_string[0]
if pd.isna(rs_id): # rs_id is absence in original dataset
added_rsid.append(data_rs_id)
comment.append("A")
elif rs_id == data_rs_id: # if rs_id in original dataset is the same as dnSnp153
added_rsid.append(rs_id)
comment.append("S")
else: # find different rsid in dbSnp153, update with new
added_rsid.append(data_rs_id)
comment.append("D")
else:
added_rsid.append(pd.NA)
comment.append("NF")
result = df.assign(added_rsid = added_rsid)
result = result.assign(comment=comment)
# filter rows
if filter_rows == "all":
result = result
elif filter_rows == "errors":
mask = pd.isna(result["added_rsid"])
result = result[mask]
elif filter_rows == "drop":
result = result.dropna(subset=["added_rsid"]).reset_index(drop=True)
else:
raise ValueError('Illegal argument for filter_rows! Choose among "errors", "all", and "drop".')
# select cols
if select_cols == "all":
result = result
elif select_cols == "inplace":
result = result[["Chr", "BP" ,"added_rsid", "A1", "A2", "EAF", "Beta", "Se", "P"]].rename({"added_rsid": "SNP"},axis="columns")
elif select_cols == "drop_comments":
result = result[["Chr", "BP" ,"SNP", "A1", "A2", "EAF", "Beta", "Se", "P", "added_rsid"]]
else:
raise ValueError('Illegal argument for select_cols! Choose among "inplace", "all", and "drop_comments".')
return result
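# Example usage (sketch; `dbsnp_info` is the dictionary returned by query_data):
# df_rsid = add_rsid(df_clean, dbsnp_info, select_cols="inplace", filter_rows="drop")
# rsid_errors = add_rsid(df_clean, dbsnp_info, filter_rows="errors")   # rows not found in dbSnp153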
"""Function to flip the input data to forward strand
Args:
df (pandas.DataFrame): the data to be flipped to forward strand
data (python dictionary): the dictionary containing required info from dbSnp153
        select_cols (str): which columns to keep in the output; one of "inplace", "all", or "drop_comments". Default to "drop_comments".
        filter_rows (str): how to handle rows that cannot be flipped; one of "errors", "all", or "drop". Default to "drop".
Returns:
pandas.DataFrame: return the data being flipped to forward strand
"""
def flip_strand( df, data, select_cols="drop_comments", filter_rows="drop"):
flipped_A1 = []
flipped_A2 = []
comment = []
for row in df.itertuples():
chrom = row.Chr
pos = row.BP
A1 = row.A1
A2 = row.A2
key = (chrom, pos)
if key in data: # check if key in dnSnp153
cur_set = {A1, A2}
raw_string = data[key]
parsed_string = raw_string.split("\t")
data_a1 = [i for i in parsed_string[1] if i != ","]
data_a2 = [i for i in parsed_string[3] if i != ","]
if len(data_a1) == 1 and len(data_a2) == 1:
cur_set.add(data_a1[0])
cur_set.add(data_a2[0])
# print(cur_set)
if len(cur_set) == 4: # flip
new_a1 = _flip(A1)
new_a2 = _flip(A2)
flipped_A1.append(new_a1)
flipped_A2.append(new_a2)
comment.append("F")
elif len(cur_set) == 2: # do not flip
# print(i)
flipped_A1.append(A1)
flipped_A2.append(A2)
comment.append("S")
else: # mark: what is this case? => original data T/C, dbsnp153 C/A: 10 94958283 rs111998500
flipped_A1.append(data_a1[0])
flipped_A2.append(data_a2[0])
comment.append("D")
else: # tri-alleic snps in dbSnp153 -> mark
flipped_A1.append(pd.NA)
flipped_A2.append(pd.NA)
comment.append("ID")
else: # key not found
flipped_A1.append(pd.NA)
flipped_A2.append(pd.NA)
comment.append("NF")
result = df.assign(new_A1 = flipped_A1)
result = result.assign(new_A2 = flipped_A2)
result = result.assign(comment=comment)
# filter rows
if filter_rows == "all":
result = result
elif filter_rows == "errors":
        mask = (result["comment"] != "F") & (result["comment"] != "S")  # keep only rows that could not be flipped or confirmed
result = result[mask]
elif filter_rows == "drop":
result = result.dropna(subset=["new_A1", "new_A2"]).reset_index(drop=True)
else:
raise ValueError('Illegal argument for filter_rows! Choose among "errors", "all", and "drop".')
# select cols
if select_cols == "all":
result = result
elif select_cols == "inplace":
result = result[["Chr", "BP" , "new_A1", "new_A2", "EAF", "Beta", "Se", "P"]].rename({"new_A1": "A1", "new_A2":"A2"},axis="columns")
elif select_cols == "drop_comments":
result = result[["Chr", "BP", "A1", "A2", "EAF", "Beta", "Se", "P", "new_A1", "new_A2"]]
else:
raise ValueError('Illegal argument for select_cols! Choose among "inplace", "all", and "drop_comments".')
return result
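# Example usage (sketch; the same dbSnp153 dictionary is reused):
# df_forward = flip_strand(df_rsid, dbsnp_info, select_cols="inplace", filter_rows="drop")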
"""Function to align effect allele
this function will align the effect allele of input data based on a reference data
Args:
reference (pandas.DataFrame): the reference table
df (pandas.DataFrame): the data to be aligned
        show_errors (boolean): if true, the function will return the rows that cannot be aligned instead of the aligned result. Default to False.
Returns:
pandas.DataFrame: return the data with its effect allele being aligned with the reference table.
"""
# TODO: to be completed
def align_effect_allele( reference, df, show_errors=False):
reference = reference[["Chr", "BP", "A1", "A2"]].rename({"A1":"reference_A1", "A2":"reference_A2"}, axis="columns")
process = df[["Chr", "BP", "A1", "A2"]].rename({"A1":"process_A1", "A2":"process_A2"}, axis="columns")
merge_table = pd.merge(process, reference, on=["Chr", "BP"], how="inner")
if len(merge_table) == 0:
print("reference data and process data have no records in common. Please check data source.")
return
nochange_mask = (merge_table["process_A1"] == merge_table["reference_A1"]) & (merge_table["process_A2"] == merge_table["reference_A2"])
align_mask = (merge_table["process_A1"] == merge_table["reference_A2"]) & (merge_table["process_A2"] == merge_table["reference_A1"])
error_mask = ~nochange_mask & ~align_mask
key_to_nochange = merge_table[nochange_mask][["Chr", "BP"]]
key_to_align = merge_table[align_mask][["Chr", "BP"]]
key_to_error = merge_table[error_mask][["Chr", "BP"]]
# print(key_to_error)
nochange = pd.merge(df, key_to_nochange, on=["Chr", "BP"], how="inner")
align = pd.merge(df, key_to_align, on=["Chr", "BP"], how="inner")
aligned = _swap_effect_allele(align)
# print("aligned")
# print(aligned)
error = pd.merge(df, key_to_error, on=["Chr", "BP"], how="inner")
# print(error)
# print(aligned)
result = nochange.append(aligned).reset_index(drop=True)
# print(result)
sorted_result = sort_by_chr_bp(result)
if show_errors:
return pd.merge(error, merge_table[error_mask], on=["Chr", "BP"], how="inner")
print(str(nochange.shape[0]) + " rows were left unchanged (already aligned)")
print(str(aligned.shape[0]) + " rows were aligned successfully")
    print(str(error.shape[0]) + " rows failed to align, dropped from result! Set the show_errors flag to True to view them.")
return sorted_result
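# Example usage (sketch; `reference_df` is assumed to be another formatted summary-statistics table):
# aligned = align_effect_allele(reference_df, df_forward)
# problem_rows = align_effect_allele(reference_df, df_forward, show_errors=True)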
"""Function to save the processed data in gz or csv
Args:
output_path (str): the path you want the data to be saved.
df (pandas.DataFrame): the processed data to be saved.
name (str): the output name of the data.
        save_format (str): the saving format. Choose between 'gzip' or 'csv'. Default to 'gzip'.
Returns:
pandas.DataFrame: return filtered data in the form of pandas DataFrame
"""
def save_data(output_path, df, name, save_format="gzip"):
# TODO: add support for other compression format/ txt
if save_format == "gzip":
df_out = output_path + "/" + name +".gz"
try:
df.to_csv(df_out, compression='gzip', index=False)
return "successfully save"
except:
return "fail to save data"
elif save_format == "csv": # csv
df_out = output_path + "/" + name + ".csv"
try:
df.to_csv(df_out)
return "successfully save"
except:
return "fail to save data"
else:
print("format not accepted, use 'gzip' or 'csv' for the `save_format` argument")
return
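# Example usage (sketch; output directory and file name are assumptions):
# save_data("./output", aligned, "sumstats_cleaned", save_format="gzip")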
# ---------------------------------------------------------------------------------------------
# Helper Functions
# helper function to flip strand for one row
def _flip( allele):
new_allele = ""
if allele == "A":
new_allele = "T"
if allele == "T":
new_allele = "A"
if allele == "C":
new_allele = "G"
if allele == "G":
new_allele = "C"
return new_allele
# helper function to swap effect allele and align effect size
def _swap_effect_allele( df):
col_list = list(df)
col_list[3], col_list[4] = col_list[4], col_list[3]
df.columns = col_list
df["Beta"] = -1 * df["Beta"]
df["EAF"] = 1 - df["EAF"]
df = df[["Chr", "BP", "SNP", "A1", "A2", "EAF", "Beta", "Se", "P"]]
return df
# helper function to lift over
def _lift_over_basic( df, lo_dict):
cols = list(df.columns)
temp = []
lo = lo_dict["lo"]
input_version = lo_dict["input_version"]
output_version = lo_dict["output_version"]
for row in df.itertuples():
chrom = "chr" + str(row.Chr)
pos =row.BP
modified = lo.convert_coordinate(chrom, pos)
# print(i)
if modified:
new_chrom = modified[0][0][3:]
new_pos = modified[0][1]
temp.append([chrom[3:], pos, new_chrom, new_pos])
new_chr_name = output_version + '_' + 'chr'
new_pos_name = output_version + '_' + 'pos'
    temp_df = pd.DataFrame(temp, columns=["Chr", "BP", new_chr_name, new_pos_name])
###########################################################################################################
## IMPORTS
###########################################################################################################
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pickle
from keras.layers.advanced_activations import LeakyReLU, ELU, ReLU
from keras.models import Sequential, Model, model_from_json
from keras.layers import Activation, Convolution2D, Conv2D, LocallyConnected2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dense, Dropout, Input, concatenate, add, Add, ZeroPadding2D, GlobalMaxPooling2D, DepthwiseConv2D
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras.optimizers import Adam
from keras.regularizers import l2
#from keras.activations import linear, elu, tanh, relu
from keras import metrics, losses, initializers, backend
from keras.utils import multi_gpu_model
from keras.initializers import glorot_uniform, Constant, lecun_uniform
from keras import backend as K
os.environ["PATH"] += os.pathsep + "C:/ProgramData/Anaconda3/GraphViz/bin/"
os.environ["PATH"] += os.pathsep + "C:/Anaconda/Graphviz2.38/bin/"
from keras.utils.vis_utils import plot_model
from sklearn.model_selection import train_test_split
import tensorflow as tf
np.random.seed(42)
tf.random.set_seed(42)
tf.get_logger().setLevel('ERROR')
physical_devices = tf.config.list_physical_devices('GPU')
for pd_dev in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[pd_dev], True)
##from tensorflow.compat.v1.keras.backend import set_session
##config = tf.compat.v1.ConfigProto()
##config.gpu_options.per_process_gpu_memory_fraction = 0.9
##config.gpu_options.allow_growth = True
##config.log_device_placement = True
##set_session(config)
#config = tf.compat.v1.ConfigProto()
#config.gpu_options.allow_growth = True
#config.log_device_placement = True
#sess = tf.compat.v1.InteractiveSession(config = config)
#set_session(sess)
#backend.set_session(sess)
###########################################################################################################
## PLOTTING PALETTE
###########################################################################################################
# Create a dict object containing U.C. Berkeley official school colors for plot palette
# reference : https://alumni.berkeley.edu/brand/color-palette
berkeley_palette = {'berkeley_blue' : '#003262',
'california_gold' : '#FDB515',
'metallic_gold' : '#BC9B6A',
'founders_rock' : '#2D637F',
'medalist' : '#E09E19',
'bay_fog' : '#C2B9A7',
'lawrence' : '#00B0DA',
'sather_gate' : '#B9D3B6',
'pacific' : '#53626F',
'soybean' : '#9DAD33',
'california_purple' : '#5C3160',
'south_hall' : '#6C3302'}
###########################################################################################################
## CLASS CONTAINING MODEL ZOO
###########################################################################################################
class Models(object):
def __init__(self, model_path, **kwargs):
super(Models, self).__init__(** kwargs)
# validate that the constructor parameters were provided by caller
if (not model_path):
raise RuntimeError('path to model files must be provided on initialization.')
        # ensure all are strings and leading/trailing whitespace removed
model_path = str(model_path).replace('\\', '/').strip()
if (not model_path.endswith('/')): model_path = ''.join((model_path, '/'))
# validate the existence of the data path
if (not os.path.isdir(model_path)):
            raise RuntimeError("Models path specified '%s' is invalid." % model_path)
self.__models_path = model_path
self.__GPU_count = len(tf.config.list_physical_devices('GPU'))
self.__MIN_early_stopping = 10
#------------------------------------------------
# Private Methods
#------------------------------------------------
# plotting method for keras history arrays
def __plot_keras_history(self, history, metric, model_name, feature_name, file_name, verbose = False):
# Plot the performance of the model training
fig = plt.figure(figsize=(15,8),dpi=80)
ax = fig.add_subplot(121)
ax.plot(history.history[metric][1:], color = berkeley_palette['founders_rock'], label = 'Train',
marker = 'o', markersize = 4, alpha = 0.9)
ax.plot(history.history["".join(["val_",metric])][1:], color = berkeley_palette['medalist'], label = 'Validation',
marker = 'o', markersize = 4, alpha = 0.9)
ax.set_title(" ".join(['Model Performance',"(" + model_name + ")"]) + "\n" + feature_name,
color = berkeley_palette['berkeley_blue'], fontsize = 15, fontweight = 'bold')
ax.spines["top"].set_alpha(.0)
ax.spines["bottom"].set_alpha(.3)
ax.spines["right"].set_alpha(.0)
ax.spines["left"].set_alpha(.3)
ax.set_xlabel("Epoch", fontsize = 12, horizontalalignment='right', x = 1.0, color = berkeley_palette['berkeley_blue'])
ax.set_ylabel(metric, fontsize = 12, horizontalalignment='right', y = 1.0, color = berkeley_palette['berkeley_blue'])
plt.legend(loc = 'upper right')
ax = fig.add_subplot(122)
ax.plot(history.history['loss'][1:], color = berkeley_palette['founders_rock'], label = 'Train',
marker = 'o', markersize = 4, alpha = 0.9)
ax.plot(history.history["".join(["val_loss"])][1:], color = berkeley_palette['medalist'], label = 'Validation',
marker = 'o', markersize = 4, alpha = 0.9)
ax.set_title(" ".join(['Model Performance',"(" + model_name + ")"]) + "\n" + feature_name,
color = berkeley_palette['berkeley_blue'], fontsize = 15, fontweight = 'bold')
ax.spines["top"].set_alpha(.0)
ax.spines["bottom"].set_alpha(.3)
ax.spines["right"].set_alpha(.0)
ax.spines["left"].set_alpha(.3)
ax.set_xlabel("Epoch", fontsize = 12, horizontalalignment='right', x = 1.0, color = berkeley_palette['berkeley_blue'])
ax.set_ylabel("Loss", fontsize = 12, horizontalalignment='right', y = 1.0, color = berkeley_palette['berkeley_blue'])
plt.legend(loc = 'upper right')
plt.tight_layout()
plt.savefig(file_name, dpi=300)
if verbose: print("Training plot file saved to '%s'." % file_name)
plt.close()
# load Keras model files from json / h5
def __load_keras_model(self, model_name, model_file, model_json, verbose = False):
"""Loads a Keras model from disk"""
if not os.path.isfile(model_file):
raise RuntimeError("Model file '%s' does not exist; exiting inferencing." % model_file)
if not os.path.isfile(model_json):
raise RuntimeError("Model file '%s' does not exist; exiting inferencing." % model_json)
# load model file
if verbose: print("Retrieving model: %s..." % model_name)
json_file = open(model_json, "r")
model_json_data = json_file.read()
json_file.close()
model = model_from_json(model_json_data)
model.load_weights(model_file)
return model
# Performs standard scaling on a 4D image
def __4d_Scaler(self, arr, ss, fit = False, verbose = False):
"""Performs standard scaling of the 4D array with the 'ss' model provided by caller"""
#Unwinds a (instances, rows, columns, layers) array to 2D for standard scaling
num_instances, num_rows, num_columns, num_layers = arr.shape
arr_copy = np.reshape(arr, (-1, num_columns))
# fit the standard scaler
if fit:
if verbose: print("Fitting SCALER and transforming...")
arr_copy = ss.fit_transform(arr_copy)
else:
if verbose: print("Transforming SCALER only...")
arr_copy = ss.transform(arr_copy)
arr = np.reshape(arr_copy, (num_instances, num_rows, num_columns, num_layers))
return arr
# resnet identity block builder
def __identity_block(self, model, kernel_size, filters, stage, block):
"""modularized identity block for resnet"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2a')(model)
x = BatchNormalization(axis=3, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size,
padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2c')(x)
x = add([x, model])
x = Activation('relu')(x)
return x
# resnet conv block builder
def __conv_block(self, model, kernel_size, filters, stage, block, strides=(2, 2)):
"""conv block builder for resnet"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(model)
x = BatchNormalization(axis=3, name=bn_name_base + '2a')(x)
        x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(model)
shortcut = BatchNormalization(
axis=3, name=bn_name_base + '1')(shortcut)
x = add([x, shortcut])
x = Activation('relu')(x)
return x
# create a layerable inception module
def __inception_module(self, model, filters_1x1, filters_3x3_reduce, filters_3x3,
filters_5x5_reduce, filters_5x5, filters_pool_proj, kernel_init, bias_init, name = None):
"""modularized inception block for layering"""
# Connection Layer 1 (1x1)
conv_1x1 = Convolution2D(filters_1x1, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (model)
# Connection Layer 2 (3x3)
conv_3x3 = Convolution2D(filters_3x3_reduce, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (model)
conv_3x3 = Convolution2D(filters_3x3, (3, 3), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (conv_3x3)
# Connection Layer 3 (5x5)
conv_5x5 = Convolution2D(filters_5x5_reduce, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (model)
conv_5x5 = Convolution2D(filters_5x5, (5, 5), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (conv_5x5)
# Connection Layer 4 (pool)
pool_proj = MaxPooling2D((3, 3), strides = (1, 1), padding = 'same') (model)
pool_proj = Convolution2D(filters_pool_proj, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (pool_proj)
# Concatenation layer
output = concatenate(inputs = [conv_1x1, conv_3x3, conv_5x5, pool_proj], axis = 3, name = name)
return output
# return an InceptionV3 output tensor after applying Conv2D and BatchNormalization
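# the conv bias is omitted because BatchNormalization's beta provides the shift, and BN's scale
# (gamma) is disabled since it can be folded into the following layer; this mirrors the conv2d_bn
# helper used by Keras' InceptionV3.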
def __conv2d_bn(self, x, filters, num_row, num_col, padding = 'same', strides = (1, 1), name = None):
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
bn_axis = 3
x = Convolution2D(filters, (num_row, num_col), strides = strides,
padding = padding, use_bias = False, name = conv_name) (x)
x = BatchNormalization(axis = bn_axis, scale = False, name = bn_name) (x)
x = ReLU(name = name) (x)
return x
# a residual block for resnext
def __resnext_block(self, x, filters, kernel_size = 3, stride = 1, groups = 32, conv_shortcut = True, name = None):
if conv_shortcut is True:
shortcut = Conv2D((64 // groups) * filters, 1, strides = stride, use_bias = False, name = name + '_0_conv') (x)
shortcut = BatchNormalization(axis = 3, epsilon=1.001e-5, name = name + '_0_bn') (shortcut)
else:
shortcut = x
x = Conv2D(filters, 1, use_bias = False, name = name + '_1_conv') (x)
x = BatchNormalization(axis = 3, epsilon = 1.001e-5, name = name + '_1_bn') (x)
x = Activation('relu', name = name + '_1_relu') (x)
c = filters // groups
x = ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = DepthwiseConv2D(kernel_size, strides = stride, depth_multiplier = c, use_bias = False, name = name + '_2_conv') (x)
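# the constant 1x1 kernel built below sums the depthwise outputs belonging to the same group,
# emulating a grouped convolution with `groups` groups of width c (the same trick used by the
# keras_applications ResNeXt implementation); it is frozen via trainable = False.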
kernel = np.zeros((1, 1, filters * c, filters), dtype = np.float32)
for i in range(filters):
start = (i // c) * c * c + i % c
end = start + c * c
kernel[:, :, start:end:c, i] = 1.
x = Conv2D(filters, 1, use_bias = False, trainable = False, kernel_initializer = {'class_name': 'Constant','config': {'value': kernel}}, name = name + '_2_gconv') (x)
x = BatchNormalization(axis=3, epsilon = 1.001e-5, name = name + '_2_bn') (x)
x = Activation('relu', name=name + '_2_relu') (x)
x = Conv2D((64 // groups) * filters, 1, use_bias = False, name = name + '_3_conv') (x)
x = BatchNormalization(axis = 3, epsilon=1.001e-5, name = name + '_3_bn') (x)
x = Add(name = name + '_add') ([shortcut, x])
x = Activation('relu', name = name + '_out') (x)
return x
# a set of stacked residual blocks for ResNeXt
def __resnext_stack(self, x, filters, blocks, stride1 = 2, groups = 32, name = None, dropout = None):
x = self.__resnext_block(x, filters, stride = stride1, groups = groups, name = name + '_block1')
for i in range(2, blocks + 1):
x = self.__resnext_block(x, filters, groups = groups, conv_shortcut = False,
name = name + '_block' + str(i))
if dropout is not None:
x = Dropout(dropout) (x)
return x
def __bn_relu(self, x, bn_name = None, relu_name = None):
norm = BatchNormalization(axis = 3, name = bn_name) (x)
return Activation("relu", name = relu_name) (norm)
def __bn_relu_conv(self, **conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
activation = self.__bn_relu(x, bn_name = bn_name, relu_name = relu_name)
return Conv2D(filters = filters, kernel_size = kernel_size,
strides = strides, padding = padding,
dilation_rate = dilation_rate,
kernel_initializer = kernel_initializer,
kernel_regularizer = kernel_regularizer,
name = conv_name) (activation)
return f
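# NOTE: __bn_relu_conv applies the pre-activation ordering (BN -> ReLU -> conv) from ResNet v2
# ("Identity Mappings in Deep Residual Networks"); __conv_bn_relu below keeps the original
# post-activation ordering (conv -> BN -> ReLU).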
def __conv_bn_relu(self, **conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
x = Conv2D(filters = filters, kernel_size = kernel_size,
strides = strides, padding = padding,
dilation_rate = dilation_rate,
kernel_initializer = kernel_initializer,
kernel_regularizer = kernel_regularizer,
name = conv_name) (x)
return self.__bn_relu(x, bn_name = bn_name, relu_name = relu_name)
return f
def __block_name_base(self, stage, block):
if block < 27:
block = '%c' % (block + 97) # 97 is the ascii number for lowercase 'a'
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
return conv_name_base, bn_name_base
def __shortcut(self, input_feature, residual, conv_name_base = None, bn_name_base = None):
input_shape = K.int_shape(input_feature)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[1] / residual_shape[1]))
stride_height = int(round(input_shape[2] / residual_shape[2]))
equal_channels = input_shape[3] == residual_shape[3]
shortcut = input_feature
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
print('reshaping via a convolution...')
if conv_name_base is not None:
conv_name_base = conv_name_base + '1'
shortcut = Conv2D(filters=residual_shape[3],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001),
name=conv_name_base)(input_feature)
if bn_name_base is not None:
bn_name_base = bn_name_base + '1'
shortcut = BatchNormalization(axis=3,
name=bn_name_base)(shortcut)
return add([shortcut, residual])
def __basic_block(self, filters, stage, block, transition_strides = (1, 1),
dilation_rate = (1, 1), is_first_block_of_first_layer = False, dropout = None,
residual_unit = None):
def f(input_features):
conv_name_base, bn_name_base = self.__block_name_base(stage, block)
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
x = Conv2D(filters = filters, kernel_size = (3, 3),
strides = transition_strides, dilation_rate = dilation_rate,
padding = "same", kernel_initializer = "he_normal", kernel_regularizer = l2(1e-4),
name = conv_name_base + '2a') (input_features)
else:
x = residual_unit(filters = filters, kernel_size = (3, 3),
strides = transition_strides,
dilation_rate = dilation_rate,
conv_name_base = conv_name_base + '2a',
bn_name_base = bn_name_base + '2a') (input_features)
if dropout is not None:
x = Dropout(dropout) (x)
x = residual_unit(filters = filters, kernel_size = (3, 3),
conv_name_base = conv_name_base + '2b',
bn_name_base = bn_name_base + '2b') (x)
return self.__shortcut(input_features, x)
return f
def __bottleneck(self, filters, stage, block, transition_strides = (1, 1),
dilation_rate = (1, 1), is_first_block_of_first_layer = False, dropout = None,
residual_unit = None):
"""Bottleneck architecture for > 34 layer resnet.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
Returns:
A final conv layer of filters * 4
"""
def f(input_feature):
conv_name_base, bn_name_base = self.__block_name_base(stage, block)
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
x = Conv2D(filters=filters, kernel_size=(1, 1),
strides=transition_strides,
dilation_rate=dilation_rate,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4),
name=conv_name_base + '2a')(input_feature)
else:
x = residual_unit(filters=filters, kernel_size=(1, 1),
strides=transition_strides,
dilation_rate=dilation_rate,
conv_name_base=conv_name_base + '2a',
bn_name_base=bn_name_base + '2a')(input_feature)
if dropout is not None:
x = Dropout(dropout)(x)
x = residual_unit(filters=filters, kernel_size=(3, 3),
conv_name_base=conv_name_base + '2b',
bn_name_base=bn_name_base + '2b')(x)
if dropout is not None:
x = Dropout(dropout)(x)
x = residual_unit(filters=filters * 4, kernel_size=(1, 1),
conv_name_base=conv_name_base + '2c',
bn_name_base=bn_name_base + '2c')(x)
return self.__shortcut(input_feature, x)
return f
# builds a residual stage for resnet by repeating the given block function (basic or bottleneck)
def __residual_block(self, block_function, filters, blocks, stage, transition_strides = None, transition_dilation_rates = None,
dilation_rates = None, is_first_layer = False, dropout = None, residual_unit = None):
if transition_dilation_rates is None:
transition_dilation_rates = [(1, 1)] * blocks
if transition_strides is None:
transition_strides = [(1, 1)] * blocks
if dilation_rates is None:
dilation_rates = [1] * blocks
def f(x):
for i in range(blocks):
is_first_block = is_first_layer and i == 0
x = block_function(filters=filters, stage=stage, block=i,
transition_strides=transition_strides[i],
dilation_rate=dilation_rates[i],
is_first_block_of_first_layer=is_first_block,
dropout=dropout,
residual_unit=residual_unit)(x)
return x
return f
######################################################
######################################################
######################################################
### KERAS MODEL ZOO
######################################################
######################################################
######################################################
#------------------------------------------------
# NaimishNet Model
# ref: https://arxiv.org/abs/1710.00977
#------------------------------------------------
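# Architecture sketch: four Conv2D + ELU + MaxPooling + Dropout stages (32, 64, 128, 256 filters
# with shrinking 4x4 -> 3x3 -> 2x2 -> 1x1 kernels), then Dense(1000, elu) -> Dense(1000, linear)
# -> Dense(2), i.e. one (x, y) keypoint pair per trained model.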
def get_keras_naimishnet(self, X, Y, batch_size, epoch_count, X_val = None, Y_val = None, val_split = 0.1, shuffle = True,
feature_name = "unknown", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - NaimishNet"
__MODEL_FNAME_PREFIX = "KERAS_NAIMISHNET/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_model_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, "_", feature_name, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
#ke = initializers.lecun_uniform(seed = 42)
ke = 'glorot_uniform'
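# early-stopping patience is 10% of the requested epochs, floored at self.__MIN_early_stopping;
# the checkpoint keeps only the weights with the lowest validation MAE.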
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
l1 = Input((96, 96, 1))
l2 = Convolution2D(32, (4, 4), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l1)
#l3 = ELU() (l2)
l3 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l2)
l4 = Dropout(rate = 0.1) (l3)
l5 = Convolution2D(64, (3, 3), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l4)
#l7 = ELU() (l6)
l6 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l5)
l7 = Dropout(rate = 0.2) (l6)
l8 = Convolution2D(128, (2, 2), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l7)
#l11 = ELU() (l10)
l9 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l8)
l10 = Dropout(rate = 0.3) (l9)
l11 = Convolution2D(256, (1, 1), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l10)
#l15 = ELU() (l14)
l12 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l11)
l13 = Dropout(rate = 0.4) (l12)
l14 = Flatten() (l13)
l15 = Dense(1000, activation = 'elu') (l14)
#l20 = ELU() (l19)
l16 = Dropout(rate = 0.5) (l15)
#l22 = Dense(1000) (l21)
#l23 = linear(l22)
l17 = Dense(1000, activation = 'linear') (l16)
l18 = Dropout(rate = 0.6) (l17)
l19 = Dense(2) (l18)
model = Model(inputs = [l1], outputs = [l19])
model.compile(optimizer = act, loss = lss, metrics = mtrc)
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', model_name = __MODEL_NAME,
feature_name = feature_name, file_name = __history_plot_file, verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
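# Example (hypothetical) usage, assuming a trainer instance `t` and 96x96x1 image arrays X / Y:
#   model, params, hist = t.get_keras_naimishnet(X, Y, batch_size = 64, epoch_count = 100,
#                                                feature_name = "left_eye_center", full = True)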
# inferencing
def predict_keras_naimishnet(self, X, feature_name = "unknown", full = True, verbose = False):
__MODEL_NAME = "Keras - NaimishNet"
__MODEL_FNAME_PREFIX = "KERAS_NAIMISHNET/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".json"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" %
(__model_file_name, __model_json_file))
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#------------------------------------------------
# Kaggle1 Model
# Inspired by: https://www.kaggle.com/balraj98/data-augmentation-for-facial-keypoint-detection
#------------------------------------------------
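# Architecture sketch: VGG-style pairs of 3x3 convolutions (32 -> 64 -> 96 -> 128 -> 256 -> 512
# -> 1024 filters) with LeakyReLU + BatchNorm, MaxPooling + Dropout between stages, then
# Flatten -> Dense(1024, relu) -> Dropout -> Dense(30) (or Dense(8) for the reduced feature set).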
def get_keras_kaggle1(self, X, Y, batch_size, epoch_count, val_split = 0.05, X_val = None, Y_val = None, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - Kaggle1"
__MODEL_FNAME_PREFIX = "KERAS_KAGGLE1/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
#act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
act = 'adam'
#lss = losses.mean_squared_error
lss = 'mean_squared_error'
#mtrc = [metrics.RootMeanSquaredError()]
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
model = Sequential()
# Input dimensions: (None, 96, 96, 1)
model.add(Convolution2D(32, (3,3), padding='same', use_bias=False, input_shape=(96,96,1)))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 96, 96, 32)
model.add(Convolution2D(32, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.2))
# Input dimensions: (None, 48, 48, 32)
model.add(Convolution2D(64, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 48, 48, 64)
model.add(Convolution2D(64, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.25))
# Input dimensions: (None, 24, 24, 64)
model.add(Convolution2D(96, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 24, 24, 96)
model.add(Convolution2D(96, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.15))
# Input dimensions: (None, 12, 12, 96)
model.add(Convolution2D(128, (3,3),padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 12, 12, 128)
model.add(Convolution2D(128, (3,3),padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.3))
# Input dimensions: (None, 6, 6, 128)
model.add(Convolution2D(256, (3,3),padding='same',use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 6, 6, 256)
model.add(Convolution2D(256, (3,3),padding='same',use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.2))
# Input dimensions: (None, 3, 3, 256)
model.add(Convolution2D(512, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 3, 3, 512)
model.add(Convolution2D(512, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# TEST added 4/8
model.add(Dropout(0.3))
model.add(Convolution2D(1024, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 3, 3, 1024)
model.add(Convolution2D(1024, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 3, 3, 1024)
model.add(Flatten())
model.add(Dense(1024,activation='relu'))
# CDB: dropout rate adjusted (currently 0.15)
model.add(Dropout(0.15))
if full:
model.add(Dense(30))
else:
model.add(Dense(8))
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', #metric = 'root_mean_squared_error',
model_name = __MODEL_NAME, feature_name = feature_name, file_name = __history_plot_file,
verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_kaggle1(self, X, feature_name = "unknown", full = True, verbose = False):
__MODEL_NAME = "Keras - Kaggle1"
__MODEL_FNAME_PREFIX = "KERAS_KAGGLE1/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):## or (not os.path.isfile(__scaler_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" % ##'%s'\n" %
(__model_file_name, __model_json_file))##, __scaler_file))
# Load the training scaler for this model
##if verbose: print("Loading SCALER for '%s' and zero-centering X." % feature_name)
##scaler = pickle.load(open(__scaler_file, "rb"))
##X = self.__4d_Scaler(arr = X, ss = scaler, fit = False, verbose = verbose)
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#-------------------------------------------------------------
# LeNet5 Model
# Inspired by: LeCun's LeNet-5 for MNIST - Modified
#-------------------------------------------------------------
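# Architecture sketch: two Conv2D + ReLU + AveragePooling stages (6 and 16 filters), then
# Flatten -> Dense(512) -> Dense(256) -> Dense(30) (or Dense(8)); a lightly modified LeNet-5.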
def get_keras_lenet5(self, X, Y, batch_size, epoch_count, X_val = None, Y_val = None, val_split = 0.1, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - LeNet5"
__MODEL_FNAME_PREFIX = "KERAS_LENET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
#if (X_val is None) or (Y_val is None):
# if verbose: print("No validation set specified; creating a split based on %.2f val_split parameter." % val_split)
# X, Y, X_val, Y_val = train_test_split(X, Y, test_size = val_split, random_state = 42)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
model = Sequential()
model.add(Convolution2D(filters = 6, kernel_size = (3, 3), input_shape = (96, 96, 1)))
model.add(ReLU())
# CDB: 3/5 added Batch Normalization
#model.add(BatchNormalization())
model.add(AveragePooling2D())
#model.add(Dropout(0.2))
model.add(Convolution2D(filters = 16, kernel_size = (3, 3)))
model.add(ReLU())
# CDB: 3/5 added Batch Normalization
#model.add(BatchNormalization())
model.add(AveragePooling2D())
#model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(ReLU())
#model.add(Dropout(0.1))
model.add(Dense(256))
model.add(ReLU())
#model.add(Dropout(0.2))
if full:
model.add(Dense(30))
else:
model.add(Dense(8))
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', model_name = __MODEL_NAME,
feature_name = feature_name, file_name = __history_plot_file, verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
if verbose: print("Model JSON, history, and parameters file saved.")
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_lenet5(self, X, feature_name = "ALL_FEATURES", full = True, verbose = False):
__MODEL_NAME = "Keras - LeNet5"
__MODEL_FNAME_PREFIX = "KERAS_LENET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" %
(__model_file_name, __model_json_file))
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#-------------------------------------------------------------
# Inception V1
# Inspired by : https://arxiv.org/abs/1409.4842
#-------------------------------------------------------------
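# Architecture sketch: GoogLeNet-style stem (7x7/2 conv, max pools, 1x1 and 3x3 convs), inception
# modules 3a through 5b, two auxiliary regression heads branching after 4a and 4d, and a main head
# after global average pooling; fit() feeds the same targets to all three outputs.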
def get_keras_inception(self, X, Y, batch_size, epoch_count, val_split = 0.1, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, X_val = None, Y_val = None, full = True, verbose = False):
__MODEL_NAME = "Keras - Inception"
__MODEL_FNAME_PREFIX = "KERAS_INCEPTION/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_MAIN_name = "".join([nested_dir, "inception_MAIN_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX1_name = "".join([nested_dir, "inception_AUX1_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX2_name = "".join([nested_dir, "inception_AUX2_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, ".json"])
__model_architecture_plot_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_plot.png"])
__history_params_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file_main = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_main_output_mse_plot.png"])
__history_plot_file_auxilliary1 = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_auxilliary_output_1_mse_plot.png"])
__history_plot_file_auxilliary2 = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_auxilliary_output_2_mse_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_MAIN_name)) or (not os.path.isfile(__model_file_AUX1_name)) or (not os.path.isfile(__model_file_AUX2_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % __MODEL_NAME)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp_main = ModelCheckpoint(filepath = __model_file_MAIN_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_main_output_mae')
cp_aux1 = ModelCheckpoint(filepath = __model_file_AUX1_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_auxilliary_output_1_mae')
cp_aux2 = ModelCheckpoint(filepath = __model_file_AUX2_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_auxilliary_output_2_mae')
kernel_init = glorot_uniform()
bias_init = Constant(value = 0.2)
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
# Input image shape (H, W, C)
input_img = Input(shape=(96, 96, 1))
# Top Layer (Begin MODEL)
model = Convolution2D(filters = 64, kernel_size = (7, 7), padding = 'same', strides = (2, 2),
activation = 'relu', name = 'conv_1_7x7/2', kernel_initializer = kernel_init,
bias_initializer = bias_init) (input_img)
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name = 'max_pool_1_3x3/2') (model)
model = Convolution2D(64, (1, 1), padding = 'same', strides = (1, 1), activation = 'relu', name = 'conv_2a_3x3/1') (model)
model = Convolution2D(192, (3, 3), padding = 'same', strides = (1, 1), activation = 'relu', name = 'conv_2b_3x3/1') (model)
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name = 'max_pool_2_3x3/2') (model)
# Inception Module
model = self.__inception_module(model,
filters_1x1 = 64,
filters_3x3_reduce = 96,
filters_3x3 = 128,
filters_5x5_reduce = 16,
filters_5x5 = 32,
filters_pool_proj = 32,
kernel_init = kernel_init,
bias_init = bias_init,
name = 'inception_3a')
# Inception Module
model = self.__inception_module(model,
filters_1x1 = 128,
filters_3x3_reduce = 128,
filters_3x3 = 192,
filters_5x5_reduce = 32,
filters_5x5 = 96,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name = 'inception_3b')
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name= 'max_pool_3_3x3/2') (model)
# Inception Module
model = self.__inception_module(model,
filters_1x1 = 192,
filters_3x3_reduce = 96,
filters_3x3 = 208,
filters_5x5_reduce = 16,
filters_5x5 = 48,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name = 'inception_4a')
# CDB 3/5 DROPOUT ADDED
model = Dropout(0.2) (model)
# Begin MODEL1 (auxillary output)
model1 = AveragePooling2D((5, 5), padding = 'same', strides = 3, name= 'avg_pool_4_5x5/2') (model)
model1 = Convolution2D(128, (1, 1), padding = 'same', activation = 'relu') (model1)
model1 = Flatten() (model1)
model1 = Dense(1024, activation = 'relu') (model1)
model1 = Dropout(0.3) (model1)
if full:
model1 = Dense(30, name = 'auxilliary_output_1') (model1)
else:
model1 = Dense(8, name = 'auxilliary_output_1') (model1)
# Resume MODEL w/ Inception
model = self.__inception_module(model,
filters_1x1 = 160,
filters_3x3_reduce = 112,
filters_3x3 = 224,
filters_5x5_reduce = 24,
filters_5x5 = 64,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4b')
model = self.__inception_module(model,
filters_1x1 = 128,
filters_3x3_reduce = 128,
filters_3x3 = 256,
filters_5x5_reduce = 24,
filters_5x5 = 64,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4c')
model = self.__inception_module(model,
filters_1x1 = 112,
filters_3x3_reduce = 144,
filters_3x3 = 288,
filters_5x5_reduce = 32,
filters_5x5 = 64,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4d')
# CDB : 3/5 DROPOUT ADDED
model = Dropout(0.2) (model)
# Begin MODEL2 (auxilliary output)
model2 = AveragePooling2D((5, 5), strides = 3) (model)
model2 = Convolution2D(128, (1, 1), padding = 'same', activation = 'relu') (model2)
model2 = Flatten() (model2)
model2 = Dense(1024, activation = 'relu') (model2)
model2 = Dropout(0.3) (model2)
if full:
model2 = Dense(30, name = 'auxilliary_output_2') (model2)
else:
model2 = Dense(8, name = 'auxilliary_output_2') (model2)
# Resume MODEL w/ Inception
model = self.__inception_module(model,
filters_1x1 = 256,
filters_3x3_reduce = 160,
filters_3x3 = 320,
filters_5x5_reduce = 32,
filters_5x5 = 128,
filters_pool_proj = 128,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4e')
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name = 'max_pool_4_3x3/2') (model)
model = self.__inception_module(model,
filters_1x1 = 256,
filters_3x3_reduce = 160,
filters_3x3 = 320,
filters_5x5_reduce = 32,
filters_5x5 = 128,
filters_pool_proj = 128,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_5a')
model = self.__inception_module(model,
filters_1x1 = 384,
filters_3x3_reduce = 192,
filters_3x3 = 384,
filters_5x5_reduce = 48,
filters_5x5 = 128,
filters_pool_proj = 128,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_5b')
model = GlobalAveragePooling2D(name = 'avg_pool_5_3x3/1') (model)
model = Dropout(0.3) (model)
# Output Layer (Main)
if full:
model = Dense(30, name = 'main_output') (model)
else:
model = Dense(8, name = 'main_output') (model)
model = Model(input_img, [model, model1, model2], name = 'Inception')
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, [Y, Y, Y], validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main, cp_aux1, cp_aux2], verbose = verbose)
else:
history = parallel_model.fit(X, [Y, Y, Y], validation_data = (X_val, [Y_val, Y_val, Y_val]), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main, cp_aux1, cp_aux2], verbose = verbose)
# print and/or save a performance plot
for m, f in zip(['main_output_mse', 'auxilliary_output_1_mse', 'auxilliary_output_2_mse'],
[__history_plot_file_main, __history_plot_file_auxilliary1, __history_plot_file_auxilliary2]):
try:
self.__plot_keras_history(history = history, metric = m, model_name = __MODEL_NAME,
feature_name = feature_name, file_name = f, verbose = False)
except Exception:
print("error during history plot generation; skipped.")
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
# save a plot of the model architecture
try:
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
except Exception:
print("error during model plot generation; skipped.")
if verbose: print("Model JSON, history, and parameters file saved.")
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL (MAIN) from file '%s'" % (__MODEL_NAME, __model_file_MAIN_name))
main_model = self.__load_keras_model(__MODEL_NAME, __model_file_MAIN_name, __model_json_file, verbose = verbose)
if verbose: print("Loading pickle file for '%s' MODEL (AUX1) from file '%s'" % (__MODEL_NAME, __model_file_AUX1_name))
aux1_model = self.__load_keras_model(__MODEL_NAME, __model_file_AUX1_name, __model_json_file, verbose = verbose)
if verbose: print("Loading pickle file for '%s' MODEL (AUX2) from file '%s'" % (__MODEL_NAME, __model_file_AUX2_name))
aux2_model = self.__load_keras_model(__MODEL_NAME, __model_file_AUX2_name, __model_json_file, verbose = verbose)
return main_model, aux1_model, aux2_model, hist_params, hist
# inferencing
def predict_keras_inception(self, X, feature_name = "ALL_FEATURES", full = True, verbose = False):
__MODEL_NAME = "Keras - Inception"
__MODEL_FNAME_PREFIX = "KERAS_INCEPTION/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_MAIN_name = "".join([nested_dir, "inception_MAIN_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX1_name = "".join([nested_dir, "inception_AUX1_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX2_name = "".join([nested_dir, "inception_AUX2_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, ".json"])
if (not os.path.isfile(__model_file_MAIN_name)) or (not os.path.isfile(__model_file_AUX1_name)) or (not os.path.isfile(__model_file_AUX2_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n'%s'\n'%s'\n\n" %
(__model_file_MAIN_name, __model_file_AUX1_name, __model_file_AUX2_name, __model_json_file))
# load the Keras model for the specified feature
main_model = self.__load_keras_model(__MODEL_NAME, __model_file_MAIN_name, __model_json_file, verbose = verbose)
aux1_model = self.__load_keras_model(__MODEL_NAME, __model_file_AUX1_name, __model_json_file, verbose = verbose)
aux2_model = self.__load_keras_model(__MODEL_NAME, __model_file_AUX2_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for 'MAIN' model file..." % len(X))
Y_main = main_model.predict(X, verbose = verbose)
Y_main_columns = [node.op.name for node in main_model.outputs]
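# tensor op names (TF1-style node.op.name) are captured for each output so the caller can tell
# which prediction array came from which named head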
if verbose: print("Predicting %d (x,y) coordinates for 'AUX1' model file..." % len(X))
Y_aux1 = aux1_model.predict(X, verbose = verbose)
Y_aux1_columns = [node.op.name for node in aux1_model.outputs]
if verbose: print("Predicting %d (x,y) coordinates for 'AUX2' model file..." % len(X))
Y_aux2 = aux2_model.predict(X, verbose = verbose)
Y_aux2_columns = [node.op.name for node in aux2_model.outputs]
if verbose: print("Predictions completed!")
return Y_main, Y_aux1, Y_aux2, Y_main_columns, Y_aux1_columns, Y_aux2_columns
#------------------------------------------------
# ConvNet5 Simple Model
#------------------------------------------------
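# Architecture sketch: five Conv2D + BatchNorm + MaxPooling + Dropout stages (16 -> 256 filters),
# then Flatten -> Dense(1024) -> Dense(512) -> Dense(30) (or Dense(8)).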
def get_keras_convnet5(self, X, Y, batch_size, epoch_count, val_split = 0.1, X_val = None, Y_val = None, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - ConvNet5"
__MODEL_FNAME_PREFIX = "KERAS_CONVNET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
model = Sequential(name = 'ConvNet5')
# Input dimensions: (None, 96, 96, 1)
model.add(Convolution2D(16, (3,3), padding = 'same', activation = 'relu', input_shape=(96,96,1)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
model.add(Convolution2D(32, (3,3), padding = 'same', activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Convolution2D(64, (3,3), padding = 'same', activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
model.add(Convolution2D(128, (3,3), padding = 'same', activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(256, (3,3), padding = 'same', activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.15))
model.add(Flatten())
model.add(Dense(1024, activation = 'relu'))
model.add(Dropout(0.1))
model.add(Dense(512, activation = 'relu'))
model.add(Dropout(0.1))
if full:
model.add(Dense(30))
else:
model.add(Dense(8))
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse',
model_name = __MODEL_NAME, feature_name = feature_name, file_name = __history_plot_file,
verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_convnet5(self, X, feature_name = "unknown", full = True, verbose = False):
__MODEL_NAME = "Keras - ConvNet5"
__MODEL_FNAME_PREFIX = "KERAS_CONVNET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" %
(__model_file_name, __model_json_file))
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#-------------------------------------------------------------
# Inception V3
# Inspired by : http://arxiv.org/abs/1512.00567
#-------------------------------------------------------------
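# Architecture sketch: InceptionV3-style 'mixed' blocks built from __conv2d_bn, adapted to the
# 96x96x1 grayscale input; only a single MAIN checkpoint / regression head is tracked in this
# variant (no auxiliary outputs are saved).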
def get_keras_inceptionv3(self, X, Y, batch_size, epoch_count, val_split = 0.1, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, X_val = None, Y_val = None, full = True, verbose = False):
__MODEL_NAME = "Keras - Inceptionv3"
__MODEL_FNAME_PREFIX = "KERAS_INCEPTIONV3/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_MAIN_name = "".join([nested_dir, "inception_MAIN_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, ".json"])
__model_architecture_plot_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_plot.png"])
__history_params_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file_main = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_main_output_mse_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_MAIN_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % __MODEL_NAME)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp_main = ModelCheckpoint(filepath = __model_file_MAIN_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
kernel_init = glorot_uniform()
bias_init = Constant(value = 0.2)
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
# Input image shape (H, W, C)
input_img = Input(shape = (96, 96, 1))
# Begin Inception V3
x = self.__conv2d_bn(x = input_img, filters = 32, num_row = 3, num_col = 3, strides = (2, 2), padding = 'valid')
x = self.__conv2d_bn(x = x, filters = 32, num_row = 3, num_col = 3, strides = (1, 1), padding = 'valid')
x = self.__conv2d_bn(x = x, filters = 64, num_row = 3, num_col = 3, strides = (1, 1), padding = 'same')
x = MaxPooling2D((3, 3), strides = (2, 2)) (x)
x = self.__conv2d_bn(x = x, filters = 80, num_row = 1, num_col = 1, strides = (1, 1), padding = 'valid')
x = self.__conv2d_bn(x = x, filters = 192, num_row = 3, num_col = 3, strides = (1, 1), padding = 'valid')
x = MaxPooling2D((3, 3), strides = (2, 2)) (x)
branch1x1 = self.__conv2d_bn(x = x, filters = 64, num_row = 1, num_col = 1, strides = (1, 1), padding = 'same')
branch5x5 = self.__conv2d_bn(x = x, filters = 48, num_row = 1, num_col = 1, strides = (1, 1), padding = 'same')
branch5x5 = self.__conv2d_bn(x = branch5x5, filters = 64, num_row = 5, num_col = 5, strides = (1, 1), padding = 'same')
branch3x3dbl = self.__conv2d_bn(x = x, filters = 64, num_row = 1, num_col = 1, strides = (1, 1), padding = 'same')
branch3x3dbl = self.__conv2d_bn(x = branch3x3dbl, filters = 96, num_row = 3, num_col = 3, strides = (1, 1), padding = 'same')
branch3x3dbl = self.__conv2d_bn(x = branch3x3dbl, filters = 96, num_row = 3, num_col = 3, strides = (1, 1), padding = 'same')
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(x = branch_pool, filters = 32, num_row = 1, num_col = 1, strides = (1, 1), padding = 'same')
x = concatenate( [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis = 3, name = 'mixed0')
branch1x1 = self.__conv2d_bn(x, 64, 1, 1)
branch5x5 = self.__conv2d_bn(x, 48, 1, 1)
branch5x5 = self.__conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = self.__conv2d_bn(x, 64, 1, 1)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 64, 1, 1)
x = concatenate( [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis = 3, name = 'mixed1')
branch1x1 = self.__conv2d_bn(x, 64, 1, 1)
branch5x5 = self.__conv2d_bn(x, 48, 1, 1)
branch5x5 = self.__conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = self.__conv2d_bn(x, 64, 1, 1)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 64, 1, 1)
x = concatenate( [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis = 3, name = 'mixed2')
branch3x3 = self.__conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = self.__conv2d_bn(x, 64, 1, 1)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2)) (x)
x = concatenate( [branch3x3, branch3x3dbl, branch_pool], axis = 3, name = 'mixed3')
branch1x1 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7 = self.__conv2d_bn(x, 128, 1, 1)
branch7x7 = self.__conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = self.__conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(x, 128, 1, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 192, 1, 1)
x = concatenate( [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis = 3, name = 'mixed4')
for i in range(2):
branch1x1 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7 = self.__conv2d_bn(x, 160, 1, 1)
branch7x7 = self.__conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = self.__conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(x, 160, 1, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 192, 1, 1)
x = concatenate( [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis = 3, name = 'mixed' + str(5 + i))
branch1x1 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7 = self.__conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = self.__conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(x, 192, 1, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = self.__conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides = (1, 1), padding = 'same') (x)
branch_pool = self.__conv2d_bn(branch_pool, 192, 1, 1)
x = concatenate( [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis = 3, name = 'mixed7')
branch3x3 = self.__conv2d_bn(x, 192, 1, 1)
branch3x3 = self.__conv2d_bn(branch3x3, 320, 3, 3,strides=(2, 2), padding='valid')
branch7x7x3 = self.__conv2d_bn(x, 192, 1, 1)
branch7x7x3 = self.__conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = self.__conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = self.__conv2d_bn(branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = concatenate( [branch3x3, branch7x7x3, branch_pool], axis = 3, name = 'mixed8')
for i in range(2):
branch1x1 = self.__conv2d_bn(x, 320, 1, 1)
branch3x3 = self.__conv2d_bn(x, 384, 1, 1)
branch3x3_1 = self.__conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = self.__conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = concatenate( [branch3x3_1, branch3x3_2], axis = 3, name = 'mixed9_' + str(i))
branch3x3dbl = self.__conv2d_bn(x, 448, 1, 1)
branch3x3dbl = self.__conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = self.__conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = self.__conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = concatenate( [branch3x3dbl_1, branch3x3dbl_2], axis = 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = self.__conv2d_bn(branch_pool, 192, 1, 1)
x = concatenate( [branch1x1, branch3x3, branch3x3dbl, branch_pool], axis = 3, name = 'mixed' + str(9 + i))
x = GlobalAveragePooling2D(name = 'avg_pool') (x)
x = Dropout(0.3) (x)
if full:
x = Dense(30) (x)
else:
x = Dense(8) (x)
model = Model(input_img, x, name = 'InceptionV3')
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
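            # Note: multi_gpu_model is the legacy multi-GPU path; with a plain
            # tf.distribute.MirroredStrategy setup one would normally build and compile the
            # model directly inside strategy.scope() instead of wrapping it here.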
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse',
model_name = __MODEL_NAME, feature_name = feature_name, file_name = __history_plot_file_main,
verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
# save a plot of the model architecture
try:
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
            except Exception as ex:
                print("Error during model plot generation; skipped (%s)." % ex)
if verbose: print("Model JSON, history, and parameters file saved.")
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL (MAIN) from file '%s'" % (__MODEL_NAME, __model_file_MAIN_name))
main_model = self.__load_keras_model(__MODEL_NAME, __model_file_MAIN_name, __model_json_file, verbose = verbose)
return main_model, hist_params, hist
# inferencing
def predict_keras_inceptionv3(self, X, feature_name = "ALL_FEATURES", full = True, verbose = False):
__MODEL_NAME = "Keras - InceptionV3"
__MODEL_FNAME_PREFIX = "KERAS_INCEPTIONV3/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_MAIN_name = "".join([nested_dir, "inception_MAIN_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, ".json"])
if (not os.path.isfile(__model_file_MAIN_name)) or (not os.path.isfile(__model_json_file)):
            raise RuntimeError("One or more of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n\n" %
                (__model_file_MAIN_name, __model_json_file))
# load the Keras model for the specified feature
main_model = self.__load_keras_model(__MODEL_NAME, __model_file_MAIN_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for 'MAIN' model file..." % len(X))
Y_main = main_model.predict(X, verbose = verbose)
Y_main_columns = [node.op.name for node in main_model.outputs]
if verbose: print("Predictions completed!")
return Y_main, Y_main_columns
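    # A minimal usage sketch for the InceptionV3 train/predict pair (the `trainer` instance
    # name and the argument values are illustrative assumptions; the training method name is
    # inferred from the get_keras_* pattern used by the other models in this class):
    #
    #   model, params, hist = trainer.get_keras_inceptionv3(X_train, Y_train,
    #                                                       batch_size = 32, epoch_count = 100)
    #   Y_pred, pred_cols = trainer.predict_keras_inceptionv3(X_test, full = True)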
#------------------------------------------------
# Kaggle2 Model
#------------------------------------------------
def get_keras_kaggle2(self, X, Y, batch_size, epoch_count, val_split = 0.05, X_val = None, Y_val = None, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - Kaggle2"
__MODEL_FNAME_PREFIX = "KERAS_KAGGLE2/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
#act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
act = 'adam'
#lss = losses.mean_squared_error
lss = 'mean_squared_error'
#mtrc = [metrics.RootMeanSquaredError()]
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
model = Sequential()
# Input dimensions: (None, 96, 96, 1)
model.add(Convolution2D(32, (3,3), padding='valid', use_bias = True, input_shape = (96,96,1)))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Convolution2D(64, (3,3), padding = 'valid', use_bias = True))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Convolution2D(128, (3,3), padding = 'valid', use_bias = True))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(LocallyConnected2D(32, (3, 3), padding = 'valid', use_bias = True))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(GlobalAveragePooling2D())
# Input dimensions: (None, 3, 3, 512)
#model.add(Flatten())
model.add(Dense(512,activation='relu'))
                # CDB: dropout increased from 0.10 (current value: 0.15)
model.add(Dropout(0.15))
if full:
model.add(Dense(30))
else:
model.add(Dense(8))
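                # The head emits one value per predicted coordinate: 30 outputs
                # (15 (x,y) pairs) when full == True, otherwise 8 outputs (4 pairs).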
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', #metric = 'root_mean_squared_error',
model_name = __MODEL_NAME, feature_name = feature_name, file_name = __history_plot_file,
verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_kaggle2(self, X, feature_name = "unknown", full = True, verbose = False):
__MODEL_NAME = "Keras - Kaggle2"
__MODEL_FNAME_PREFIX = "KERAS_KAGGLE2/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):## or (not os.path.isfile(__scaler_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" % ##'%s'\n" %
(__model_file_name, __model_json_file))##, __scaler_file))
# Load the training scaler for this model
##if verbose: print("Loading SCALER for '%s' and zero-centering X." % feature_name)
##scaler = pickle.load(open(__scaler_file, "rb"))
##X = self.__4d_Scaler(arr = X, ss = scaler, fit = False, verbose = verbose)
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#------------------------------------------------
# ResNet50
# Inspired by: https://arxiv.org/abs/1512.03385
#------------------------------------------------
def get_keras_resnet50(self, X, Y, batch_size, epoch_count, val_split = 0.1, X_val = None, Y_val = None, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - ResNet50"
__MODEL_FNAME_PREFIX = "KERAS_RESNET50/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
#act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
act = 'adam'
#lss = losses.mean_squared_error
lss = 'mean_squared_error'
#mtrc = [metrics.RootMeanSquaredError()]
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
# a few custom parameters for tuning
include_top = True
pooling = 'avg' # can be 'max' as well, only used if include_top is False
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
# Input image shape (H, W, C)
input_img = Input(shape = (96, 96, 1))
bn_axis = 3
# Begin ResNet50
x = ZeroPadding2D(padding=(3, 3), name='conv1_pad')(input_img)
x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', kernel_initializer='he_normal', name='conv1') (x)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = self.__conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = self.__identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = self.__identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = self.__conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = self.__identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = self.__identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = self.__identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = self.__conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = self.__identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = self.__identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = self.__identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = self.__identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = self.__identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = self.__conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = self.__identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = self.__identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
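                # Stages 2-5 above mirror the standard ResNet50 layout: one conv_block followed
                # by 2, 3, 5 and 2 identity blocks respectively (3-4-6-3 bottleneck units).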
if include_top:
x = GlobalAveragePooling2D(name='avg_pool')(x)
if full:
x = Dense(30, name='fc1000') (x)
else:
x = Dense(8, name='fc1000') (x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
else:
                        raise ValueError("Invalid 'pooling' value %r; expected 'avg' or 'max'." % pooling)
model = Model(input_img, x, name='resnet50')
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', #metric = 'root_mean_squared_error',
model_name = __MODEL_NAME, feature_name = feature_name, file_name = __history_plot_file,
verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = | pd.read_csv(__history_performance_file) | pandas.read_csv |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = | tm.box_expected([False, True, True], xbox) | pandas._testing.box_expected |
"""
One table verb implementations for a :class:`pandas.DataFrame`
"""
import warnings
import numpy as np
import pandas as pd
from ..types import GroupedDataFrame
from ..options import get_option
from ..operators import register_implementations
from ..utils import Q, get_empty_env, regular_index
from .common import Evaluator, Selector
from .common import _get_groups, _get_base_dataframe
__all__ = ['arrange', 'create', 'define', 'distinct', 'do',
'group_by', 'group_indices', 'head', 'mutate',
'pull', 'query', 'rename', 'sample_frac', 'sample_n',
'select', 'slice_rows', 'summarize', 'tail',
'ungroup']
def _check_modify_groups(verb):
"""
Raise ValueError if expressions modify existing groups
Parameters
----------
verb : DataOperator
Verb to modify dataframe
"""
groups = _get_groups(verb)
# Data has no groups or verb authorises modification
if not groups:
return
new_cols = [e.column for e in verb.expressions if e.column != e.stmt]
overwritten_groups = list(set(new_cols) & set(groups))
if overwritten_groups:
raise ValueError(
"Columns {} cannot be modified because they are "
"grouping variables.".format(overwritten_groups)
)
def define(verb):
if not get_option('modify_input_data'):
verb.data = verb.data.copy()
if not verb.expressions:
return verb.data
_check_modify_groups(verb)
verb.env = verb.env.with_outer_namespace(_outer_namespace)
with regular_index(verb.data):
new_data = Evaluator(verb).process()
for col in new_data:
verb.data[col] = new_data[col]
return verb.data
def create(verb):
data = _get_base_dataframe(verb.data)
verb.env = verb.env.with_outer_namespace(_outer_namespace)
with regular_index(verb.data, data):
new_data = Evaluator(verb, drop=True).process()
for col in new_data:
data[col] = new_data[col]
return data
def sample_n(verb):
return verb.data.sample(**verb.kwargs)
def sample_frac(verb):
return verb.data.sample(**verb.kwargs)
def select(verb):
columns = Selector.get(verb)
data = verb.data.loc[:, columns]
return data
def rename(verb):
inplace = get_option('modify_input_data')
data = verb.data.rename(columns=verb.lookup, inplace=inplace)
return verb.data if inplace else data
def distinct(verb):
data = define(verb)
return data.drop_duplicates(subset=verb.columns,
keep=verb.keep)
def arrange(verb):
# Do not evaluate if all statements correspond to
# columns already in the dataframe
stmts = [expr.stmt for expr in verb.expressions]
has_all_columns = all(stmt in verb.data for stmt in stmts)
if has_all_columns:
df = verb.data.loc[:, stmts]
else:
verb.env = verb.env.with_outer_namespace({'Q': Q})
df = Evaluator(verb, keep_index=True).process()
if len(df.columns):
# The index is also rearranged, but to avoid issues with
# duplicate index values, we work with a regular index
original_index = verb.data.index
with regular_index(verb.data, df):
sorted_index = df.sort_values(by=list(df.columns)).index
data = verb.data.loc[sorted_index, :]
if verb.reset_index:
data.reset_index(drop=True, inplace=True)
else:
data.index = original_index[sorted_index]
else:
data = verb.data
return data
def group_by(verb):
verb._overwrite_groups = True
verb.data = define(verb)
copy = not get_option('modify_input_data')
try:
add = verb.add_
except AttributeError:
add = False
if add:
groups = _get_groups(verb) + verb.groups
else:
groups = verb.groups
if groups:
return GroupedDataFrame(verb.data, groups, copy=copy)
else:
return pd.DataFrame(verb.data, copy=copy)
def ungroup(verb):
return | pd.DataFrame(verb.data) | pandas.DataFrame |
"""
This module illustrates how to retrieve the top-10 items with highest rating
prediction. We first train an SVD algorithm on the MovieLens dataset, and then
predict all the ratings for the pairs (user, item) that are not in the training
set. We then retrieve the top-10 prediction for each user.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import random
import user_adder
import pandas as pd
import math
import dataset_handler as data_handler
class Recommender:
def __init__(self, algo, trainset):
self.original_trainset = trainset
self.original_dataset = data_handler.get_data_from_df(self.original_trainset).build_full_trainset()
self.algo = algo
print("Training Recommender")
self.algo.fit(self.original_dataset)
self.original_fit = [self.algo.bu, self.algo.bi, self.algo.pu, self.algo.qi]
self.new_ratings = | pd.DataFrame(columns=['userId', 'movieId', 'rating']) | pandas.DataFrame |
import warnings
import numpy as np
import pandas as pd
def create_initial_infections(
empirical_infections,
synthetic_data,
start,
end,
seed,
virus_shares,
reporting_delay,
population_size,
):
"""Create a DataFrame with initial infections.
.. warning::
In case a person is drawn to be newly infected more than once we only
infect her on the first date. If the probability of being infected is
large, not correcting for this will lead to a lower infection probability
than in the empirical data.
Args:
empirical_infections (pandas.Series): Newly infected Series with the index
levels ["date", "county", "age_group_rki"]. Should already be corrected
upwards to include undetected cases.
synthetic_data (pandas.DataFrame): Dataset with one row per simulated
individual. Must contain the columns age_group_rki and county.
start (str or pd.Timestamp): Start date.
end (str or pd.Timestamp): End date.
seed (int)
virus_shares (dict or None): If None, it is assumed that there is only one
strain. If dict, keys are the names of the virus strains and the values
are pandas.Series with a DatetimeIndex and the share among newly infected
individuals on each day as value.
reporting_delay (int): Number of days by which the reporting of cases is
delayed. If given, later days are used to get the infections of the
demanded time frame.
population_size (int): Population size behind the empirical_infections.
Returns:
pandas.DataFrame: DataFrame with same index as synthetic_data and one column
for each day between start and end. Dtype is boolean or categorical.
Values identify which individual gets infected with which variant.
"""
np.random.seed(seed)
assert reporting_delay >= 0, "Reporting delay must be >= 0"
reporting_delay = pd.Timedelta(days=reporting_delay)
start = pd.Timestamp(start) + reporting_delay
end = pd.Timestamp(end) + reporting_delay
index_cols = ["date", "county", "age_group_rki"]
correct_index_levels = empirical_infections.index.names == index_cols
assert correct_index_levels, f"Your data must have {index_cols} as index levels."
dates = empirical_infections.index.get_level_values("date").unique()
expected_dates = pd.date_range(start, end)
missing_dates = [str(x.date()) for x in expected_dates if x.date() not in dates]
assert len(missing_dates) == 0, f"The following dates are missing: {missing_dates}"
empirical_infections = empirical_infections.loc[
pd.Timestamp(start) : | pd.Timestamp(end) | pandas.Timestamp |
import re
import unicodedata
from collections import Counter
from itertools import product
import numpy as np
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from src import sentence_splitter, data_frame, learn_sklearn, learn_lgb
def rating_dataset():
# target: ["likes", "dislikes"]
all_df = pd.concat(
[pd.read_csv("./data/input/train_data.csv").drop(["y"], axis=1),
pd.read_csv("./data/input/test_data.csv")]
).reset_index(drop=True)
# train = all_df[~all_df["ratings_disabled"] & ~all_df["comments_disabled"]].reset_index(drop=True)
# test = all_df[all_df["ratings_disabled"] & ~all_df["comments_disabled"]].reset_index(drop=True)
train = all_df[~all_df["ratings_disabled"]].reset_index(drop=True)
test = all_df[all_df["ratings_disabled"]].reset_index(drop=True)
test = test.drop(["likes", "dislikes"], axis=1)
train.likes = train.likes.apply(np.log1p)
train.dislikes = train.dislikes.apply(np.log1p)
train.comment_count = train.comment_count.apply(np.log1p)
test.comment_count = test.comment_count.apply(np.log1p)
train["publishedAt"] = pd.to_datetime(train.publishedAt).apply(lambda x: x.value)
test["publishedAt"] = pd.to_datetime(test.publishedAt).apply(lambda x: x.value)
train["title_len"] = train.title.apply(lambda x: len(str(x)))
test["title_len"] = test.title.apply(lambda x: len(str(x)))
train["channelTitle_len"] = train.channelTitle.apply(lambda x: len(str(x)))
test["channelTitle_len"] = test.channelTitle.apply(lambda x: len(str(x)))
train["description_len"] = train.description.apply(lambda x: len(str(x)))
test["description_len"] = test.description.apply(lambda x: len(str(x)))
train["tags_count"] = train.tags.apply(lambda x: str(x).count("|"))
test["tags_count"] = test.tags.apply(lambda x: str(x).count("|"))
    # Flag whether each text field contains Japanese characters
train["title_ja_count"] = train.title.apply(data_frame.is_japanese)
test["title_ja_count"] = test.title.apply(data_frame.is_japanese)
train["channelTitle_ja_count"] = train.channelTitle.apply(data_frame.is_japanese)
test["channelTitle_ja_count"] = test.channelTitle.apply(data_frame.is_japanese)
train["description_ja_count"] = train.description.apply(data_frame.is_japanese)
test["description_ja_count"] = test.description.apply(data_frame.is_japanese)
    # Count alphabetic characters
train["title_en_count"] = train.title.apply(data_frame.count_alphabet)
test["title_en_count"] = test.title.apply(data_frame.count_alphabet)
train["channelTitle_en_count"] = train.channelTitle.apply(data_frame.count_alphabet)
test["channelTitle_en_count"] = test.channelTitle.apply(data_frame.count_alphabet)
train["description_en_count"] = train.description.apply(data_frame.count_alphabet)
test["description_en_count"] = test.description.apply(data_frame.count_alphabet)
    # Count numeric characters
train["description_num_count"] = train.description.apply(data_frame.count_number)
test["description_num_count"] = test.description.apply(data_frame.count_number)
    # Count URLs in the description
train["description_url_count"] = train.description.apply(lambda x: str(x).count("://"))
test["description_url_count"] = test.description.apply(lambda x: str(x).count("://"))
all_df: pd.DataFrame = pd.concat(
[train.drop(["likes", "dislikes"], axis=1), test], ignore_index=True).reset_index(drop=True)
category = ["channelId", "categoryId", "collection_date"]
target_list = ["comment_count", "title_len", "channelTitle_len", "description_len", "tags_count",
"description_ja_count", "description_en_count", "title_ja_count", "title_en_count",
"publishedAt"]
for col, target in product(category, target_list):
print(col, target)
data_frame.group(train, test, col, target, all_df)
data_frame.TE(train, test, "mean", train.likes, ["categoryId", "collection_date"])
data_frame.TE(train, test, "std", train.likes, ["categoryId", "collection_date"])
data_frame.TE(train, test, "mean", train.dislikes, ["categoryId", "collection_date"])
data_frame.TE(train, test, "std", train.dislikes, ["categoryId", "collection_date"])
return train, test
def comment_dataset():
# target: ["comment_dataset"]
all_df = pd.concat(
[ | pd.read_csv("./data/input/train_data.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
def inverse_sample_weights(df, target_col, weight_col,
new_col_name=None, min_class_weight = .01,
return_df = True):
""" Given a target class an column to use to derive training weights,
create a column of weights where the negative class is the inverse
of the weights column.
E.g. weight column of 'Price' would use Price value for positive class
(where target == 1) and 1/Price for the negative class.
"""
df_copy = df.copy()
pos_class_weights = np.where(df[target_col] == 1 , # Where class is positive
df[weight_col], # Use this val as weight
0) # Else 0
neg_class_weights_inverse = np.where(df[target_col] == 0 , # Where class is neg
1/df[weight_col], # Use inverse of this
0) # Else 0
# Handle Edge Case where dividing by 0 results in undefined
neg_class_weights_inverse = np.where(neg_class_weights_inverse == np.inf , # Where weight is inf (divided by 0)
min_class_weight, # Replace with smallest weighting
neg_class_weights_inverse) # Otherwise keep it
# Combine weights
combined_weights_inverse = np.where(pos_class_weights == 0, # Where negative classes
neg_class_weights_inverse, # Place the inverse as negative weights
pos_class_weights) # Else keep the positive weights
if not new_col_name:
new_col_name = 'Sample_Inverse_Weights'
df_copy[new_col_name] = combined_weights_inverse
if return_df:
return df_copy
else:
return pd.Series(combined_weights_inverse, name=new_col_name)
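# A minimal usage sketch (column names and values below are illustrative assumptions):
#
#   df = pd.DataFrame({"target": [1, 0, 1, 0], "Price": [10.0, 4.0, 2.0, 0.0]})
#   w = inverse_sample_weights(df, "target", "Price", return_df=False)
#   # positive rows keep Price as the weight; negative rows get 1/Price, with
#   # zero-priced rows falling back to min_class_weight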
def even_sample_weights(df, target_col, weight_col,
new_col_name=None,
return_df = True):
""" Given a target class an column to use to derive training weights,
create a column of weights where the negative class is the inverse
of the weights column.
E.g. weight column of 'Price' would use Price value for positive class
(where target == 1) and 1/Price for the negative class.
"""
df_copy = df.copy()
pos_class_weights = np.where(df[target_col] == 1 , # Where class is positive
df[weight_col], # Use this val as weight
0) # Else 0
neg_class_even_weights = np.where(df[target_col] == 0, # Where class is neg
(df[target_col] == 0).sum()/(df[target_col] == 0).shape[0] , # Create even weighting
0)
# Combine weights
combined_weights = np.where(pos_class_weights == 0, # Where negative classes
neg_class_even_weights, # Place the inverse as negative weights
pos_class_weights) # Else keep the positive weights
if not new_col_name:
new_col_name = 'Sample_Even_Weights'
df_copy[new_col_name] = combined_weights
if return_df:
return df_copy
else:
return | pd.Series(combined_weights, name=new_col_name) | pandas.Series |
import pandas as pd
import numpy as np
import logging
import os
import geojson
import math
import itertools
import geopandas as gpd
from geopy.geocoders import Nominatim
from shapely.geometry import Point, Polygon, MultiPolygon, shape
import shapely.ops
from pyproj import Proj
from bs4 import BeautifulSoup
import requests
from abc import ABC, abstractmethod
from .geolocations import *
from .visualizations import *
logging.basicConfig(level=logging.WARNING)
class OpinionNetworkModel(ABC):
""" Abstract base class for network model """
def __init__(self,
probabilities = [.45,.1,.45],
power_law_exponent = 1.5,
openness_to_neighbors = 1.5,
openness_to_influencers = 1.5,
distance_scaling_factor = 1/10,
importance_of_weight = 1.6,
importance_of_distance = 8.5,
include_opinion = True,
include_weight = True,
left_reach = 0.8,
right_reach = 0.8,
threshold = -1
):
"""Returns initialized OpinionNetworkModel.
Inputs:
probabilities: (list) probabilities of each mode; these are the
values "p_0,p_1,p_2" from [1].
power_law_exponent: (float) exponent of power law, must be > 0;
this is "gamma" from [1].
openness_to_neighbors: (float) maximum inter-mode distance that agents
can influence; this is "b" from [1].
openness_to_influencers: (float) distance in opinion space that
mega-influencers can reach; this is "epsilon" from [1].
            distance_scaling_factor: (float) Scale distance by this amount, must
be >0; this is "lambda" from [1].
importance_of_weight: (float) Raise weights to this power, must be > 0;
this is "alpha" from [1].
importance_of_distance: (float) Raise adjusted distance to this power,
must be > 0; this is "delta" from [1].
include_opinion: (boolean) If True, include distance in opinion space
in the probability measure.
include_weight: (boolean) If True, include influencer weight in the
probability measure.
left_reach: (float) this is the proportion of the susceptible population
that the left mega-influencers will actually reach, must be between
0 and 1; this is p_L from [1]
right_reach: (float) this is the proportion of the susceptible population
that the right mega-influencers will actually reach, must be between
0 and 1; this is p_R from [1]
threshold: (int) value below which opinions no longer change.
Outputs:
Fully initialized OpinionNetwork instance.
"""
self.probabilities = probabilities
self.power_law_exponent = power_law_exponent
self.openness_to_neighbors = openness_to_neighbors
self.openness_to_influencers = openness_to_influencers
self.distance_scaling_factor = distance_scaling_factor
self.importance_of_weight = importance_of_weight
self.importance_of_distance = importance_of_distance
self.include_opinion = include_opinion
self.include_weight = include_weight
self.left_reach = left_reach
self.right_reach = right_reach
self.threshold = threshold
self.agent_df = None
self.belief_df = None
self.prob_df = None
self.adjacency_df = None
self.mega_influencer_df = None
self.clustering_coefficient = 0
self.mean_degree = 0
def populate_model(self, num_agents = None, geo_df = None, bounding_box = None, show_plot = False):
""" Fully initialized but untrained OpinionNetworkModel instance.
Input:
num_agents: (int) number of agents to plot.
            geo_df: (dataframe) geographic dataframe including county geometry.
bounding_box: (list) list of 4 vertices determining a bounding box
where agents are to be added. If no box is given, agents are added
to a random triangle.
show_plot: (bool) if true then plot is shown.
Output:
OpinionNetworkModel instance.
"""
if bounding_box is None:
agent_df = self.add_random_agents_to_triangle(num_agents = num_agents,
geo_df = geo_df,
show_plot = False)
else:
if geo_df is None:
raise ValueError("If a bounding box is specified, then a "
"geo_df must also be given.")
agent_df = self.add_random_agents_to_triangles(geo_df = geo_df,
bounding_box = bounding_box,
show_plot = False)
logging.info("\n {} agents added.".format(agent_df.shape[0]))
belief_df = self.assign_weights_and_beliefs(agent_df)
logging.info("\n Weights and beliefs assigned.")
prob_df = self.compute_probability_array(belief_df)
adjacency_df = self.compute_adjacency(prob_df)
logging.info("\n Adjacencies computed.")
# Connect mega-influencers
mega_influencer_df = self.connect_mega_influencers(belief_df)
# Compute network statistics.
logging.info("\n Computing network statistics...")
cc, md = self.compute_network_stats(adjacency_df)
logging.info("\n Clustering Coefficient: {}".format(cc))
logging.info("\n Mean Degree: {}".format(md))
self.agent_df = agent_df
self.belief_df = belief_df
self.prob_df = prob_df
self.adjacency_df = adjacency_df
self.mega_influencer_df = mega_influencer_df
self.clustering_coefficient = cc
self.mean_degree = md
if show_plot == True:
self.plot_initial_network()
return None
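    # A minimal usage sketch (argument values are illustrative; a geo_df from the
    # geolocations helpers is only needed when densities or bounding boxes are used):
    #
    #   model = OpinionNetworkModel(probabilities = [.45, .1, .45])
    #   model.populate_model(num_agents = 500, show_plot = True)
    #   print(model.clustering_coefficient, model.mean_degree)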
def plot_initial_network(self):
plot_network(self)
return None
def add_random_agents_to_triangle(self, num_agents, geo_df = None, triangle_object = None,
show_plot = False):
""" Assign N points on a triangle using Poisson point process.
Input:
num_agents: (int) number of agents to add to the triangle. If None,
then agents are added according to density.
            geo_df: (dataframe) geographic dataframe including county geometry.
triangle_object: (Polygon) bounded triangular region to be populated.
show_plot: (bool) if true then plot is shown.
Returns:
An num_agents x 2 dataframe of point coordinates.
"""
if triangle_object is None:
# If no triangle is given, initialize triangle with area 1 km^2.
triangle_object = Polygon([[0,0],[1419,0], [1419/2,1419],[0,0]])
# If density is specified, adjust triangle size.
if geo_df is not None:
density = geo_df.loc[0,"density"]
b = 1419 * (num_agents/density) ** (1/2)
triangle_object = Polygon([[0,0],[b,0], [b/2,b],[0,0]])
bnd = list(triangle_object.boundary.coords)
gdf = gpd.GeoDataFrame(geometry = [triangle_object])
# Establish initial CRS
gdf.crs = "EPSG:3857"
# Set CRS to lat/lon
gdf = gdf.to_crs(epsg=4326)
# Extract coordinates
co = list(gdf.loc[0,"geometry"].exterior.coords)
lon, lat = zip(*co)
pa = Proj(
"+proj=aea +lat_1=37.0 +lat_2=41.0 +lat_0=39.0 +lon_0=-106.55")
x, y = pa(lon, lat)
coord_proj = {"type": "Polygon", "coordinates": [zip(x, y)]}
area = shape(coord_proj).area / (10 ** 6) # area in km^2
# Get Vertices
V1 = np.array(bnd[0])
V2 = np.array(bnd[1])
V3 = np.array(bnd[2])
# Sample from uniform distribution on [0,1]
U = np.random.uniform(0,1,num_agents)
V = np.random.uniform(0,1,num_agents)
UU = np.where(U + V > 1, 1-U, U)
VV = np.where(U + V > 1, 1-V, V)
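        # Points with U + V > 1 are reflected back into the lower triangle of the unit
        # square, which keeps the sample uniformly distributed over the triangle V1-V2-V3.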
# Shift triangle into origin and and place points.
agents = (UU.reshape(len(UU),-1) * (V2 - V1).reshape(-1,2)) + (
VV.reshape(len(VV),-1) * (V3 - V1).reshape(-1,2))
# Shift points back to original position.
agents = agents + V1.reshape(-1,2)
agent_df = pd.DataFrame(agents, columns = ["x","y"])
if show_plot == True:
plot_agents_on_triangle(triangle_object, agent_df)
return agent_df
def add_random_agents_to_triangles(self, geo_df, bounding_box = None, show_plot = False):
""" Plots county with triangular regions.
Inputs:
            geo_df: (dataframe) geographic dataframe including county geometry.
bounding_box: (list) list of 4 vertices determining a bounding box
where agents are to be added. If no box is given, then the
bounding box is taken as the envelope of the county.
show_plot: (bool) if true then plot is shown.
Returns:
            DataFrame of agent xy-coordinates: each triangle of the specified county
            that lies inside the bounding box is populated at the county density
            using a Poisson point process.
"""
tri_dict = make_triangulation(geo_df)
tri_df = gpd.GeoDataFrame({"geometry":[Polygon(t) for t in tri_dict["geometry"]["coordinates"]]})
# Establish initial CRS
tri_df.crs = "EPSG:3857"
# Set CRS to lat/lon.
tri_df = tri_df.to_crs(epsg=4326)
# Get triangles within bounding box.
if bounding_box is None:
geo_df.crs = "EPSG:3857"
geo_df = geo_df.to_crs(epsg=4326)
sq_df = gpd.GeoDataFrame(geo_df["geometry"])
else:
sq_df = gpd.GeoDataFrame({"geometry":[Polygon(bounding_box)]})
inset = [i for i in tri_df.index if tri_df.loc[i,"geometry"].within(sq_df.loc[0,"geometry"])]
# Load triangle area.
agent_df = pd.DataFrame()
for i in inset:
co = list(tri_df.loc[i,"geometry"].exterior.coords)
lon, lat = zip(*co)
pa = Proj(
"+proj=aea +lat_1=37.0 +lat_2=41.0 +lat_0=39.0 +lon_0=-106.55")
x, y = pa(lon, lat)
coord_proj = {"type": "Polygon", "coordinates": [zip(x, y)]}
area = shape(coord_proj).area / (10 ** 6) # area in km^2
num_agents = int(area * geo_df.loc[0,"density"])
df = pd.DataFrame(columns = ["x","y"])
if num_agents > 0:
df = self.add_random_agents_to_triangle(num_agents,
geo_df = geo_df,
triangle_object = tri_df.loc[i,"geometry"],
show_plot = False)
agent_df = pd.concat([agent_df,df])
agent_df.reset_index(drop = True, inplace = True)
# Plot triangles.
if show_plot == True:
fig, ax = plt.subplots(figsize = (10,10))
tri_df.loc[inset,:].boundary.plot(ax = ax, alpha=1,
linewidth = 3,
edgecolor = COLORS["light_blue"])
ax.scatter(agent_df["x"], agent_df["y"], s = 3)
ax.set_axis_off()
ax.set_aspect(.9)
plt.show()
return agent_df
def assign_weights_and_beliefs(self, agent_df, show_plot = False):
""" Assign weights and beliefs (i.e. modes) accoring to probabilities.
Inputs:
agent_df: (dataframe) xy-coordinates for agents.
show_plot: (bool) if true then plot is shown.
Returns:
Dataframe with xy-coordinates, beliefs, and weights for each point.
"""
belief_df = agent_df.copy()
power_law_exponent = self.power_law_exponent
k = -1/(power_law_exponent)
modes = [i for i in range(len(self.probabilities))]
        assert np.isclose(np.sum(self.probabilities), 1.0), "Probabilities must sum to 1."
belief_df["weight"] = np.random.uniform(0,1,belief_df.shape[0]) ** (k)
belief_df["belief"] = np.random.choice(modes, belief_df.shape[0],
p = self.probabilities)
belief_df["decile"] = pd.qcut(belief_df["weight"], q = 100, labels = [
i for i in range(1,101)])
if show_plot == True:
plot_agents_with_belief_and_weight(belief_df)
return belief_df
def compute_probability_array(self, belief_df):
""" Return dataframe of probability that row n influences column m.
Inputs:
            belief_df: (dataframe) xy-coordinates, beliefs and weights of
agents.
Returns:
            Dataframe with the probability that agent n influences agent m
in row n column m.
"""
n = belief_df.index.shape[0]
prob_array = np.ones((n,n))
dist_array = np.zeros((n,n))
for i in range(n):
point_i = Point(belief_df.loc[i,"x"],belief_df.loc[i,"y"])
# Get distances from i to each other point.
dist_array[i,:] = [point_i.distance(Point(belief_df.loc[j,"x"],
belief_df.loc[j,"y"])
) for j in belief_df.index]
# Compute the dimensionless distance metric.
diam = dist_array.max().max()
lam = (self.distance_scaling_factor * diam)
delta = -1 * self.importance_of_distance
dist_array = np.where(dist_array == 0,np.nan, dist_array)
dist_array = (1 + (dist_array/lam)) ** delta
prob_array = prob_array * dist_array
# Only allow connections to people close in opinion space.
if self.include_opinion == True:
op_array = np.zeros((n,n))
# If row i connects to column j, that means person i is
# influencing person j. This can only happen if j is
# already sufficiently close to person i in opinion space.
for i in range(n):
i_current_belief = belief_df.loc[i,"belief"]
opinion_diff = np.where(
np.abs(belief_df["belief"] - i_current_belief
) > self.openness_to_neighbors,0, 1)
op_array[i,:] = opinion_diff
prob_array = prob_array * op_array
# Incentivize connections with heavily weighted people.
if self.include_weight == True:
wt_array = belief_df["weight"] ** self.importance_of_weight
wt_array = wt_array.values.reshape(-1,1)
prob_array = prob_array * wt_array
prob_df = | pd.DataFrame(prob_array) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 31 15:54:06 2019
@author: Nathan
"""
import requests
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup, SoupStrainer
import pandas as pd
import re
import matplotlib.pyplot as plt
import pickle
url = "https://www.nytimes.com/section/corrections"
# download driver from https://github.com/mozilla/geckodriver/releases/
# unzip and place in working directory of python program
driver = webdriver.Firefox()
driver.get(url)
driver.implicitly_wait(300)
# Firefox opens, click show more button
html_source = driver.page_source
driver.quit()
soup = BeautifulSoup(html_source, 'html.parser')
#print(soup.prettify())
links = []
for link in BeautifulSoup(html_source, parse_only=SoupStrainer('a')):
if link.has_attr('href') and link['href'][0] == '/' and len(link['href']) > 1:
links.append(link['href'])
del links[0:4]
# save to file
#with open('links', 'wb') as fp:
# pickle.dump(links, fp)
# get list from file
with open ('links', 'rb') as fp:
links = pickle.load(fp)
###############################################################################
# Get comments from given article url
article_list = []
j = 0
for curr_url in links:
#if (j > 100):
# break
print(j)
# NYTimes Community API url
URL = "http://www.nytimes.com/svc/community/V3/requestHandler?"
# set params
api_key = '<KEY>'
cmd = "GetCommentsAll"
#curr_url = 'https://www.nytimes.com/2019/03/28/science/frogs-fungus-bd.html'
curr_url = 'https://www.nytimes.com' + curr_url
# get readable name from url
url_name = re.split('/',curr_url)
url_name = re.split('\.', url_name[len(url_name)-1])[0]
# defining a params dict for the parameters to be sent to the API
PARAMS = {'api-key': api_key, 'cmd':cmd, 'url':curr_url}
# sending get request and saving the response as response object
response = requests.get(url = URL, params = PARAMS)
# extracting data in json format
data = response.json()
del response
###########################################################################
# Get keywords from urls
URL = "https://api.nytimes.com/svc/search/v2/articlesearch.json?"
# set params
api_key = '<KEY>'
fq = 'web_url:("' + curr_url + '")'
page = 0
# defining a params dict for the parameters to be sent to the API
PARAMS = {'api-key': api_key, 'fq':fq, 'page':page}
# sleep 6 sec since api request limit is 10 per min
time.sleep(6)
# sending get request and saving the response as response object
kw_response = requests.get(url = URL, params = PARAMS)
# extracting data in json format
kw_data = kw_response.json()
del kw_response
keywords =[]
i=0
for kw in kw_data['response']['docs'][0]['keywords']:
if i<5:
keywords.append(kw['value'])
i += 1
###########################################################################
if data['results']['totalCommentsFound'] > 0:
comments = []
for com in data['results']['comments']:
comments.append(com['commentBody'])
if com['replyCount'] > 0:
                for rep in com['replies']:
                    comments.append(rep['commentBody'])
article_list.append({'url_name':url_name, 'url':curr_url, 'total_comments':data['results']['totalCommentsFound'], 'comments':comments, 'keywords':keywords})
else:
article_list.append({'url_name':url_name, 'url':curr_url, 'total_comments':data['results']['totalCommentsFound'], 'keywords':keywords})
j += 1
# save to file
#with open('article_list', 'wb') as fp:
# pickle.dump(article_list, fp)
# get list from file
with open ('article_list', 'rb') as fp:
article_list = pickle.load(fp)
###############################################################################
article_keywords = []
for a in article_list:
for k in a['keywords']:
article_keywords.append(k)
from pandas import Series
ak = Series(article_keywords)
ak_counts = ak.value_counts()
ak_counts[0:10].plot(kind='bar')
plt.title('Corrected Article Keywords')
plt.ylabel('Keyword Frequency')
plt.xlabel('Keyword')
plt.show()
###############################################################################
URL = "https://api.nytimes.com/svc/archive/v1/2019/3.json?"
# set params
api_key = '<KEY>'
PARAMS = {'api-key': api_key}
# sending get request and saving the response as response object
response = requests.get(url = URL, params = PARAMS)
data = response.json()
keywords_orig =[]
j=0
for a in data['response']['docs']:
print (j)
if j > len(article_list):
break
curr_url = a['web_url']
URL = "https://api.nytimes.com/svc/search/v2/articlesearch.json?"
# set params
api_key = '<KEY>'
fq = 'web_url:("' + curr_url + '")'
page = 0
# defining a params dict for the parameters to be sent to the API
PARAMS = {'api-key': api_key, 'fq':fq, 'page':page}
# sleep 6 sec since api request limit is 10 per min
time.sleep(6)
# sending get request and saving the response as response object
kw_response = requests.get(url = URL, params = PARAMS)
# extracting data in json format
kw_data = kw_response.json()
del kw_response
i=0
for kw in kw_data['response']['docs'][0]['keywords']:
if i<5:
keywords_orig.append(kw['value'])
i += 1
j += 1
# save to file
#with open('keywords_orig', 'wb') as fp:
# pickle.dump(keywords_orig, fp)
# get list from file
with open ('keywords_orig', 'rb') as fp:
keywords_orig = pickle.load(fp)
ako = | Series(keywords_orig) | pandas.Series |
import sys
import pandas as pd
import pickle
from sqlalchemy import create_engine
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
def load_data(database_filepath):
    '''The function loads data from the SQLite database at the given path.
    It returns the features X, the labels Y, and the label names.'''
engine = create_engine('sqlite:///'+database_filepath)
df = pd.read_sql("select * from Messages", engine)
print ('Loaded: ', df.shape)
# drop data with no categories assigned
df = df[df['cat_num']>0]
X = df['message']
Y = df.iloc[:, 4:-1]
return X, Y, Y.columns
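# Shape sketch (illustrative): X is a Series of raw message strings and Y is a DataFrame
# with one 0/1 column per category, so Y.shape == (len(X), number_of_categories).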
def tokenize(text):
'''The function will remove punctuation, normalize the text case, lemmatize and remove the stop words'''
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
    # normalize case and remove punctuation
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
# tokenize text
tokens = word_tokenize(text)
# lemmatize and remove stop words
tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
return tokens
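# Illustrative (hypothetical) input/output for tokenize():
#   tokenize("We need water and food in the southern districts!")
#   -> ['need', 'water', 'food', 'southern', 'district']
# Stop words are dropped, punctuation is stripped, and plural nouns are lemmatized.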
class TextLengthExtractor(BaseEstimator, TransformerMixin):
'''The custom transformer will return the number of characters in each message'''
def fit(self, X, y=None):
return self
def transform(self, X):
X_len = | pd.Series(X) | pandas.Series |
import ast
from datetime import datetime
import pandas as pd
import pytest
from pylighter import AdditionalOutputElement, Annotation
@pytest.mark.parametrize(
"labels, expected",
[
([["O", "O", "O", "O"]], [["O", "O", "O", "O"]]),
([["O", "B-1", "I-1", "I-1"]], [["O", "B-1", "I-1", "I-1"]]),
(None, [["O", "O", "O", "O"]]),
],
)
def test_init_labels(labels, expected):
corpus = ["This"]
annotation = Annotation(corpus, labels=labels)
assert annotation.labels == expected
def test_select_new_labeliser():
corpus = ["This is a sentence"]
annotation = Annotation(corpus)
assert annotation.selected_labeliser == annotation.labels_names[0]
annotation._select_new_labeliser(button=None, button_index=1)
assert annotation.selected_labeliser == annotation.labels_names[1]
@pytest.mark.parametrize(
"labels, start_index, char_index, expected",
[
(["O", "O", "O", "O"], None, 2, ["O", "O", "B-1", "O"]),
(["O", "O", "O", "O"], 0, 2, ["B-1", "I-1", "I-1", "O"]),
(["O", "O", "O", "O"], 2, 0, ["B-1", "I-1", "I-1", "O"]),
(["B-2", "I-2", "O", "O"], None, 2, ["B-2", "I-2", "B-1", "O"]),
(["B-2", "I-2", "O", "O"], 2, 3, ["B-2", "I-2", "B-1", "I-1"]),
(["B-2", "I-2", "O", "O"], None, 0, ["B-1", "B-2", "O", "O"]),
(["B-2", "I-2", "O", "O"], 0, 1, ["B-1", "I-1", "O", "O"]),
(["B-2", "I-2", "O", "O"], 0, 2, ["B-1", "I-1", "I-1", "O"]),
(["B-2", "I-2", "O", "O"], 2, 0, ["B-1", "I-1", "I-1", "O"]),
(["B-2", "I-2", "O", "O"], 0, 0, ["B-1", "B-2", "O", "O"]),
],
)
def test_labelise(labels, start_index, char_index, expected):
labels_names = ["1", "2", "3"]
corpus = ["This is a sentence"]
annotation = Annotation(corpus, labels=[labels], labels_names=labels_names)
assert annotation.chunks.to_labels() == annotation.labels[0]
annotation.label_start_index = start_index
annotation._labelise(
button=None,
char_index=char_index,
)
assert annotation.chunks.to_labels() == expected
@pytest.mark.parametrize(
"labels, start_index, char_index, expected",
[
(["O", "O", "O", "O"], None, 2, ["O", "O", "O", "O"]),
(["O", "O", "O", "O"], 0, 2, ["O", "O", "O", "O"]),
(["O", "O", "O", "O"], 2, 0, ["O", "O", "O", "O"]),
(["B-2", "I-2", "O", "O"], None, 2, ["B-2", "I-2", "O", "O"]),
(["B-2", "I-2", "O", "O"], 2, 3, ["B-2", "I-2", "O", "O"]),
(["B-2", "I-2", "O", "O"], None, 0, ["O", "B-2", "O", "O"]),
(["B-2", "I-2", "O", "O"], 0, 1, ["O", "O", "O", "O"]),
(["B-2", "I-2", "O", "O"], 0, 2, ["O", "O", "O", "O"]),
(["B-2", "I-2", "O", "O"], 2, 0, ["O", "O", "O", "O"]),
(["B-2", "I-2", "O", "O"], 0, 0, ["O", "B-2", "O", "O"]),
],
)
def test_eraser(labels, start_index, char_index, expected):
labels_names = ["1", "2", "3"]
corpus = ["This is a sentence"]
annotation = Annotation(corpus, labels=[labels], labels_names=labels_names)
assert annotation.chunks.to_labels() == annotation.labels[0]
annotation._select_new_labeliser(None, len(annotation.labels_buttons) - 1)
annotation.label_start_index = start_index
annotation._labelise(
button=None,
char_index=char_index,
)
assert annotation.chunks.to_labels() == expected
@pytest.mark.parametrize(
"labels, expected",
[
(["B-2", "I-2", "O", "O"], ["O", "O", "O", "O"]),
(["B-2", "B-2", "O", "O"], ["O", "B-2", "O", "O"]),
(["B-2", "B-2", "I-2", "O"], ["O", "B-2", "I-2", "O"]),
],
)
def test_delete_chunk(labels, expected):
labels_names = ["1", "2", "3"]
corpus = ["Test"]
annotation = Annotation(corpus, labels=[labels], labels_names=labels_names)
assert annotation.chunks.to_labels() == annotation.labels[0]
chunk_to_remove = annotation.chunks.chunks[0]
assert chunk_to_remove.start_index == 0
annotation._delete_chunk(None, chunk_to_remove)
assert annotation.chunks.to_labels() == expected
@pytest.mark.parametrize(
"start_index, direction, skip, expected",
[
(0, 1, False, 1),
(1, -1, False, 0),
(0, -1, False, 0),
(3, 1, False, 4),
(0, 3, False, 3),
(2, -2, False, 0),
(0, 1, True, 1),
(1, -1, True, 0),
(0, -1, True, 0),
(3, 1, True, 4),
(0, 3, True, 3),
(2, -2, True, 0),
],
)
def test_change_document(start_index, direction, skip, expected):
corpus = ["Sentence 1", "Sentence 2", "Sentence 3", "Sentence 4"]
labels_names = ["1", "2", "3"]
annotation = Annotation(
corpus,
start_index=start_index,
labels_names=labels_names,
save_path="/dev/null",
)
assert annotation.current_index == start_index
# Labelise word "sentence"
annotation.label_start_index = 0
annotation._labelise(
button=None,
char_index=7,
)
expected_labels = ["B-1", "I-1", "I-1", "I-1", "I-1", "I-1", "I-1", "I-1", "O", "O"]
annotation._change_document(button=None, direction=direction, skip=skip)
assert annotation.current_index == expected
if skip:
expected_labels = ["O"] * len(corpus[0])
assert annotation.labels[start_index] == expected_labels
@pytest.mark.parametrize(
"corpus, labels",
[
(
["Test", "Save", "!"],
[["B-1", "I-1", "I-1", "I-1"], ["O", "B-1", "I-1", "I-1"], ["O"]],
),
(
["Test", "Save", "!"],
None,
),
],
)
def test_save(corpus, labels):
save_path = "/tmp/" + str(datetime.now()).replace(" ", "_")
annotation = Annotation(
corpus,
labels=labels,
save_path=save_path,
)
if not labels:
labels = annotation.labels
annotation._save()
df = pd.read_csv(save_path, sep=";")
assert "document" in df.columns
assert "labels" in df.columns
assert df.document.to_list() == corpus
assert df.labels.apply(ast.literal_eval).to_list() == labels
@pytest.mark.parametrize(
"labels",
[
["O", "O", "O", "O"],
["B-2", "I-2", "O", "O"],
["B-2", "B-2", "B-2", "B-2"],
],
)
def test_quit(labels):
corpus = ["Test"]
annotation = Annotation(
corpus,
labels=[labels],
)
annotation.start_index = 0
annotation._labelise(None, 3)
annotation._quit()
assert annotation.labels == [labels]
@pytest.mark.parametrize(
"labels",
[
["O", "O", "O", "O"],
["B-2", "I-2", "O", "O"],
["B-2", "B-2", "B-2", "B-2"],
],
)
def test_clear(labels):
corpus = ["Test"]
annotation = Annotation(
corpus,
labels=[labels],
)
assert annotation.chunks.to_labels() == labels
assert annotation.labels[0] == labels
annotation._clear_current(None)
assert annotation.chunks.to_labels() == ["O", "O", "O", "O"]
def test_additional_infos():
additional_infos = pd.DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
corpus = ["Test 1", "Test 2"]
annotation = Annotation(
corpus,
additional_infos=additional_infos,
)
assert annotation.additional_infos.shape == (2, 2)
assert annotation.additional_infos.col1.to_list() == additional_infos.col1.to_list()
assert annotation.additional_infos.col2.to_list() == additional_infos.col2.to_list()
@pytest.mark.parametrize(
"additional_outputs_elements, additional_outputs_values, new_value, expected",
[
(
[
AdditionalOutputElement(
name="element",
display_type="text",
description="Test element",
default_value="testing",
)
],
None,
None,
"testing",
),
(
[
AdditionalOutputElement(
name="element",
display_type="text",
description="Test element",
default_value="testing",
)
],
pd.DataFrame(
{"element": ["input additional value 1", "input additional value 2"]}
),
None,
"input additional value 1",
),
(
[
AdditionalOutputElement(
name="element",
display_type="text",
description="Test element",
default_value="testing",
)
],
None,
"new_value",
"new_value",
),
(
[
AdditionalOutputElement(
name="element",
display_type="text",
description="Test element",
default_value="testing",
)
],
pd.DataFrame(
{"element": ["input additional value 1", "input additional value 2"]}
),
"new_value",
"new_value",
),
],
)
def test_additional_outputs(
additional_outputs_elements, additional_outputs_values, new_value, expected
):
corpus = ["Test 1", "Test 2"]
save_path = "/tmp/" + str(datetime.now()).replace(" ", "_")
annotation = Annotation(
corpus,
additional_outputs_elements=additional_outputs_elements,
additional_outputs_values=additional_outputs_values,
save_path=save_path,
)
assert len(annotation.additional_outputs_elements_displays) == 1
assert annotation.additional_outputs_values.shape == (2, 1)
if new_value:
annotation.additional_outputs_elements_displays[0].value = new_value
# Change document to save it
annotation._change_document(None, direction=1)
assert annotation.additional_outputs_values.iloc[0]["element"] == expected
# Assess that the outputs are correctly added
annotation._save()
df = | pd.read_csv(save_path, sep=";") | pandas.read_csv |
from pykrx.website.krx.krxio import KrxWebIo
import pandas as pd
from pandas import DataFrame
# ------------------------------------------------------------------------------------------
# Ticker
class 상장종목검색(KrxWebIo):
@property
def bld(self):
return "dbms/comm/finder/finder_stkisu"
def fetch(self, mktsel: str="ALL", searchText: str = "") -> DataFrame:
"""[12003] 개별종목 시세 추이에서 검색 버튼 눌러 활성화 되는 종목 검색창 스크래핑
Args:
mktsel (str, optional): 조회 시장 (STK/KSQ/ALL)
searchText (str, optional): 검색할 종목명 - 입력하지 않을 경우 전체
Returns:
DataFrame : 상장 종목 정보를 반환
full_code short_code codeName marketCode marketName marketEngName ord1 ord2
0 KR7060310000 060310 3S KSQ 코스닥 KOSDAQ 16
1 KR7095570008 095570 AJ네트웍스 STK 유가증권 KOSPI 16
2 KR7006840003 006840 AK홀딩스 STK 유가증권 KOSPI 16
3 KR7054620000 054620 APS홀딩스 KSQ 코스닥 KOSDAQ 16
4 KR7265520007 265520 AP시스템 KSQ 코스닥 KOSDAQ 16
"""
result = self.read(mktsel=mktsel, searchText=searchText, typeNo=0)
return DataFrame(result['block1'])
class 상폐종목검색(KrxWebIo):
@property
def bld(self):
return "dbms/comm/finder/finder_listdelisu"
def fetch(self, mktsel:str = "ALL", searchText: str = "") -> DataFrame:
"""[20037] 상장폐지종목 현황
- http://data.krx.co.kr/contents/MDC/MDI/mdiLoader/index.cmd?menuId=MDC02021301
Args:
mktsel (str, optional): 조회 시장 (STK/KSQ/ALL) . Defaults to "ALL".
searchText (str, optional): 검색할 종목명으로 입력하지 않을 경우 전체 조회
Returns:
DataFrame: 상장폐지 종목 정보를 반환
full_code short_code codeName marketCode marketName ord1 ord2
0 KR7037730009 037730 3R KSQ 코스닥 16
1 KR7036360006 036360 3SOFT KSQ 코스닥 16
2 KYG887121070 900010 3노드디지탈 KSQ 코스닥 16
3 KR7038120002 038120 AD모터스 KSQ 코스닥 16
"""
result = self.read(mktsel=mktsel, searchText=searchText, typeNo=0)
return DataFrame(result['block1'])
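# Usage sketch (requires network access to KRX; listings change daily, so the exact
# rows returned are illustrative only):
#   df = 상장종목검색().fetch(mktsel="STK", searchText="삼성")
#   print(df[["short_code", "codeName"]].head())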
# ------------------------------------------------------------------------------------------
# Market
class 개별종목시세(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT01701"
def fetch(self, strtDd: str, endDd: str, isuCd: str) -> DataFrame:
"""[12003] 개별종목 시세 추이 (수정종가 아님)
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
isuCd (str): 조회 종목 ISIN
Returns:
DataFrame: 일자별 시세 조회 결과
TRD_DD TDD_CLSPRC FLUC_TP_CD CMPPREVDD_PRC FLUC_RT TDD_OPNPRC TDD_HGPRC TDD_LWPRC ACC_TRDVOL ACC_TRDVAL MKTCAP LIST_SHRS
0 2021/01/15 88,000 2 -1,700 -1.90 89,800 91,800 88,000 33,431,809 2,975,231,937,664 525,340,864,400,000 5,969,782,550
1 2021/01/14 89,700 3 0 0.00 88,700 90,000 88,700 26,393,970 2,356,661,622,700 535,489,494,735,000 5,969,782,550
2 2021/01/13 89,700 2 -900 -0.99 89,800 91,200 89,100 36,068,848 3,244,066,562,850 535,489,494,735,000 5,969,782,550
3 2021/01/12 90,600 2 -400 -0.44 90,300 91,400 87,800 48,682,416 4,362,546,108,950 540,862,299,030,000 5,969,782,550
4 2021/01/11 91,000 1 2,200 2.48 90,000 96,800 89,500 90,306,177 8,379,237,727,064 543,250,212,050,000 5,969,782,550
"""
result = self.read(isuCd=isuCd, strtDd=strtDd, endDd=endDd)
return DataFrame(result['output'])
class 전종목시세(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT01501"
def fetch(self, trdDd: str, mktId: str) -> DataFrame:
"""[12001] 전종목 시세
Args:
trdDd (str): 조회 일자 (YYMMDD)
mktId (str): 조회 시장 (STK/KSQ/KNX/ALL)
Returns:
DataFrame: 전종목의 가격 정보
ISU_SRT_CD ISU_ABBRV MKT_NM SECT_TP_NM TDD_CLSPRC FLUC_TP_CD CMPPREVDD_PRC FLUC_RT TDD_OPNPRC TDD_HGPRC TDD_LWPRC ACC_TRDVOL ACC_TRDVAL MKTCAP LIST_SHRS MKT_ID
0 060310 3S KOSDAQ 중견기업부 2,365 2 -5 -0.21 2,370 2,395 2,355 152,157 361,210,535 105,886,118,195 44,772,143 KSQ
1 095570 AJ네트웍스 KOSPI 5,400 1 70 1.31 5,330 5,470 5,260 90,129 485,098,680 252,840,393,000 46,822,295 STK
2 068400 AJ렌터카 KOSPI 12,000 1 400 3.45 11,600 12,000 11,550 219,282 2,611,434,750 265,755,600,000 22,146,300 STK
3 006840 AK홀딩스 KOSPI 55,000 1 800 1.48 54,700 55,300 53,600 16,541 901,619,600 728,615,855,000 13,247,561 STK
4 054620 APS홀딩스 KOSDAQ 우량기업부 4,475 1 10 0.22 4,440 4,520 4,440 31,950 142,780,675 91,264,138,975 20,394,221 KSQ
"""
result = self.read(mktId=mktId, trdDd=trdDd)
return DataFrame(result['OutBlock_1'])
class PER_PBR_배당수익률_전종목(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT03501"
def fetch(self, trdDd: str, mktId: str) -> DataFrame:
"""[12021] PER/PBR/배당수익률
Args:
trdDd (str): 조회 일자 (YYMMDD)
mktId (str): 조회 시장 (STK/KSQ/KNX/ALL)
Returns:
DataFrame:
ISU_SRT_CD ISU_ABBRV ISU_ABBRV_STR TDD_CLSPRC FLUC_TP_CD CMPPREVDD_PRC FLUC_RT EPS PER BPS PBR DPS DVD_YLD
0 060310 3S 3S <em class ="up"></em> 2,195 1 20 0.92 - - 745 2.95 0 0.00
1 095570 AJ네트웍스 AJ네트웍스 <em class ="up"></em> 4,560 1 20 0.44 982 4.64 6,802 0.67 300 6.58
2 006840 AK홀딩스 AK홀딩스 <em class ="up"></em> 27,550 1 2,150 8.46 2,168 12.71 62,448 0.44 750 2.72
3 054620 APS홀딩스 APS홀딩스 <em class ="up"></em> 6,920 2 -250 -3.49 - - 10,530 0.66 0 0.00
4 265520 AP시스템 AP시스템 <em class ="up"></em> 25,600 1 600 2.40 671 38.15 7,468 3.43 50 0.20
"""
result = self.read(mktId=mktId, trdDd=trdDd)
return DataFrame(result['output'])
class PER_PBR_배당수익률_개별(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT03502"
def fetch(self, strtDd: str, endDd: str, mktId: str, isuCd: str) -> DataFrame:
"""[12021] PER/PBR/배당수익률
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
mktId (str): 조회 시장 (STK/KSQ/KNX/ALL)
isuCd (str): 조회 종목 ISIN
Returns:
DataFrame:
TRD_DD TDD_CLSPRC FLUC_TP_CD CMPPREVDD_PRC FLUC_RT EPS PER BPS PBR DPS DVD_YLD
0 2019/03/29 44,650 2 -200 -0.45 5,997 7.45 28,126 1.59 850 1.90
1 2019/03/28 44,850 2 -500 -1.10 5,997 7.48 28,126 1.59 850 1.90
2 2019/03/27 45,350 1 100 0.22 5,997 7.56 28,126 1.61 850 1.87
3 2019/03/26 45,250 2 -250 -0.55 5,997 7.55 28,126 1.61 850 1.88
4 2019/03/25 45,500 2 -1,050 -2.26 5,997 7.59 28,126 1.62 850 1.87
"""
result = self.read(mktId=mktId, strtDd=strtDd, endDd=endDd, isuCd=isuCd)
return DataFrame(result['output'])
class 전종목등락률(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT01602"
def fetch(self, strtDd: str, endDd: str, mktId: str, adj_stkprc: int) -> DataFrame:
"""[12002] 전종목 등락률
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
mktId (str): 조회 시장 (STK/KSQ/ALL)
adj_stkprc (int): 수정 종가 여부 (2:수정종가/1:단순종가)
Returns:
DataFrame:
ISU_SRT_CD ISU_ABBRV BAS_PRC TDD_CLSPRC CMPPREVDD_PRC FLUC_RT ACC_TRDVOL ACC_TRDVAL FLUC_TP
0 060310 3S 2,420 3,290 870 35.95 40,746,975 132,272,050,410 1
1 095570 AJ네트웍스 6,360 5,430 -930 -14.62 3,972,269 23,943,953,170 2
2 068400 AJ렌터카 13,550 11,500 -2,050 -15.13 14,046,987 166,188,922,890 2
3 006840 AK홀딩스 73,000 77,100 4,100 5.62 1,707,900 132,455,779,600 1
4 054620 APS홀딩스 6,550 5,560 -990 -15.11 7,459,926 41,447,809,620 2
"""
result = self.read(mktId=mktId, adj_stkprc=adj_stkprc, strtDd=strtDd,
endDd=endDd)
return DataFrame(result['OutBlock_1'])
class 외국인보유량_전종목(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT03701"
def fetch(self, trdDd: str, mktId: str, isuLmtRto: int) -> DataFrame:
"""[12023] 외국인보유량(개별종목) - 전종목
Args:
trdDd (str): 조회 일자 (YYMMDD)
mktId (str): 조회 시장 (STK/KSQ/KNX/ALL)
isuLmtRto (int): 외국인 보유제한 종목
- 0 : check X
- 1 : check O
Returns:
DataFrame:
ISU_SRT_CD ISU_ABBRV TDD_CLSPRC FLUC_TP_CD CMPPREVDD_PRC FLUC_RT LIST_SHRS FORN_HD_QTY FORN_SHR_RT FORN_ORD_LMT_QTY FORN_LMT_EXHST_RT
0 060310 3S 2,185 2 -10 -0.46 44,802,511 739,059 1.65 44,802,511 1.65
1 095570 AJ네트웍스 4,510 2 -50 -1.10 46,822,295 4,983,122 10.64 46,822,295 10.64
2 006840 AK홀딩스 26,300 2 -1,250 -4.54 13,247,561 1,107,305 8.36 13,247,561 8.36
3 054620 APS홀딩스 7,010 1 90 1.30 20,394,221 461,683 2.26 20,394,221 2.26
4 265520 AP시스템 25,150 2 -450 -1.76 14,480,227 1,564,312 10.80 14,480,227 10.80
"""
result = self.read(searchType=1, mktId=mktId, trdDd=trdDd, isuLmtRto=isuLmtRto)
return DataFrame(result['output'])
class 외국인보유량_개별추이(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT03702"
def fetch(self, strtDd: str, endDd: str, isuCd: str) -> DataFrame:
"""[12023] 외국인보유량(개별종목) - 개별추이
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
isuCd (str): 조회 종목 ISIN
Returns:
DataFrame:
TRD_DD TDD_CLSPRC FLUC_TP_CD CMPPREVDD_PRC FLUC_RT LIST_SHRS FORN_HD_QTY FORN_SHR_RT FORN_ORD_LMT_QTY FORN_LMT_EXHST_RT
0 2021/01/15 88,000 2 -1,700 -1.90 5,969,782,550 3,317,574,926 55.57 5,969,782,550 55.57
1 2021/01/14 89,700 3 0 0.00 5,969,782,550 3,314,652,740 55.52 5,969,782,550 55.52
2 2021/01/13 89,700 2 -900 -0.99 5,969,782,550 3,316,551,070 55.56 5,969,782,550 55.56
3 2021/01/12 90,600 2 -400 -0.44 5,969,782,550 3,318,676,206 55.59 5,969,782,550 55.59
4 2021/01/11 91,000 1 2,200 2.48 5,969,782,550 3,324,115,988 55.68 5,969,782,550 55.68
"""
result = self.read(searchType=2, strtDd=strtDd, endDd=endDd, isuCd=isuCd)
return DataFrame(result['output'])
class 투자자별_거래실적_전체시장_기간합계(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT02201"
def fetch(self, strtDd: str, endDd: str, mktId: str, etf: str, etn: str, els: str) -> DataFrame:
"""[12009] 투자자별 거래실적
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
mktId (str): 조회 시장 (STK/KSQ/ALL)
etf (str): ETF 포함 여부 (""/EF)
etn (str): ETN 포함 여부 (""/EN)
els (str): ELS 포함 여부 (""/ES)
Returns:
DataFrame:
INVST_TP_NM ASK_TRDVOL BID_TRDVOL NETBID_TRDVOL ASK_TRDVAL BID_TRDVAL NETBID_TRDVAL
0 금융투자 183,910,512 173,135,582 -10,774,930 11,088,878,744,833 10,518,908,333,291 -569,970,411,542
1 보험 18,998,546 11,995,538 -7,003,008 1,011,736,647,106 661,574,577,285 -350,162,069,821
2 투신 78,173,801 64,724,900 -13,448,901 2,313,376,665,370 1,943,337,885,168 -370,038,780,202
3 사모 37,867,724 33,001,267 -4,866,457 1,142,499,274,494 1,000,228,858,448 -142,270,416,046
4 은행 3,252,303 901,910 -2,350,393 69,744,809,430 43,689,969,205 -26,054,840,225
"""
result = self.read(strtDd=strtDd, endDd=endDd, mktId=mktId, etf=etf, etn=etn, elw=els)
return DataFrame(result['output']).drop('CONV_OBJ_TP_CD', axis=1)
class 투자자별_거래실적_전체시장_일별추이_일반(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT02202"
def fetch(self, strtDd: str, endDd: str, mktId: str, etf: str, etn: str, els: str, trdVolVal: int, askBid: int) -> DataFrame:
"""[12009] 투자자별 거래실적 일별추이
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
mktId (str): 조회 시장 (STK/KSQ/ALL)
etf (str): ETF 포함 여부 (""/EF)
etn (str): ETN 포함 여부 (""/EN)
els (str): ELS 포함 여부 (""/ES)
trdVolVal (int): 1: 거래량 / 2: 거래대금
askBid (int): 1: 매도 / 2: 매수 / 3: 순매수
Returns:
DataFrame:
>> 투자자별_거래실적_전체시장_일별추이_일반().fetch("20210115", "20210122", "STK", "", "", "", 1, 1)
TRD_DD TRDVAL1 TRDVAL2 TRDVAL3 TRDVAL4 TRDVAL_TOT
0 2021/01/22 67,656,491 6,020,990 927,119,399 110,426,104 1,111,222,984
1 2021/01/21 69,180,642 13,051,423 1,168,810,381 109,023,034 1,360,065,480
2 2021/01/20 70,184,991 5,947,195 1,010,578,768 105,984,335 1,192,695,289
3 2021/01/19 56,242,065 6,902,124 1,183,520,475 106,647,770 1,353,312,434
4 2021/01/18 70,527,745 7,512,434 1,270,483,687 123,524,707 1,472,048,573
"""
result = self.read(strtDd=strtDd, endDd=endDd, mktId=mktId, etf=etf, etn=etn, elw=els, inqTpCd=2, trdVolVal=trdVolVal,
askBid=askBid)
return DataFrame(result['output'])
class 투자자별_거래실적_전체시장_일별추이_상세(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT02203"
def fetch(self, strtDd: str, endDd: str, mktId: str, etf: str, etn: str, els: str, trdVolVal: int, askBid: int) -> DataFrame:
"""[12009] 투자자별 거래실적 일별추이 (상세)
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
mktId (str): 조회 시장 (STK/KSQ/ALL)
etf (str): ETF 포함 여부 (""/EF)
etn (str): ETN 포함 여부 (""/EN)
els (str): ELS 포함 여부 (""/ES)
trdVolVal (int): 1: 거래량 / 2: 거래대금
askBid (int): 1: 매도 / 2: 매수 / 3: 순매수
Returns:
DataFrame:
>> 투자자별_거래실적_전체시장_일별추이_상세().fetch("20210115", "20210122", "STK", "", "", "", 1, 1)
TRD_DD TRDVAL1 TRDVAL2 TRDVAL3 TRDVAL4 TRDVAL5 TRDVAL6 TRDVAL7 TRDVAL8 TRDVAL9 TRDVAL10 TRDVAL11 TRDVAL_TOT
0 2021/01/22 27,190,933 2,735,154 8,774,207 3,338,979 454,546 170,392 24,992,280 6,020,990 927,119,399 108,740,962 1,685,142 1,111,222,984
1 2021/01/21 18,482,914 3,032,118 6,625,819 3,543,737 635,314 8,696,961 28,163,779 13,051,423 1,168,810,381 106,653,326 2,369,708 1,360,065,480
2 2021/01/20 25,584,466 2,530,140 8,106,713 4,204,627 182,144 137,315 29,439,586 5,947,195 1,010,578,768 103,998,394 1,985,941 1,192,695,289
3 2021/01/19 13,992,565 2,122,324 7,740,948 2,736,919 391,860 419,021 28,838,428 6,902,124 1,183,520,475 103,967,576 2,680,194 1,353,312,434
4 2021/01/18 22,645,478 2,471,112 6,761,600 2,867,429 263,984 196,148 35,321,994 7,512,434 1,270,483,687 120,350,740 3,173,967 1,472,048,573
"""
result = self.read(strtDd=strtDd, endDd=endDd, mktId=mktId, etf=etf, etn=etn, elw=els, trdVolVal=trdVolVal,
askBid=askBid, detailView=1)
return DataFrame(result['output'])
class 투자자별_거래실적_개별종목_기간합계(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT02301"
def fetch(self, strtDd: str, endDd: str, isuCd: str) -> DataFrame:
"""[12009] 투자자별 거래실적(개별종목)
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
isuCd (str): 조회 종목 ISIN
Returns:
DataFrame:
INVST_TP_NM ASK_TRDVOL BID_TRDVOL NETBID_TRDVOL ASK_TRDVAL BID_TRDVAL NETBID_TRDVAL
0 금융투자 31,324,444 28,513,421 -2,811,023 2,765,702,311,200 2,510,494,630,400 -255,207,680,800
1 보험 1,790,469 561,307 -1,229,162 158,120,209,600 49,570,523,900 -108,549,685,700
2 투신 3,966,211 1,486,178 -2,480,033 351,753,222,200 130,513,380,300 -221,239,841,900
3 사모 756,726 541,912 -214,814 67,202,238,800 47,475,872,700 -19,726,366,100
4 은행 105,323 70,598 -34,725 9,360,874,400 6,170,507,400 -3,190,367,000
"""
result = self.read(strtDd=strtDd, endDd=endDd, isuCd=isuCd, inqTpCd=1, trdVolVal=1, askBid=1)
return DataFrame(result['output']).drop('CONV_OBJ_TP_CD', axis=1)
class 투자자별_거래실적_개별종목_일별추이_일반(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT02302"
def fetch(self, strtDd: str, endDd: str, isuCd: str, trdVolVal: int, askBid: int) -> DataFrame:
"""[12009] 투자자별 거래실적(개별종목)
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
isuCd (str): 조회 종목 ISIN
trdVolVal (int): 1: 거래량 / 2: 거래대금
askBid (int): 1: 매도 / 2: 매수 / 3: 순매수
Returns:
DataFrame:
TRD_DD TRDVAL1 TRDVAL2 TRDVAL3 TRDVAL4 TRDVAL_TOT
0 2021/01/20 13,121,791 114,341 7,346,474 4,628,521 25,211,127
1 2021/01/19 13,912,581 323,382 20,956,376 4,702,705 39,895,044
2 2021/01/18 15,709,256 258,096 21,942,253 5,318,346 43,227,951
3 2021/01/15 16,944,750 216,653 10,371,182 5,899,224 33,431,809
4 2021/01/14 15,722,824 232,674 6,483,589 3,954,883 26,393,970
"""
result = self.read(strtDd=strtDd, endDd=endDd, isuCd=isuCd, inqTpCd=2, trdVolVal=trdVolVal, askBid=askBid)
return DataFrame(result['output'])
class 투자자별_거래실적_개별종목_일별추이_상세(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT02303"
def fetch(self, strtDd: str, endDd: str, isuCd: str, trdVolVal: int, askBid: int) -> DataFrame:
"""[12009] 투자자별 거래실적(개별종목)
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
isuCd (str): 조회 종목 ISIN
trdVolVal (int): 1: 거래량 / 2: 거래대금
askBid (int): 1: 매도 / 2: 매수 / 3: 순매수
Returns:
DataFrame:
TRD_DD TRDVAL1 TRDVAL2 TRDVAL3 TRDVAL4 TRDVAL5 TRDVAL6 TRDVAL7 TRDVAL8 TRDVAL9 TRDVAL10 TRDVAL11 TRDVAL_TOT
0 2021/01/20 5,328,172 259,546 313,812 58,992 3,449 256 7,157,564 114,341 7,346,474 4,615,231 13,290 25,211,127
1 2021/01/19 2,835,217 119,057 312,695 42,163 10,100 180 10,593,169 323,382 20,956,376 4,644,854 57,851 39,895,044
2 2021/01/18 4,175,051 286,158 349,739 98,050 11,261 4,486 10,784,511 258,096 21,942,253 5,262,225 56,121 43,227,951
3 2021/01/15 7,080,570 272,542 838,871 112,920 1,691 21,958 8,616,198 216,653 10,371,182 5,878,858 20,366 33,431,809
4 2021/01/14 6,926,895 366,023 707,874 67,391 25,022 10,072 7,619,547 232,674 6,483,589 3,937,223 17,660 26,393,970
5 2021/01/13 4,978,539 487,143 1,443,220 377,210 53,800 74,669 10,728,979 122,212 9,029,353 8,746,689 27,034 36,068,848
"""
result = self.read(strtDd=strtDd, endDd=endDd, isuCd=isuCd, inqTpCd=2, trdVolVal=trdVolVal, askBid=askBid, detailView=1)
return DataFrame(result['output'])
class 투자자별_순매수상위종목(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT02401"
def fetch(self, strtDd: str, endDd: str, mktId: str, invstTpCd: str) -> DataFrame:
"""[12010] 투자자별 순매수상위종목
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
mktId (str): 조회 시장 (STK/KSQ/KNX/ALL)
invstTpCd (str): 투자자
- 1000 - 금융투자
- 2000 - 보험
- 3000 - 투신
- 3100 - 사모
- 4000 - 은행
- 5000 - 기타금융
- 6000 - 연기금
- 7050 - 기관합계
- 7100 - 기타법인
- 8000 - 개인
- 9000 - 외국인
- 9001 - 기타외국인
- 9999 - 전체
Returns:
DataFrame:
ISU_SRT_CD ISU_NM ASK_TRDVOL BID_TRDVOL NETBID_TRDVOL ASK_TRDVAL BID_TRDVAL NETBID_TRDVAL
0 006400 삼성SDI 1,298,644 1,636,929 338,285 899,322,500,000 1,125,880,139,000 226,557,639,000
1 051910 LG화학 1,253,147 1,492,717 239,570 1,166,498,517,000 1,371,440,693,000 204,942,176,000
2 096770 SK이노베이션 4,159,038 4,823,863 664,825 1,050,577,437,000 1,208,243,272,500 157,665,835,500
3 003670 포스코케미칼 1,093,803 1,973,179 879,376 129,914,349,500 240,577,561,000 110,663,211,500
"""
result = self.read(strtDd=strtDd, endDd=endDd, mktId=mktId, invstTpCd=invstTpCd)
return DataFrame(result['output'])
# ------------------------------------------------------------------------------------------
# index
class 전체지수기본정보(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT00401"
def fetch(self, idxIndMidclssCd: str) -> DataFrame:
"""[11004] 전체지수 기본정보
Args:
idxIndMidclssCd (str): 검색할 시장
- 01 : KRX
- 02 : KOSPI
- 03 : KOSDAQ
- 04 : 테마
Returns:
DataFrame: [description]
IDX_NM IDX_ENG_NM BAS_TM_CONTN ANNC_TM_CONTN BAS_IDX_CONTN CALC_CYCLE_CONTN CALC_TM_CONTN COMPST_ISU_CNT IND_TP_CD IDX_IND_CD
0 KRX 300 KRX 300 2010.01.04 2018.02.05 1,000.00 1초 09:00:10 ~ 15:30:00 300 5 300
1 KTOP 30 KTOP 30 1996.01.03 2015.07.13 888.85 2초 09:00:10 ~ 15:30:00 30 5 600
2 KRX 100 KRX 100 2001.01.02 2005.06.01 1,000.00 1초 09:00:10 ~ 15:30:00 100 5 042
"""
result = self.read(idxIndMidclssCd=idxIndMidclssCd)
return DataFrame(result['output'])
class 주가지수검색(KrxWebIo):
@property
def bld(self):
return "dbms/comm/finder/finder_equidx"
def fetch(self, market: str) -> DataFrame:
"""[11004] 전체지수 기본정보
Args:
market (str): 검색 시장
- 1 : 전체
- 2 : KRX
- 3 : KOSPI
- 4 : KOSDAQ
- 5 : 테마
Returns:
DataFrame:
full_code short_code codeName marketCode marketName
0 5 300 KRX 300 KRX KRX
1 5 600 KTOP 30 KRX KRX
2 5 042 KRX 100 KRX KRX
3 5 301 KRX Mid 200 KRX KRX
4 5 043 KRX 자동차 KRX KRX
marketCode : ['KRX' 'STK' 'KSQ' 'GBL']
marketName : ['KRX' 'KOSPI' 'KOSDAQ' '테마']
"""
result = self.read(mktsel=market)
return DataFrame(result['block1'])
class 개별지수시세(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT00301"
def fetch(self, ticker: str, group_id: str, fromdate: str, todate: str) -> DataFrame:
"""[11003] 개별지수 시세 추이
Args:
ticker (str): index ticker
group_id (str): index group id
fromdate (str): 조회 시작 일자 (YYMMDD)
todate (str): 조회 종료 일자 (YYMMDD)
Returns:
DataFrame:
TRD_DD CLSPRC_IDX FLUC_TP_CD PRV_DD_CMPR UPDN_RATE OPNPRC_IDX HGPRC_IDX LWPRC_IDX ACC_TRDVOL ACC_TRDVAL MKTCAP
0 2021/01/15 2,298.05 2 -68.84 -2.91 2,369.94 2,400.69 2,292.92 22,540,416 1,967,907,809,615 137,712,088,395,380
1 2021/01/14 2,366.89 2 -23.88 -1.00 2,390.59 2,393.24 2,330.76 23,685,783 2,058,155,913,335 142,206,993,223,695
2 2021/01/13 2,390.77 1 25.68 1.09 2,367.94 2,455.05 2,300.10 33,690,790 3,177,416,322,985 144,549,058,033,310
3 2021/01/12 2,365.09 2 -48.63 -2.01 2,403.51 2,428.76 2,295.91 41,777,076 3,933,263,957,150 143,250,319,286,660
4 2021/01/11 2,413.72 1 33.32 1.40 2,403.37 2,613.83 2,352.21 50,975,686 6,602,833,901,895 146,811,113,380,140
"""
result = self.read(indIdx2=ticker, indIdx=group_id, strtDd=fromdate, endDd=todate)
return DataFrame(result['output'])
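# Note: the KRX endpoints above return comma-formatted strings (e.g. "2,298.05"), so a
# typical post-processing step is to strip separators before numeric work. Sketch with
# illustrative arguments (KOSPI index "001", group "1"):
#   df = 개별지수시세().fetch("001", "1", "20210101", "20210131")
#   df["CLSPRC_IDX"] = pd.to_numeric(df["CLSPRC_IDX"].str.replace(",", ""))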
class 전체지수등락률(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT00201"
def fetch(self, strtDd: str, endDd: str, idxIndMidclssCd: str) -> DataFrame:
"""[11002] 전체지수 등락률
Args:
strtDd (str): 조회 시작 일자 (YYMMDD)
endDd (str): 조회 종료 일자 (YYMMDD)
idxIndMidclssCd (str): 검색 시장
- 01: KRX
- 02: KOSPI
- 03: KOSDAQ
- 04: 테마
Returns:
DataFrame:
IDX_IND_NM OPN_DD_INDX END_DD_INDX FLUC_TP PRV_DD_CMPR FLUC_RT ACC_TRDVOL ACC_TRDVAL
0 KRX 300 1,845.82 1,920.52 1 74.70 4.05 3,293,520,227 201,056,395,899,602
1 KTOP 30 10,934.77 11,589.88 1 655.11 5.99 820,597,395 109,126,566,806,196
2 KRX 100 6,418.50 6,695.11 1 276.61 4.31 1,563,383,456 154,154,503,633,541
3 KRX Mid 200 1,751.19 1,722.32 2 -28.87 -1.65 2,807,696,801 27,059,313,040,039
4 KRX 자동차 2,046.67 2,298.05 1 251.38 12.28 288,959,592 29,886,192,965,797
"""
result = self.read(idxIndMidclssCd=idxIndMidclssCd, strtDd=strtDd, endDd=endDd)
return DataFrame(result['output'])
class 지수구성종목(KrxWebIo):
@property
def bld(self):
return "dbms/MDC/STAT/standard/MDCSTAT00601"
def fetch(self, date: str, ticker: str, group_id: str) -> DataFrame:
"""[11006] 지수구성종목
Args:
ticker (str): index ticker
group_id (str): index group id
date (str): 조회 일자 (YYMMDD)
Returns:
>> 지수구성종목().fetch("20210125", "001", "1"))
DataFrame:
ISU_SRT_CD ISU_ABBRV TDD_CLSPRC FLUC_TP_CD STR_CMP_PRC FLUC_RT MKTCAP
0 005930 삼성전자 89,400 1 2,600 3.00 533,698,559,970,000
1 000660 SK하이닉스 135,000 1 6,500 5.06 98,280,319,275,000
2 051910 LG화학 990,000 1 15,000 1.54 69,886,419,570,000
3 035420 NAVER 349,000 1 5,500 1.60 57,327,924,855,000
"""
result = self.read(indIdx2=ticker, indIdx=group_id, trdDd=date)
return | DataFrame(result['output']) | pandas.DataFrame |
#!/usr/bin/env python
import os
import sys
from time import time
import argparse
import numpy as np
import pandas as pd
from davis2017.evaluation import DAVISEvaluation
default_davis_path = 'data/ref-davis/DAVIS'
time_start = time()
parser = argparse.ArgumentParser()
parser.add_argument('--davis_path', type=str, help='Path to the DAVIS folder containing the JPEGImages, Annotations, '
'ImageSets, Annotations_unsupervised folders',
required=False, default=default_davis_path)
parser.add_argument('--set', type=str, help='Subset to evaluate the results', default='val') # val subset
parser.add_argument('--task', type=str, help='Task to evaluate the results', default='unsupervised',
choices=['semi-supervised', 'unsupervised'])
parser.add_argument('--results_path', type=str, help='Path to the folder containing the sequences folders',
required=True)
args, _ = parser.parse_known_args()
csv_name_global = f'global_results-{args.set}.csv'
csv_name_per_sequence = f'per-sequence_results-{args.set}.csv'
# Check if the method has been evaluated before, if so read the results, otherwise compute the results
csv_name_global_path = os.path.join(args.results_path, csv_name_global)
csv_name_per_sequence_path = os.path.join(args.results_path, csv_name_per_sequence)
if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path):
print('Using precomputed results...')
table_g = pd.read_csv(csv_name_global_path)
table_seq = | pd.read_csv(csv_name_per_sequence_path) | pandas.read_csv |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import reduce
import math
import numpy as np
import pandas as pd
from ctypes import *
# from .talib_series import LINEARREG_SLOPE
from easyquant.easydealutils.easymongo import MongoIo
import datetime
try:
import talib
except ImportError:
print('PLEASE install TALIB to call these methods')
import os
# lib = cdll.LoadLibrary("%s/%s" % (os.path.abspath("."), "talib_ext.so"))
lib = cdll.LoadLibrary("/usr/share/talib/%s" % ("talib_ext.so"))
"""
Series helpers
These are the base functions used by the DataFrame-input wrappers further below
return pd.Series format
"""
__STOCK_INFOS = pd.DataFrame()
__REALTIME_DATAS = {}
def __INITDATAS(dateStr = None):
mongo = MongoIo()
global __STOCK_INFOS, __REALTIME_DATAS
if len(__STOCK_INFOS) == 0:
__STOCK_INFOS = mongo.get_stock_info()
# STOCK_INFOS =
if dateStr == None:
dateObj = datetime.datetime.now()
else:
# datetime.datetime.strptime(st, "%Y-%m-%d %H:%M:%S"))
dateObj = datetime.datetime.strptime(dateStr, "%Y-%m-%d")
weekDay = dateObj.weekday()
if weekDay > 4:
dateObj = dateObj - datetime.timedelta(weekDay - 4)
dateStr = dateObj.strftime('%Y-%m-%d')
if dateStr not in __REALTIME_DATAS.keys():
__REALTIME_DATAS[dateStr] = mongo.get_realtime(dateStr=dateStr)
return dateStr
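# Note: weekday() > 4 means Saturday (5) or Sunday (6); __INITDATAS therefore maps a
# weekend date back to the preceding Friday before looking up realtime data.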
def __STOCK_INFO(code):
__INITDATAS()
return __STOCK_INFOS.query("code=='%s'" % code)
def __REALTIME_DATA(code, dateStr):
global __REALTIME_DATAS
dateStr = __INITDATAS(dateStr)
try:
return __REALTIME_DATAS[dateStr].query("code=='%s'" % code)
except Exception as e:
# print("__REALTIME_DATA", code, dateStr, e)
return pd.DataFrame()
def EMA(Series, N):
# return pd.Series.ewm(Series, span=N, min_periods=N - 1, adjust=True).mean()
Series = Series.fillna(0)
res = talib.EMA(Series.values, N)
return pd.Series(res, index=Series.index)
def EXPMA(Series, N):
# return pd.Series.ewm(Series, span=N, min_periods=N - 1, adjust=True).mean()
Series = Series.fillna(0)
res = talib.EMA(Series.values, N)
return pd.Series(res, index=Series.index)
def MA(Series, N):
# return pd.Series.rolling(Series, N).mean()
Series = Series.fillna(0)
res = talib.MA(Series.values, N)
return pd.Series(res, index=Series.index)
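# Quick illustrative check: talib.MA defaults to a simple moving average, so
#   MA(pd.Series([1.0, 2.0, 3.0, 4.0]), 3) -> NaN, NaN, 2.0, 3.0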
# Williams SMA; reference: https://www.joinquant.com/post/867
def SMA(Series, N, M=1):
"""
    Williams SMA algorithm
    This revision mainly improves the return value: it now carries the index of the original input Series
2018/5/3
@yutiansut
"""
ret = []
i = 1
length = len(Series)
    # Skip the leading NaN values in the series
while i < length:
if np.isnan(Series.iloc[i]):
ret.append(0)
i += 1
else:
break
if i < length:
preY = Series.iloc[i] # Y'
else:
preY = None
ret.append(preY)
while i < length:
Y = (M * Series.iloc[i] + (N - M) * preY) / float(N)
ret.append(Y)
preY = Y
i += 1
return pd.Series(ret, index=Series.tail(len(ret)).index)
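# The recurrence implemented above is the classic Williams SMA:
#   Y_t = (M * X_t + (N - M) * Y_{t-1}) / N
# i.e. each step blends the current value (weight M/N) with the previous smoothed
# value (weight (N-M)/N).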
def DIFF(Series, N=1):
return pd.Series(Series).diff(N)
def HHV(Series, NS):
if isinstance(NS, pd.Series):
ncount = len(NS)
tf_p = c_float * ncount
np_OUT = tf_p(0)
na_Series = np.asarray(Series).astype(np.float32)
na_NS = np.asarray(NS).astype(np.int32)
np_S = cast(na_Series.ctypes.data, POINTER(c_float))
np_N = cast(na_NS.ctypes.data, POINTER(c_int))
lib.hhv(ncount, np_OUT, np_S, np_N)
return pd.Series(np.asarray(np_OUT), dtype=np.float64, index=Series.index)
if NS == 0:
return Series
return pd.Series(Series).rolling(NS).max()
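# HHV(series, N) is the highest value over the last N bars; when N is itself a Series the
# window length varies bar by bar and is handled by the C extension above. Illustrative:
#   HHV(pd.Series([3, 1, 4, 1, 5]), 3) -> NaN, NaN, 4, 4, 5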
def LLV(Series, NS):
if isinstance(NS, pd.Series):
ncount = len(NS)
tf_p = c_float * ncount
np_OUT = tf_p(0)
na_Series = np.asarray(Series).astype(np.float32)
na_NS = np.asarray(NS).astype(np.int32)
np_S = cast(na_Series.ctypes.data, POINTER(c_float))
np_N = cast(na_NS.ctypes.data, POINTER(c_int))
lib.llv(ncount, np_OUT, np_S, np_N)
return pd.Series(np.asarray(np_OUT), dtype=np.float64, index=Series.index)
if NS == 0:
return Series
return pd.Series(Series).rolling(NS).min()
def SUMS(Series, NS):
ncount = len(NS)
    tf_p = c_float * ncount
    np_OUT = tf_p(0)
    na_Series = np.asarray(Series).astype(np.float32)
    na_NS = np.asarray(NS).astype(np.int32)
    np_S = cast(na_Series.ctypes.data, POINTER(c_float))
    np_N = cast(na_NS.ctypes.data, POINTER(c_int))
lib.sum(ncount, np_OUT, np_S, np_N)
return pd.Series(np.asarray(np_OUT), dtype=np.float64)
def DMA(Series, Weight):
ncount = len(Series)
tf_p = c_float * ncount
np_OUT = tf_p(0)
na_Series = np.asarray(Series).astype(np.float32)
na_Weight = np.asarray(Weight.fillna(1)).astype(np.float32)
np_S = cast(na_Series.ctypes.data, POINTER(c_float))
np_W = cast(na_Weight.ctypes.data, POINTER(c_float))
lib.dma(ncount, np_OUT, np_S, np_W)
return pd.Series(np.asarray(np_OUT), dtype=np.float64, index=Series.index)
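# DMA is the dynamic (weight-driven) moving average Y_t = W_t * X_t + (1 - W_t) * Y_{t-1},
# where the per-bar weight W comes from the second Series (NaNs in W are filled with 1 above).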
def SUM(Series, N):
if N == 0:
return Series.cumsum()
else:
return pd.Series.rolling(Series, N).sum()
def ABS(Series):
return abs(Series)
def MAX(A, B):
var = IF(A > B, A, B)
return var
def MIN(A, B):
var = IF(A < B, A, B)
return var
def SINGLE_CROSS(A, B):
if A.iloc[-2] < B.iloc[-2] and A.iloc[-1] > B.iloc[-1]:
return True
else:
return False
def CROSS(A, B):
"""A<B then A>B A上穿B B下穿A
Arguments:
A {[type]} -- [description]
B {[type]} -- [description]
Returns:
[type] -- [description]
"""
if isinstance(A, int) or isinstance(A, float):
A1 = pd.Series(B).copy()
A1[:] = A
A = A1
var = np.where(A < B, 1, 0)
return ( | pd.Series(var, index=A.index) | pandas.Series |
"""
python prepare_input.py
"""
import argparse
import pickle
import pandas as pd
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
from config import parallel, data_path, ID_col, t_col, var_col, val_col
# Ashutosh added extra imports
import gc
#import dask.dataframe as dd
from itertools import *
import os
# Ashutosh: monkey patch for the pickle protocol-version issue when reading the file:
#pickle.HIGHEST_PROTOCOL = 4
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--outcome', type=str, required=True)
parser.add_argument('--T', type=float, required=True)
parser.add_argument('--dt', type=float, required=True)
args = parser.parse_args()
outcome = args.outcome
T = args.T
dt = args.dt
print('Preparing pipeline input for: outcome={}, T={}, dt={}'.format(outcome, T, dt))
import pathlib
pathlib.Path(data_path, 'features', 'outcome={},T={},dt={}'.format(outcome, T, dt)) \
.mkdir(parents=True, exist_ok=True)
# Load in study population
population = pd.read_csv(data_path + 'population/{}_{}h.csv'.format(outcome, T)) \
.rename(columns={'ICUSTAY_ID': 'ID'}).set_index('ID')[[]]
# Load in raw data (from prepare.py)
with open(data_path + 'formatted/all_data.stacked.p', 'rb') as f:
data = pickle.load(f)
# Resample continuous, resolve duplicates (discrete & continuous)
data = resolve_duplicates_discrete(data)
data = filter_prediction_time(data, T)
data = resample_continuous_events(data, T, dt)
data = resolve_duplicates_continuous(data)
# Combine all DataFrames into one
df_data = pd.concat(data, axis='index', ignore_index=True)
df_data = df_data.sort_values(by=[ID_col, t_col, var_col, val_col], na_position='first')
# Filter by IDs in study population
df_data = population.join(df_data.set_index('ID')).reset_index()
assert set(df_data['ID'].unique()) == set(population.index)
# Save
    # Ashutosh: save with pickle protocol 4 to avoid the protocol-version error
df_data.to_pickle(data_path + 'features/outcome={},T={},dt={}/input_data.p'.format(outcome, T, dt), protocol=4)
################################
#### Helper functions ####
################################
def print_header(*content, char='='):
print()
print(char * 80)
print(*content)
print(char * 80, flush=True)
def filter_prediction_time(data_in, T):
"""
Filter each table in `data_in` by:
- Removing records outside of the prediction window [0, T) hours
`data_in` is a dict {
TABLE_NAME: pd.DataFrame object,
}
"""
print_header('Filter by prediction time T={}'.format(T), char='-')
filtered_data = {}
for table_name in tqdm(sorted(data_in)):
df = data_in[table_name]
        t_cols = df.columns.intersection(['t', 't_start', 't_end']).tolist()
# Focus on the prediction window of [0, T)
if len(t_cols) == 1: # point
if all(pd.isnull(df['t'])):
pass
else:
df = df[(0 <= df['t']) & (df['t'] < T)].copy()
elif len(t_cols) == 2: # range
df = df[(0 <= df['t_end']) & (df['t_start'] < T)].copy()
filtered_data[table_name] = df
print('Done!')
return filtered_data
def resample_continuous_events(data, T, dt):
print_header('Resample continuous events, T={}, dt={}'.format(T, dt), char='-')
for fname, df in sorted(data.items(), reverse=True):
t_cols = df.columns.intersection(['t', 't_start', 't_end']).tolist()
if len(t_cols) == 1: # point time
continue
else: # ranged time
assert len(t_cols) == 2
print(fname)
df_out = []
for index, row in tqdm(df.iterrows(), total=df.shape[0]):
t_start, t_end = row.t_start, row.t_end
t_range = dt/2 + np.arange(max(0, (t_start//dt)*dt), min(T, (t_end//dt+1)*dt), dt)
if len(t_range) == 0:
continue
df_tmp = pd.concat(len(t_range) * [row], axis=1).T.drop(columns=['t_start', 't_end'])
df_tmp['t'] = t_range
df_out.append(df_tmp)
df_out = pd.concat(df_out)[['ID', 't', 'variable_name', 'variable_value']]
data[fname] = df_out
return data
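# Worked example of the resampling above (illustrative): with T=48 and dt=1, a ranged
# record spanning t_start=1.2 .. t_end=3.7 hours is expanded into point events at the
# bin centres t = 1.5, 2.5 and 3.5 (bins outside [0, T) are clipped).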
def resolve_duplicates_discrete(data):
"""
Assume input format:
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
| ID | t (or t_start + t_end) | variable_name | variable_value |
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
"""
print_header('Resolve duplicated event records (discrete)', char='-')
### Chart events - duplicate rows
print('*** CHARTEVENTS')
print(' getting dups and ~dups')
df = data['CHARTEVENTS']
m_dups = df.duplicated(subset=['ID', 't', 'variable_name'], keep=False)
dups = df[m_dups]
dup_variables = dups['variable_name'].unique()
all_dups = df[df['variable_name'].isin(dup_variables)]
not_dups = df[~df['variable_name'].isin(dup_variables)]
#print("Performing the delete action now of DF")
del [[df,m_dups,dups]]
gc.collect()
#print("Performed the delete action !!! ")
def _resolve_duplicates_impl(v, df_v):
# Categorical variables
# Map to different variable names with value 0/1
if | pd.to_numeric(df_v['variable_value'], errors='ignore') | pandas.to_numeric |
import pandas as pd
import numpy as np
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from os import listdir
def load_tensorboard(path):
'''Function to load tensorboard file from a folder.
Assumes one file per folder!'''
event_file = next(filter(lambda filename: filename[:6] == 'events', listdir(path)))
summary_iterator = EventAccumulator(str(path + event_file)).Reload()
tags = summary_iterator.Tags()['scalars']
steps = [[event.step for event in summary_iterator.Scalars(tag)] for tag in tags]
data = [[event.value for event in summary_iterator.Scalars(tag)] for tag in tags]
# Creating dataframe: we have missing values due to coefficients being deleted,
#so we have to do this column by column
df = pd.DataFrame()
for idx, tag in enumerate(tags):
df[tag] = | pd.Series(index=steps[idx], data=data[idx]) | pandas.Series |
from __future__ import annotations
from collections import namedtuple
from typing import TYPE_CHECKING
import warnings
from matplotlib.artist import setp
import numpy as np
from pandas.core.dtypes.common import is_dict_like
from pandas.core.dtypes.missing import remove_na_arraylike
import pandas as pd
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.core import (
LinePlot,
MPLPlot,
)
from pandas.plotting._matplotlib.style import get_standard_colors
from pandas.plotting._matplotlib.tools import (
create_subplots,
flatten_axes,
maybe_adjust_figure,
)
if TYPE_CHECKING:
from matplotlib.axes import Axes
class BoxPlot(LinePlot):
_kind = "box"
_layout_type = "horizontal"
_valid_return_types = (None, "axes", "dict", "both")
# namedtuple to hold results
BP = namedtuple("BP", ["ax", "lines"])
def __init__(self, data, return_type="axes", **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots shows last
# column label
if self.orientation == "vertical":
self.sharex = False
else:
self.sharey = False
@classmethod
def _plot(cls, ax, y, column_num=None, return_type="axes", **kwds):
if y.ndim == 2:
y = [remove_na_arraylike(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na_arraylike(y)
bp = ax.boxplot(y, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return cls.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _validate_color_args(self):
if "color" in self.kwds:
if self.colormap is not None:
warnings.warn(
"'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'"
)
self.color = self.kwds.pop("color")
if isinstance(self.color, dict):
valid_keys = ["boxes", "whiskers", "medians", "caps"]
for key in self.color:
if key not in valid_keys:
raise ValueError(
f"color dict contains invalid key '{key}'. "
f"The key must be either {valid_keys}"
)
else:
self.color = None
# get standard colors for default
colors = get_standard_colors(num_colors=3, colormap=self.colormap, color=None)
# use 2 colors by default, for box/whisker and median
# flier colors isn't needed here
# because it can be specified by ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = "k" # mpl default
def _get_colors(self, num_colors=None, color_kwds="color"):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get("boxes", self._boxes_c)
whiskers = self.color.get("whiskers", self._whiskers_c)
medians = self.color.get("medians", self._medians_c)
caps = self.color.get("caps", self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
# GH 30346, when users specifying those arguments explicitly, our defaults
# for these four kwargs should be overridden; if not, use Pandas settings
if not self.kwds.get("boxprops"):
setp(bp["boxes"], color=boxes, alpha=1)
if not self.kwds.get("whiskerprops"):
setp(bp["whiskers"], color=whiskers, alpha=1)
if not self.kwds.get("medianprops"):
setp(bp["medians"], color=medians, alpha=1)
if not self.kwds.get("capprops"):
setp(bp["caps"], color=caps, alpha=1)
def _make_plot(self):
if self.subplots:
self._return_obj = pd.Series(dtype=object)
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = self._plot(
ax, y, column_num=i, return_type=self.return_type, **kwds
)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = self._plot(
ax, y, column_num=0, return_type=self.return_type, **kwds
)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [left for left, _ in self._iter_data()]
labels = [pprint_thing(left) for left in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax: Axes, labels):
if self.orientation == "vertical":
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self, ax, data):
pass
@property
def orientation(self):
if self.kwds.get("vert", True):
return "vertical"
else:
return "horizontal"
@property
def result(self):
if self.return_type is None:
return super().result
else:
return self._return_obj
def _grouped_plot_by_column(
plotf,
data,
columns=None,
by=None,
numeric_only=True,
grid=False,
figsize=None,
ax=None,
layout=None,
return_type=None,
**kwargs,
):
grouped = data.groupby(by)
if columns is None:
if not isinstance(by, (list, tuple)):
by = [by]
columns = data._get_numeric_data().columns.difference(by)
naxes = len(columns)
fig, axes = create_subplots(
naxes=naxes, sharex=True, sharey=True, figsize=figsize, ax=ax, layout=layout
)
_axes = flatten_axes(axes)
ax_values = []
for i, col in enumerate(columns):
ax = _axes[i]
gp_col = grouped[col]
keys, values = zip(*gp_col)
re_plotf = plotf(keys, values, ax, **kwargs)
ax.set_title(col)
ax.set_xlabel(pprint_thing(by))
ax_values.append(re_plotf)
ax.grid(grid)
result = pd.Series(ax_values, index=columns)
# Return axes in multiplot case, maybe revisit later # 985
if return_type is None:
result = axes
byline = by[0] if len(by) == 1 else by
fig.suptitle(f"Boxplot grouped by {byline}")
maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return result
def boxplot(
data,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwds,
):
import matplotlib.pyplot as plt
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {'axes', 'dict', 'both'}")
if isinstance(data, pd.Series):
data = data.to_frame("x")
column = "x"
def _get_colors():
# num_colors=3 is required as method maybe_color_bp takes the colors
# in positions 0 and 2.
# if colors not provided, use same defaults as DataFrame.plot.box
result = get_standard_colors(num_colors=3)
result = np.take(result, [0, 0, 2])
result = np.append(result, "k")
colors = kwds.pop("color", None)
if colors:
if is_dict_like(colors):
# replace colors in result array with user-specified colors
# taken from the colors dict parameter
# "boxes" value placed in position 0, "whiskers" in 1, etc.
valid_keys = ["boxes", "whiskers", "medians", "caps"]
key_to_index = dict(zip(valid_keys, range(4)))
for key, value in colors.items():
if key in valid_keys:
result[key_to_index[key]] = value
else:
raise ValueError(
f"color dict contains invalid key '{key}'. "
f"The key must be either {valid_keys}"
)
else:
result.fill(colors)
return result
def maybe_color_bp(bp, **kwds):
# GH 30346, when users specifying those arguments explicitly, our defaults
# for these four kwargs should be overridden; if not, use Pandas settings
if not kwds.get("boxprops"):
setp(bp["boxes"], color=colors[0], alpha=1)
if not kwds.get("whiskerprops"):
setp(bp["whiskers"], color=colors[1], alpha=1)
if not kwds.get("medianprops"):
setp(bp["medians"], color=colors[2], alpha=1)
if not kwds.get("capprops"):
setp(bp["caps"], color=colors[3], alpha=1)
def plot_group(keys, values, ax: Axes):
keys = [pprint_thing(x) for x in keys]
values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values]
bp = ax.boxplot(values, **kwds)
if fontsize is not None:
ax.tick_params(axis="both", labelsize=fontsize)
if kwds.get("vert", 1):
ticks = ax.get_xticks()
if len(ticks) != len(keys):
i, remainder = divmod(len(ticks), len(keys))
assert remainder == 0, remainder
keys *= i
ax.set_xticklabels(keys, rotation=rot)
else:
ax.set_yticklabels(keys, rotation=rot)
maybe_color_bp(bp, **kwds)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == "dict":
return bp
elif return_type == "both":
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
# Prefer array return type for 2-D plots to match the subplot layout
# https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
result = _grouped_plot_by_column(
plot_group,
data,
columns=columns,
by=by,
grid=grid,
figsize=figsize,
ax=ax,
layout=layout,
return_type=return_type,
)
else:
if return_type is None:
return_type = "axes"
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when 'by' is None")
if ax is None:
rc = {"figure.figsize": figsize} if figsize is not None else {}
with plt.rc_context(rc):
ax = plt.gca()
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
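# Illustrative usage sketch (added; not part of the original module). It exercises
# both code paths of `boxplot` above: the grouped path (`by` given, which delegates
# to _grouped_plot_by_column) and the ungrouped path (one Axes on the current figure).
def _example_boxplot_usage():
    import pandas as pd
    df = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6], "group": list("abcabc")})
    grouped_axes = boxplot(df, column="value", by="group", return_type="axes")
    single_ax = boxplot(df, column="value")
    return grouped_axes, single_ax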
def boxplot_frame(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwds,
):
import matplotlib.pyplot as plt
ax = boxplot(
self,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
grid=grid,
rot=rot,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwds,
)
plt.draw_if_interactive()
return ax
def boxplot_frame_groupby(
grouped,
subplots=True,
column=None,
fontsize=None,
rot=0,
grid=True,
ax=None,
figsize=None,
layout=None,
sharex=False,
sharey=True,
**kwds,
):
if subplots is True:
naxes = len(grouped)
fig, axes = create_subplots(
naxes=naxes,
squeeze=False,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
)
axes = flatten_axes(axes)
ret = pd.Series(dtype=object)
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(
ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds
)
            ax.set_title(pprint_thing(key))
"""Rank genes according to differential expression.
"""
import numpy as np
import pandas as pd
from math import sqrt, floor
from scipy.sparse import issparse
from .. import utils
from .. import settings
from .. import logging as logg
from ..preprocessing._simple import _get_mean_var
def rank_genes_groups(
adata,
groupby,
use_raw=True,
groups='all',
reference='rest',
n_genes=100,
rankby_abs=False,
key_added=None,
copy=False,
method='t-test_overestim_var',
corr_method='benjamini-hochberg',
**kwds):
"""Rank genes for characterizing groups.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
groupby : `str`
The key of the observations grouping to consider.
use_raw : `bool`, optional (default: `True`)
Use `raw` attribute of `adata` if present.
groups : `str`, `list`, optional (default: `'all'`)
Subset of groups, e.g. `['g1', 'g2', 'g3']`, to which comparison shall
be restricted. If not passed, a ranking will be generated for all
groups.
reference : `str`, optional (default: `'rest'`)
        If `'rest'`, compare each group to the union of the rest of the groups. If
a group identifier, compare with respect to this group.
n_genes : `int`, optional (default: 100)
The number of genes that appear in the returned tables.
method : `{'logreg', 't-test', 'wilcoxon', 't-test_overestim_var'}`, optional (default: 't-test_overestim_var')
If 't-test', uses t-test, if 'wilcoxon', uses Wilcoxon-Rank-Sum. If
't-test_overestim_var', overestimates variance of each group. If
'logreg' uses logistic regression, see [Ntranos18]_, `here
<https://github.com/theislab/scanpy/issues/95>`__ and `here
<http://www.nxn.se/valent/2018/3/5/actionable-scrna-seq-clusters>`__, for
why this is meaningful.
corr_method : `{'benjamini-hochberg', 'bonferroni'}`, optional (default: 'benjamini-hochberg')
p-value correction method. Used only for 't-test', 't-test_overestim_var',
and 'wilcoxon' methods.
rankby_abs : `bool`, optional (default: `False`)
Rank genes by the absolute value of the score, not by the
score. The returned scores are never the absolute values.
**kwds : keyword parameters
Are passed to test methods. Currently this affects only parameters that
are passed to `sklearn.linear_model.LogisticRegression
<http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`__.
For instance, you can pass `penalty='l1'` to try to come up with a
minimal set of genes that are good predictors (sparse solution meaning
few non-zero fitted coefficients).
Returns
-------
names : structured `np.ndarray` (`.uns['rank_genes_groups']`)
Structured array to be indexed by group id storing the gene
names. Ordered according to scores.
scores : structured `np.ndarray` (`.uns['rank_genes_groups']`)
Structured array to be indexed by group id storing the z-score
underlying the computation of a p-value for each gene for each
group. Ordered according to scores.
logfoldchanges : structured `np.ndarray` (`.uns['rank_genes_groups']`)
Structured array to be indexed by group id storing the log2
fold change for each gene for each group. Ordered according to
scores. Only provided if method is 't-test' like.
pvals : structured `np.ndarray` (`.uns['rank_genes_groups']`)
p-values.
pvals_adj : structured `np.ndarray` (`.uns['rank_genes_groups']`)
Corrected p-values.
Notes
-----
There are slight inconsistencies depending on whether sparse
or dense data are passed. See `here <https://github.com/theislab/scanpy/blob/master/scanpy/tests/test_rank_genes_groups.py>`__.
"""
if 'only_positive' in kwds:
rankby_abs = not kwds.pop('only_positive') # backwards compat
logg.info('ranking genes', r=True)
avail_methods = {'t-test', 't-test_overestim_var', 'wilcoxon', 'logreg'}
if method not in avail_methods:
raise ValueError('Method must be one of {}.'.format(avail_methods))
avail_corr = {'benjamini-hochberg', 'bonferroni'}
if corr_method not in avail_corr:
raise ValueError('Correction method must be one of {}.'.format(avail_corr))
adata = adata.copy() if copy else adata
utils.sanitize_anndata(adata)
# for clarity, rename variable
groups_order = groups
if isinstance(groups_order, list) and isinstance(groups_order[0], int):
groups_order = [str(n) for n in groups_order]
if reference != 'rest' and reference not in set(groups_order):
groups_order += [reference]
if (reference != 'rest'
and reference not in set(adata.obs[groupby].cat.categories)):
raise ValueError('reference = {} needs to be one of groupby = {}.'
.format(reference,
adata.obs[groupby].cat.categories.tolist()))
groups_order, groups_masks = utils.select_groups(
adata, groups_order, groupby)
if key_added is None:
key_added = 'rank_genes_groups'
adata.uns[key_added] = {}
adata.uns[key_added]['params'] = {
'groupby': groupby,
'reference': reference,
'method': method,
'use_raw': use_raw,
'corr_method': corr_method,
}
# adata_comp mocks an AnnData object if use_raw is True
# otherwise it's just the AnnData object
adata_comp = adata
if adata.raw is not None and use_raw:
adata_comp = adata.raw
X = adata_comp.X
# for clarity, rename variable
n_genes_user = n_genes
# make sure indices are not OoB in case there are less genes than n_genes
if n_genes_user > X.shape[1]:
n_genes_user = X.shape[1]
# in the following, n_genes is simply another name for the total number of genes
n_genes = X.shape[1]
n_groups = groups_masks.shape[0]
ns = np.zeros(n_groups, dtype=int)
for imask, mask in enumerate(groups_masks):
ns[imask] = np.where(mask)[0].size
logg.msg('consider \'{}\' groups:'.format(groupby), groups_order, v=4)
logg.msg('with sizes:', ns, v=4)
if reference != 'rest':
ireference = np.where(groups_order == reference)[0][0]
reference_indices = np.arange(adata_comp.n_vars, dtype=int)
rankings_gene_scores = []
rankings_gene_names = []
rankings_gene_logfoldchanges = []
rankings_gene_pvals = []
rankings_gene_pvals_adj = []
if method in {'t-test', 't-test_overestim_var'}:
from scipy import stats
from statsmodels.stats.multitest import multipletests
# loop over all masks and compute means, variances and sample numbers
means = np.zeros((n_groups, n_genes))
vars = np.zeros((n_groups, n_genes))
for imask, mask in enumerate(groups_masks):
means[imask], vars[imask] = _get_mean_var(X[mask])
# test each either against the union of all other groups or against a
# specific group
for igroup in range(n_groups):
if reference == 'rest':
mask_rest = ~groups_masks[igroup]
else:
if igroup == ireference: continue
else: mask_rest = groups_masks[ireference]
mean_rest, var_rest = _get_mean_var(X[mask_rest])
ns_group = ns[igroup] # number of observations in group
if method == 't-test': ns_rest = np.where(mask_rest)[0].size
elif method == 't-test_overestim_var': ns_rest = ns[igroup] # hack for overestimating the variance for small groups
else: raise ValueError('Method does not exist.')
denominator = np.sqrt(vars[igroup]/ns_group + var_rest/ns_rest)
denominator[np.flatnonzero(denominator == 0)] = np.nan
scores = (means[igroup] - mean_rest) / denominator #Welch t-test
mean_rest[mean_rest == 0] = 1e-9 # set 0s to small value
foldchanges = (means[igroup] + 1e-9) / mean_rest
scores[np.isnan(scores)] = 0
#Get p-values
denominator_dof = (np.square(vars[igroup]) / (np.square(ns_group)*(ns_group-1))) + (
(np.square(var_rest) / (np.square(ns_rest) * (ns_rest - 1))))
denominator_dof[np.flatnonzero(denominator_dof == 0)] = np.nan
dof = np.square(vars[igroup]/ns_group + var_rest/ns_rest) / denominator_dof # dof calculation for Welch t-test
dof[np.isnan(dof)] = 0
pvals = stats.t.sf(abs(scores), dof)*2 # *2 because of two-tailed t-test
if corr_method == 'benjamini-hochberg':
                pvals[np.isnan(pvals)] = 1  # set NaN values to 1 to properly correct using Benjamini-Hochberg
_, pvals_adj, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
elif corr_method == 'bonferroni':
pvals_adj = np.minimum(pvals * n_genes, 1.0)
scores_sort = np.abs(scores) if rankby_abs else scores
partition = np.argpartition(scores_sort, -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(scores_sort[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_scores.append(scores[global_indices])
rankings_gene_logfoldchanges.append(np.log2(np.abs(foldchanges[global_indices])))
rankings_gene_names.append(adata_comp.var_names[global_indices])
rankings_gene_pvals.append(pvals[global_indices])
rankings_gene_pvals_adj.append(pvals_adj[global_indices])
elif method == 'logreg':
# if reference is not set, then the groups listed will be compared to the rest
# if reference is set, then the groups listed will be compared only to the other groups listed
from sklearn.linear_model import LogisticRegression
reference = groups_order[0]
if len(groups) == 1:
raise Exception('Cannot perform logistic regression on a single cluster.')
adata_copy = adata[adata.obs[groupby].isin(groups_order)]
adata_comp = adata_copy
if adata.raw is not None and use_raw:
adata_comp = adata_copy.raw
X = adata_comp.X
clf = LogisticRegression(**kwds)
clf.fit(X, adata_copy.obs[groupby].cat.codes)
scores_all = clf.coef_
for igroup, group in enumerate(groups_order):
if len(groups_order) <= 2: # binary logistic regression
scores = scores_all[0]
else:
scores = scores_all[igroup]
partition = np.argpartition(scores, -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(scores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_scores.append(scores[global_indices])
rankings_gene_names.append(adata_comp.var_names[global_indices])
if len(groups_order) <= 2:
break
elif method == 'wilcoxon':
from scipy import stats
from statsmodels.stats.multitest import multipletests
CONST_MAX_SIZE = 10000000
means = np.zeros((n_groups, n_genes))
vars = np.zeros((n_groups, n_genes))
# initialize space for z-scores
scores = np.zeros(n_genes)
# First loop: Loop over all genes
if reference != 'rest':
for imask, mask in enumerate(groups_masks):
means[imask], vars[imask] = _get_mean_var(X[mask]) # for fold-change
if imask == ireference: continue
else: mask_rest = groups_masks[ireference]
ns_rest = np.where(mask_rest)[0].size
mean_rest, var_rest = _get_mean_var(X[mask_rest]) # for fold-change
if ns_rest <= 25 or ns[imask] <= 25:
logg.hint('Few observations in a group for '
'normal approximation (<=25). Lower test accuracy.')
n_active = ns[imask]
m_active = ns_rest
                # Now calculate gene expression ranking in chunks:
chunk = []
# Calculate chunk frames
n_genes_max_chunk = floor(CONST_MAX_SIZE / (n_active + m_active))
if n_genes_max_chunk < n_genes - 1:
chunk_index = n_genes_max_chunk
while chunk_index < n_genes - 1:
chunk.append(chunk_index)
chunk_index = chunk_index + n_genes_max_chunk
chunk.append(n_genes - 1)
else:
chunk.append(n_genes - 1)
left = 0
# Calculate rank sums for each chunk for the current mask
for chunk_index, right in enumerate(chunk):
# Check if issparse is true: AnnData objects are currently sparse.csr or ndarray.
if issparse(X):
df1 = pd.DataFrame(data=X[mask, left:right].todense())
df2 = pd.DataFrame(data=X[mask_rest, left:right].todense(),
index=np.arange(start=n_active, stop=n_active + m_active))
else:
                        df1 = pd.DataFrame(data=X[mask, left:right])
"""
Utility functions used by report.py
"""
import pypandoc
import pandas as pd
import numpy as np
import altair as alt
from jinja2 import Template
EPSILON = 1e-9
def run_calc(calc, year, var_list):
"""
Parameters
----------
calc: tax calculator object
year: year to run calculator for
var_list: list of variables to return waited total of
"""
calc.advance_to_year(year)
calc.calc_all()
totals = {}
for var in var_list:
totals[var] = calc.weighted_total(var) * 1e-9
return totals
def add_bins(
dframe,
income_measure,
num_bins,
wt="s006",
decile_details=False,
weight_by_income_measure=False,
):
"""
Add a variable to specified Pandas DataFrame, dframe, that specifies the
table row and is called 'table_row'. The rows hold equal number of
filing units when weight_by_income_measure=False or equal number of
income dollars when weight_by_income_measure=True. Assumes that
specified dframe contains columns for the specified income_measure and
    for sample weights, s006. When num_bins is 10 and decile_details
is True, the bottom decile is broken up into three subgroups (neg, zero,
and pos income_measure ) and the top decile is broken into three subgroups
(90-95, 95-99, and top 1%).
"""
assert isinstance(dframe, pd.DataFrame)
assert income_measure in dframe
if decile_details and num_bins != 10:
msg = "decile_details is True when num_quantiles is {}"
raise ValueError(msg.format(num_bins))
dframe.sort_values(by=income_measure, inplace=True)
if weight_by_income_measure:
dframe["cumsum_temp"] = np.cumsum(
np.multiply(dframe[income_measure].values, dframe[wt].values)
)
min_cumsum = dframe["cumsum_temp"].values[0]
else:
dframe["cumsum_temp"] = np.cumsum(dframe[wt].values)
min_cumsum = 0.0 # because s006 values are non-negative
max_cumsum = dframe["cumsum_temp"].values[-1]
cumsum_range = max_cumsum - min_cumsum
bin_width = cumsum_range / float(num_bins)
bin_edges = list(min_cumsum + np.arange(0, (num_bins + 1)) * bin_width)
bin_edges[-1] = 9e99 # raise top of last bin to include all observations
bin_edges[0] = -9e99 # lower bottom of 1st bin to include all observations
if decile_details:
assert bin_edges[1] > 1e-9 # bin_edges[1] is top of bottom decile
bin_edges.insert(1, 1e-9) # top of zeros
bin_edges.insert(1, -1e-9) # top of negatives
bin_edges.insert(-1, bin_edges[-2] + 0.5 * bin_width) # top of 90-95
bin_edges.insert(-1, bin_edges[-2] + 0.4 * bin_width) # top of 95-99
num_bins += 4
labels = range(1, (num_bins + 1))
dframe["bins"] = pd.cut(
dframe["cumsum_temp"], bin_edges, right=False, labels=labels
)
dframe.drop("cumsum_temp", axis=1, inplace=True)
return dframe
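# Illustrative sketch (added; not in the original module): tag 1,000 synthetic,
# equally weighted records with a decile number via `add_bins`.
def _example_add_bins():
    rng = np.random.default_rng(0)
    frame = pd.DataFrame({"expanded_income": rng.normal(50_000, 20_000, 1_000),
                          "s006": np.ones(1_000)})
    binned = add_bins(frame, income_measure="expanded_income", num_bins=10)
    return binned["bins"].value_counts(sort=False)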
def weighted_mean(pdf, col_name, wt_name="s006"):
"""
Return weighted mean of col_name
Parameters
----------
pdf: Pandas DataFrame object
col_name: variable to be averaged
wt_name: weight
"""
return float((pdf[col_name] * pdf[wt_name]).sum()) / float(
pdf[wt_name].sum() + EPSILON
)
def weighted_sum(pdf, col_name, wt_name="s006"):
"""
Return weighted sum of col_name
Parameters
----------
pdf: Pandas DataFrame object
col_name: variable to be averaged
wt_name: weight
"""
return float((pdf[col_name] * pdf[wt_name]).sum())
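# Quick sketch (added): weighted mean vs. weighted sum of the same toy column.
def _example_weighted_stats():
    frame = pd.DataFrame({"iitax": [100.0, 200.0, 300.0], "s006": [1.0, 2.0, 1.0]})
    return weighted_mean(frame, "iitax"), weighted_sum(frame, "iitax")  # ~ (200.0, 800.0)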
def percentile(
pdf,
col_name,
num_bins,
income_measure,
wt="s006",
income_wt=False,
result_type="avg",
decile_details=False,
):
"""
"""
qpdf = add_bins(
pdf,
income_measure=income_measure,
num_bins=num_bins,
wt=wt,
decile_details=decile_details,
weight_by_income_measure=income_wt,
)
gpdf = qpdf.groupby("bins", as_index=False)
if result_type == "avg":
wpdf = gpdf.apply(weighted_mean, col_name)
elif result_type == "sum":
wpdf = gpdf.apply(weighted_sum, col_name)
else:
msg = 'result_type must be "avg" or "sum"'
raise ValueError(msg)
return wpdf
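# Sketch (added): weighted decile averages of a hypothetical "combined" tax column,
# binned on expanded income with equal sample weights.
def _example_percentile():
    rng = np.random.default_rng(1)
    frame = pd.DataFrame({"combined": rng.normal(1_000.0, 300.0, 500),
                          "expanded_income": rng.normal(50_000.0, 20_000.0, 500),
                          "s006": np.ones(500)})
    return percentile(frame, "combined", 10, income_measure="expanded_income")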
def distribution(item, weight, agi):
"""
Return distribution of item by AGI level
"""
total = (item * weight).sum()
agi_1 = (item[agi < 0] * weight[agi < 0]).sum()
pct1 = round(agi_1 / total, 2)
agi_2 = (item[(agi > 1) & (agi < 5000)] * weight[(agi > 1) & (agi < 5000)]).sum()
    pct2 = round(agi_2 / total, 2)
agi_3 = (
item[(agi > 5000) & (agi < 10000)] * weight[(agi > 5000) & (agi < 10000)]
).sum()
pct3 = round(agi_3 / total, 2)
agi_4 = (
item[(agi > 10000) & (agi < 15000)] * weight[(agi > 10000) & (agi < 15000)]
).sum()
pct4 = round(agi_4 / total, 2)
agi_5 = (
item[(agi > 15000) & (agi < 20000)] * weight[(agi > 15000) & (agi < 20000)]
).sum()
pct5 = round(agi_5 / total, 2)
agi_6 = (
item[(agi > 20000) & (agi < 25000)] * weight[(agi > 20000) & (agi < 25000)]
).sum()
pct6 = round(agi_6 / total, 2)
agi_7 = (
item[(agi > 25000) & (agi < 30000)] * weight[(agi > 25000) & (agi < 30000)]
).sum()
pct7 = round(agi_7 / total, 2)
agi_8 = (
item[(agi > 30000) & (agi < 40000)] * weight[(agi > 30000) & (agi < 40000)]
).sum()
pct8 = round(agi_8 / total, 2)
agi_9 = (
item[(agi > 40000) & (agi < 50000)] * weight[(agi > 40000) & (agi < 50000)]
).sum()
pct9 = round(agi_9 / total, 2)
agi_10 = (
item[(agi > 50000) & (agi < 75000)] * weight[(agi > 50000) & (agi < 75000)]
).sum()
pct10 = round(agi_10 / total, 2)
agi_11 = (
item[(agi > 75000) & (agi < 100_000)] * weight[(agi > 75000) & (agi < 100_000)]
).sum()
pct11 = round(agi_11 / total, 2)
agi_12 = (
item[(agi > 100_000) & (agi < 200_000)]
* weight[(agi > 100_000) & (agi < 200_000)]
).sum()
pct12 = round(agi_12 / total, 2)
agi_13 = (
item[(agi > 200_000) & (agi < 500_000)]
* weight[(agi > 200_000) & (agi < 500_000)]
).sum()
pct13 = round(agi_13 / total, 2)
agi_14 = (
item[(agi > 500_000) & (agi < 1_000_000)]
* weight[(agi > 500_000) & (agi < 1_000_000)]
).sum()
pct14 = round(agi_14 / total, 2)
agi_15 = (
item[(agi > 1_000_000) & (agi < 1_500_000)]
* weight[(agi > 1_000_000) & (agi < 1_500_000)]
).sum()
pct15 = round(agi_15 / total, 2)
agi_16 = (
item[(agi > 1_500_000) & (agi < 2_000_000)]
* weight[(agi > 1_500_000) & (agi < 2_000_000)]
).sum()
pct16 = round(agi_16 / total, 2)
agi_17 = (
item[(agi > 2_000_000) & (agi < 5_000_000)]
* weight[(agi > 2_000_000) & (agi < 5_000_000)]
).sum()
pct17 = round(agi_17 / total, 2)
agi_18 = (
item[(agi > 5_000_000) & (agi < 10_000_000)]
* weight[(agi > 5_000_000) & (agi < 10_000_000)]
).sum()
pct18 = round(agi_18 / total, 2)
agi_19 = (item[agi > 10_000_000] * weight[agi > 10_000_000]).sum()
pct19 = round(agi_19 / total, 2)
df = [
agi_1,
agi_2,
agi_3,
agi_4,
agi_5,
agi_6,
agi_7,
agi_8,
agi_9,
agi_10,
agi_11,
agi_12,
agi_13,
agi_14,
agi_15,
agi_16,
agi_17,
agi_18,
agi_19,
]
pct = [
pct1,
pct2,
pct3,
pct4,
pct5,
pct6,
pct7,
pct8,
pct9,
pct10,
pct11,
pct12,
pct13,
pct14,
pct15,
pct16,
pct17,
pct18,
pct19,
]
index = [
"Zero or Negative",
"$1-$5K",
"$5K-$10K",
"$10K-$15K",
"$15K-$20K",
"$20K-$25K",
"$25K-$30K",
"$30K-$40K",
"$40K-$50K",
"$50K-$75K",
"$75K-$100K",
"$100K-$200K",
"$200K-$500K",
"$500K-$1M",
"$1M-$1.5M",
"$1.5M-$2M",
"$2M-$5M",
"$5M-$10M",
"$10M and over",
]
return df, pct, index
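# Sketch (added): split a weighted dollar amount across the AGI bins defined above
# for three sample records; returns the per-bin totals indexed by the bin labels.
def _example_distribution():
    agi = pd.Series([2_000.0, 60_000.0, 2_500_000.0])
    item = pd.Series([10.0, 20.0, 30.0])
    weight = pd.Series([1.0, 1.0, 1.0])
    totals, shares, labels = distribution(item, weight, agi)
    return pd.Series(totals, index=labels)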
def distplot(
calcs: list,
calc_labels: list,
var: str,
income_measure: str = "expanded_income",
result_type: str = "pct",
width=800,
height=350,
title="",
):
"""
Parameters
----------
calcs: list of tax calculator objects
calc_labels: labels for each calculator
var: variable whose distribution we're plotting
income_measure: income measure used to create bins
result_type: pct or sum
"""
def getdata(calc, var, income_measure):
agg, pct, index = distribution(
calc.array(var), calc.array("s006"), calc.array(income_measure)
)
return agg, pct, index
assert result_type in ["pct", "sum"]
pltdata = pd.DataFrame()
for (calc, label) in zip(calcs, calc_labels):
agg, pct, index = getdata(calc, var, income_measure)
if result_type == "pct":
pltdata[label] = pct
else:
pltdata[label] = [_ * 1e-9 for _ in agg]
pltdata["index"] = index
    melted = pd.melt(pltdata, id_vars="index")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 12:06:14 2018
@author: <NAME>
Spatial Wastewater Treatment and Allocation Tool
SWaTAT
"""
import glob
import pandas as pd
import numpy as np
import os
import logging
from functools import reduce
from math import pi, exp, sqrt
from sklearn.cluster import AgglomerativeClustering
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.colors as colors
from plotnine import *
from pandas.api.types import CategoricalDtype
import pyeto
import math
math.exp = np.exp
math.pow = np.power
math.sqrt = np.sqrt
math.log = np.log
class DataFrame:
"""
Processes the dataframe and adds all the columns to determine the cheapest option and the final costs and summaries
"""
def __init__(self, create_dataframe=None, lyr_names=None, dir_files: str = None,
input_file=None, file_name=None, save_csv=False, empty=None,
cell_area=None):
"""
Reads all layer datasets stored under a common directory and merges them into
a single dataframe, having the option to save the output into a new csv file.
"""
if not empty:
self.lyr_names = lyr_names
self.cell_area = cell_area # cell area in km2
if create_dataframe:
try:
directory = os.getcwd()
os.chdir(dir_files)
filenames = glob.glob("*.csv")
os.chdir(directory)
ldf = []
for file in filenames:
ldf.append(pd.read_csv(os.path.join(dir_files, file), header=None,
names=[lyr_names["X"], lyr_names["Y"], file.split(".")[0]]))
self.df = reduce(lambda a, b: pd.merge(a, b, on=[lyr_names["X"], lyr_names["Y"]]), ldf)
self.df.loc[self.df[lyr_names["TDS"]] < 0, lyr_names["TDS"]] = 0
self.df.loc[self.df[lyr_names["Region"]] == 1, lyr_names["Region"]] = "Algeria"
self.df.loc[self.df[lyr_names["Region"]] == 2, lyr_names["Region"]] = "Tunisia"
self.df.loc[self.df[lyr_names["Region"]] == 3, lyr_names["Region"]] = "Libya"
self.df.loc[self.df[lyr_names["Region"]] == 0, lyr_names["Region"]] = None
if save_csv:
self.df.to_csv(file_name + ".gz", index=False)
except FileNotFoundError:
print('No .csv files were found in the directory!')
else:
try:
self.df = pd.read_csv(input_file)
except FileNotFoundError:
print('Not such file in directory!')
self.df = self.df.dropna(subset=['Region'])
def copy(self, copy_data):
"""
Creates a copy of the object
"""
self.lyr_names = copy_data.lyr_names.copy()
self.cell_area = copy_data.cell_area
self.df = copy_data.df.copy()
def is_region(self, value, name='Region', over=False):
"""
Calculates a boolean vector telling which data cells are from a specific region
"""
if over:
return self.df[name] > value
else:
return self.df[name] == value
def is_urban(self):
"""
Calculates a boolean vector telling which data cells are urban population
"""
return self.df[self.lyr_names['IsUrban']] == 1
def calibrate_pop_and_urban(self, geo_boundary, region, pop_actual, pop_future, urban, urban_future, urban_cutoff):
"""
Calibrate the actual current population, the urban split and forecast the future population
"""
is_region = self.is_region(region, name=geo_boundary)
is_urban = self.is_urban()
max_pop = self.df.loc[is_region, self.lyr_names["Population"]].sum()
# Calculate the ratio between the actual population and the total population from the GIS layer
logging.info('Calibrate current population')
pop_ratio = pop_actual / self.df.loc[is_region, self.lyr_names["Population"]].sum()
# And use this ratio to calibrate the population in a new column
self.df.loc[is_region, self.lyr_names["Population"]] = self.df.loc[
is_region, self.lyr_names['Population']] * pop_ratio
# Calculate the urban split, by calibrating the cutoff until the target ratio is achieved
# Keep looping until it is satisfied or another break conditions is reached
logging.info('Calibrate urban split')
if urban == 0:
urban_cutoff = 'Unknown'
urban_modelled = 0
self.df.loc[is_region, self.lyr_names['IsUrban']] = 0
urban_growth = 0
rural_growth = ((1 - urban_future) * pop_future) / ((1 - urban) * pop_actual)
elif urban == 1:
urban_cutoff = 'Unknown'
urban_modelled = 1
self.df.loc[is_region, self.lyr_names['IsUrban']] = 1
urban_growth = (urban_future * pop_future) / (urban * pop_actual)
rural_growth = 0
else:
count = 0
prev_vals = [] # Stores cutoff values that have already been tried to prevent getting stuck in a loop
accuracy = 0.005
max_iterations = 30
urban_modelled = 0
while True:
# Assign the 1 (urban)/0 (rural) values to each cell
self.df.loc[is_region, self.lyr_names['IsUrban']] = self.df.loc[is_region, self.lyr_names[
"Population"]] > urban_cutoff
is_urban = self.is_urban()
# Get the calculated urban ratio, and limit it to within reasonable boundaries
pop_urb = self.df.loc[(is_region) & (is_urban), self.lyr_names["Population"]].sum()
urban_modelled = pop_urb / pop_actual
if abs(urban_modelled - urban) < accuracy:
break
else:
urban_cutoff = sorted([0.005, urban_cutoff - urban_cutoff * 2 *
(urban - urban_modelled) / urban, max_pop])[1]
if urban_modelled == 0:
urban_modelled = 0.05
elif urban_modelled == 1:
urban_modelled = 0.999
if urban_cutoff in prev_vals:
logging.info('NOT SATISFIED: repeating myself')
break
else:
prev_vals.append(urban_cutoff)
if count >= max_iterations:
logging.info('NOT SATISFIED: got to {}'.format(max_iterations))
break
count += 1
# Project future population, with separate growth rates for urban and rural
logging.info('Project future population')
urban_growth = (urban_future * pop_future) / (urban * pop_actual)
rural_growth = ((1 - urban_future) * pop_future) / ((1 - urban) * pop_actual)
self.df.loc[(is_urban) & (is_region), self.lyr_names['PopulationFuture']] = self.df.loc[
(is_urban) & (is_region),
self.lyr_names[
"Population"]] * urban_growth
        self.df.loc[~is_urban & is_region, self.lyr_names['PopulationFuture']] = self.df.loc[
            ~is_urban & is_region, self.lyr_names["Population"]] * rural_growth
return urban_cutoff, urban_modelled
def calculate_irrigation_system(self, geo_boundary, region, total_irrigated_area, irrigation_per_ha, irrigated_area_growth):
"""
creates a column with the irrigation water needs per cell area
"""
is_region = self.is_region(region, name=geo_boundary)
if self.df.loc[is_region, "IrrigatedArea"].sum() == 0:
area_ratio = 0
else:
area_ratio = total_irrigated_area / self.df.loc[is_region, "IrrigatedArea"].sum()
self.df.loc[is_region, 'IrrigatedArea'] = self.df.loc[is_region, 'IrrigatedArea'] * area_ratio
self.df.loc[is_region, 'IrrigatedAreaFuture'] = self.df.loc[is_region,
'IrrigatedArea'] * irrigated_area_growth
self.df.loc[is_region, 'IrrigatedAreaAverage'] = (self.df.loc[is_region, 'IrrigatedArea'] +
self.df.loc[is_region, 'IrrigatedAreaFuture']) / 2
self.df.loc[is_region, 'IrrigationWater'] = irrigation_per_ha * self.df['IrrigatedArea']
self.df.loc[is_region, 'IrrigationWaterFuture'] = irrigation_per_ha * self.df['IrrigatedAreaFuture']
self.df.loc[is_region, 'IrrigationWaterAverage'] = (self.df.loc[is_region, 'IrrigationWater'] +
self.df.loc[is_region, 'IrrigationWaterFuture']) / 2
def calculate_population_water(self, geo_boundary, region, urban_uni_water, rural_uni_water):
"""
Calculate the population water consumption
"""
is_region = self.is_region(region, name=geo_boundary)
is_urban = self.is_urban()
self.df.loc[is_region & is_urban,
'PopulationWater'] = self.df.loc[is_region & is_urban, "Population"] * urban_uni_water
        self.df.loc[is_region & ~is_urban,
                    'PopulationWater'] = self.df.loc[is_region & ~is_urban, "Population"] * rural_uni_water
        self.df.loc[is_region & is_urban,
                    'PopulationWaterFuture'] = self.df.loc[is_region & is_urban, "PopulationFuture"] * urban_uni_water
        self.df.loc[is_region & ~is_urban,
                    'PopulationWaterFuture'] = self.df.loc[is_region & ~is_urban,
                                                           "PopulationFuture"] * rural_uni_water
self.df.loc[is_region, 'PopulationWaterAverage'] = (self.df.loc[is_region, 'PopulationWater'] +
self.df.loc[is_region, 'PopulationWaterFuture']) / 2
def total_withdrawals(self, region=None):
"""
Calculates the total water withdrawals per cell area
"""
self.df['TotalWithdrawals'] = self.df['PopulationWater'] + self.df['IrrigationWater']
self.df['TotalAverageWithdrawals'] = self.df['PopulationWaterAverage'] + self.df['IrrigationWaterAverage']
def recharge_rate(self, geo_boundary, region, recharge_rate, environmental_flow):
"""
Calculates the recharge rate and environmental flow per cell area
"""
is_region = self.is_region(region, name=geo_boundary)
self.df.loc[is_region, 'RechargeRate'] = recharge_rate / 1000 * self.cell_area ** 2 * 1000 ** 2
self.df.loc[is_region, 'EnvironmentalFlow'] = environmental_flow / 1000 * self.cell_area ** 2 * 1000 ** 2
def groundwater_stress(self, geo_boundary, region, withdrawals, time=''):
"""
calculates the groundwater stress of each cell, based on the area annual water withdrawals,
the area-average annual recharge rate and the environmental stream flow
"""
is_region = self.is_region(region, name=geo_boundary)
self.df.loc[is_region, f'GroundwaterStress{time}'] = withdrawals.loc[is_region] / (self.df.loc[is_region,
'RechargeRate'] -
self.df.loc[is_region,
'EnvironmentalFlow'])
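    # Added note: the indicator is GWS = withdrawals / (recharge - environmental flow);
    # e.g. 9 Mm3 withdrawn against 12 Mm3 of recharge and 2 Mm3 of environmental flow
    # gives GWS = 9 / (12 - 2) = 0.9, which falls in the "low" band of the legend below.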
def groundwater_pumping_energy(self, geo_boundary, region, hours, density, delivered_head, pump_efficiency=1,
calculate_friction=False, viscosity=None, pipe=None):
'''
Calculates the energy requirements for pumping groundwater based on the water table level,
and the friction losses (in kWh/m3)
'''
is_region = self.is_region(region, name=geo_boundary)
if calculate_friction:
flow = self.df.loc[is_region, 'IrrigationWater'] / (hours * 60 * 60)
self.df.loc[is_region, 'GWPumpingEnergy'] = (density * 9.81 * (
delivered_head + self.df.loc[is_region, 'GroundwaterDepth']) +
pipe.calculate_pressure_drop(density, flow, viscosity,
self.df.loc[
is_region, 'GroundwaterDepth'])) / 3600000 / pump_efficiency
else:
self.df.loc[is_region, 'GWPumpingEnergy'] = (density * 9.81 * (
delivered_head + self.df.loc[is_region, 'GroundwaterDepth'])) / 3600000 / pump_efficiency
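    # Added note: without the friction term this is the hydraulic lifting energy
    # rho * g * (delivered_head + depth) / 3.6e6 in kWh/m3; e.g. a 50 m total lift
    # of water (rho = 1000 kg/m3) needs about 1000 * 9.81 * 50 / 3.6e6 ~= 0.14 kWh/m3.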
def reverse_osmosis_energy(self, geo_boundary, region, threshold, osmosis):
"""
Calculates the energy required for desalinisation of groundwater in each cell (kWh/m3)
"""
is_region = self.is_region(region, name=geo_boundary)
temperature = self.df.loc[is_region, 'GroundwaterTemperature']
solutes = self.df.loc[is_region, 'GroundwaterSolutes']
concentration = self.df.loc[is_region, 'TDS']
self.df.loc[is_region, 'DesalinationEnergy'] = osmosis.minimum_energy(solutes, concentration, temperature)
self.df.loc[is_region & (self.df['TDS'] <= threshold), 'DesalinationEnergy'] = 0
def total_energy(self):
"""
Aggregates groundwater pumping and desalination energy requirements
"""
self.df['IrrigationPumpingEnergy'] = self.df['GWPumpingEnergy'] * self.df['IrrigationWaterAverage']
self.df['IrrigationDesalinationEnergy'] = self.df['DesalinationEnergy'] * self.df['IrrigationWaterAverage']
self.df['IrrigationEnergyTotal'] = self.df['IrrigationDesalinationEnergy'] + self.df['IrrigationPumpingEnergy']
self.df['PopulationPumpingEnergy'] = self.df['GWPumpingEnergy'] * self.df['PopulationWaterAverage']
self.df['PopulationDesalinationEnergy'] = self.df['DesalinationEnergy'] * self.df['PopulationWaterAverage']
self.df['PopulationEnergyTotal'] = self.df['PopulationDesalinationEnergy'] + self.df['PopulationPumpingEnergy']
def clustering_algorithm(self, population_min, irrigated_min, cluster_num, clusterize):
"""
        Runs a clustering algorithm that combines and classifies the population and irrigated area into clusters
"""
if clusterize:
clustering_vector = self.df.loc[
(self.df['PopulationFuture'] > population_min) | (self.df['IrrigatedAreaFuture'] > irrigated_min), ['X', 'Y']]
hc = AgglomerativeClustering(n_clusters=cluster_num, affinity='euclidean', linkage='ward')
# save clusters for chart
y_hc = hc.fit_predict(clustering_vector)
clustering_vector['Cluster'] = y_hc
self.newdf = self.df.merge(clustering_vector, on=[self.lyr_names["X"], self.lyr_names["Y"]], how='outer')
else:
self.df.loc[(self.df['PopulationFuture'] <= population_min) & (
self.df['IrrigatedAreaFuture'] <= irrigated_min), 'Cluster'] = None
def calculate_per_cluster(self, cluster, parameter, variable, min_variable):
"""
Calculates the sum of a parameter per cluster
"""
is_region = self.is_region(cluster, 'Cluster')
is_type = self.is_region(min_variable, variable, over=True)
self.df.loc[is_region & is_type, parameter + 'PerCluster'] = self.df.loc[
is_region & is_type, parameter].sum()
def get_evap_i(self, lat, elev, wind, srad, tmin, tmax, tavg, month):
"""
Uses the Pyeto library to calculate all climatic variables needed for running FAO56 Penman Monteith
for an open water body and runs and returns the FAO56 Penman Monteith result
"""
J = 15 + (month - 1) * 30
latitude = pyeto.deg2rad(lat)
atmosphericVapourPressure = pyeto.avp_from_tmin(tmin)
saturationVapourPressure = pyeto.svp_from_t(tavg)
ird = pyeto.inv_rel_dist_earth_sun(J)
solarDeclination = pyeto.sol_dec(J)
sha = [pyeto.sunset_hour_angle(l, solarDeclination) for l in latitude]
extraterrestrialRad = [pyeto.et_rad(x, solarDeclination, y, ird) for
x, y in zip(latitude, sha)]
clearSkyRad = pyeto.cs_rad(elev, extraterrestrialRad)
netInSolRadnet = pyeto.net_in_sol_rad(srad * 0.001, albedo=0.05)
netOutSolRadnet = pyeto.net_out_lw_rad(tmin, tmax, srad * 0.001, clearSkyRad,
atmosphericVapourPressure)
netRadiation = pyeto.net_rad(netInSolRadnet, netOutSolRadnet)
tempKelvin = pyeto.celsius2kelvin(tavg)
windSpeed2m = pyeto.wind_speed_2m(wind, 10)
slopeSvp = pyeto.delta_svp(tavg)
atmPressure = pyeto.atm_pressure(elev)
psyConstant = pyeto.psy_const(atmPressure)
return self.fao56_penman_monteith(netRadiation, tempKelvin, windSpeed2m,
saturationVapourPressure,
atmosphericVapourPressure,
slopeSvp, psyConstant, 0.002, 0)
def get_eto(self, eto, lat, elevation, wind, srad, tmin, tmax, tavg):
'''
calculate ETo for each row for each month
'''
for i in range(1, 13):
self.df['{}_{}'.format(eto, i)] = 0
self.df['{}_{}'.format(eto, i)] = self.get_evap_i(self.df[lat],
self.df[elevation],
self.df['{}_{}'.format(wind, i)],
self.df['{}_{}'.format(srad, i)],
self.df['{}_{}'.format(tmin, i)],
self.df['{}_{}'.format(tmax, i)],
self.df['{}_{}'.format(tavg, i)],
i) * 30
def fao56_penman_monteith(self, net_rad, t, ws, svp, avp, delta_svp, psy, h, rs=70, shf=0.0):
"""
Estimate reference evapotranspiration (ETo) from a hypothetical
short grass reference surface using the FAO-56 Penman-Monteith equation.
Based on equation 6 in Allen et al (1998).
:param net_rad: Net radiation at crop surface [MJ m-2 day-1]. If
necessary this can be estimated using ``net_rad()``.
:param t: Air temperature at 2 m height [deg Kelvin].
:param ws: Wind speed at 2 m height [m s-1]. If not measured at 2m,
convert using ``wind_speed_at_2m()``.
:param svp: Saturation vapour pressure [kPa]. Can be estimated using
            ``svp_from_t()``.
:param avp: Actual vapour pressure [kPa]. Can be estimated using a range
of functions with names beginning with 'avp_from'.
:param delta_svp: Slope of saturation vapour pressure curve [kPa degC-1].
Can be estimated using ``delta_svp()``.
        :param psy: Psychrometric constant [kPa deg C]. Can be estimated using
``psy_const_of_psychrometer()`` or ``psy_const()``.
:param shf: Soil heat flux (G) [MJ m-2 day-1] (default is 0.0, which is
reasonable for a daily or 10-day time steps). For monthly time steps
*shf* can be estimated using ``monthly_soil_heat_flux()`` or
``monthly_soil_heat_flux2()``.
:return: Reference evapotranspiration (ETo) from a hypothetical
grass reference surface [mm day-1].
:rtype: float
"""
ra_constant = math.log((2 - 2 / 3 * h) / (0.123 * h)) * math.log((2 - 2 / 3 * h) / (0.1 * 0.123 * h)) / (
0.41 ** 2)
constant = 86400 * 0.622 / (1.01 * 0.287 * ra_constant)
a1 = (0.408 * (net_rad - shf) * delta_svp /
(delta_svp + (psy * (1 + (rs / ra_constant) * ws))))
a2 = (constant * ws / t * (svp - avp) * psy /
(delta_svp + (psy * (1 + (rs / ra_constant) * ws))))
return a1 + a2
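    # Added note: get_evap_i above calls this with h=0.002 m and rs=0 for open
    # water, which removes the surface-resistance term; the default rs=70 s/m is
    # the FAO-56 bulk surface resistance of the clipped-grass reference surface.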
def calculate_capex(self, treatment_system_name, treatment_system, values,
parameter, variable, limit, limit_func):
"""
Calculates the CAPEX for each treatment technology in each cluster
"""
population_total = self.df['PopulationFuturePerCluster'].dropna()
water_total = self.df[variable + 'ReclaimedWater'].dropna()
limit = np.array(list(limit) * water_total.shape[0])
if 'water' in limit_func:
water = limit
limit_multiplier = np.floor(water_total / limit)
population = np.floor(population_total / limit_multiplier)
func = create_function(treatment_system, parameter, values)
self.df.loc[self.df['Cluster'].notna(), treatment_system_name] = eval(func) * limit_multiplier
water = water_total % limit
population = population_total % limit_multiplier
            self.df.loc[self.df['Cluster'].notna(), treatment_system_name] = self.df.loc[
                self.df['Cluster'].notna(), treatment_system_name] + eval(func)
elif 'population' in limit_func:
population = limit
limit_multiplier = np.floor(population_total / limit)
water = np.floor(water_total / limit_multiplier)
func = create_function(treatment_system, parameter, values)
self.df.loc[self.df['Cluster'].notna(), treatment_system_name] = eval(func) * limit_multiplier
population = population_total % limit
water = water_total % limit_multiplier
            self.df.loc[self.df['Cluster'].notna(), treatment_system_name] = self.df.loc[
                self.df['Cluster'].notna(), treatment_system_name] + eval(func)
else:
water = water_total
population = population_total
func = create_function(treatment_system, parameter, values)
self.df.loc[self.df[variable + 'ReclaimedWater'].notna(), treatment_system_name] = eval(func)
def calculate_opex(self, treatment_system_name, treatment_system, values,
water_fraction, parameter, variable, years):
"""
Calculates the OPEX for each treatment technology in each cluster
"""
if variable == 'Population':
growth = 'PopulationGrowthPerCluster'
elif variable == 'Irrigation':
growth = 'IrrigatedGrowthPerCluster'
year = np.arange(years + 1)
population = np.array(
[x * (1 + y) ** year for x, y in np.array(self.df[['PopulationPerCluster', growth]].dropna())])
water = np.array(
[x * (1 + y) ** year for x, y in np.array(self.df[[variable + 'ReclaimedWater', growth]].dropna())])
func = create_function(treatment_system, parameter, values)
return eval(func)
def calculate_treatment_energy(self, treatment_system_name, treatment_system, values,
parameter, variable, time=''):
"""
Calculates the energy requirements for the specified treatment system
"""
population = self.df.groupby('Cluster').agg({'PopulationFuturePerCluster': 'first'})
water = self.df.groupby('Cluster').agg({variable + f'{time}ReclaimedWater': 'first'})
func = create_function(treatment_system, parameter, values)
self.df[treatment_system_name] = self.df['Cluster'].map(eval(func).iloc[:, 0])
def calculate_lcow_capex(self, variable, investment_var, water_var,
degradation_factor, income_tax_factor, future_var, years, discount_rate):
"""
Calculates the levelised cost of water for the capex
"""
if 'Population' in variable:
growth = 'PopulationGrowthPerCluster'
elif 'Irrigation' in variable:
growth = 'IrrigatedGrowthPerCluster'
year = np.arange(years + 1)
discount_factor = (1 / (1 + discount_rate)) ** year
water = np.array(
[x * (1 + y) ** year for x, y in np.array(self.df[[variable + 'ReclaimedWater', growth]].dropna())])
self.df[investment_var + '_LCOW'] = None
a = self.df.loc[self.df[variable + 'ReclaimedWater'].notna(), investment_var]
b = np.array([sum(x * discount_factor) for x in water])
self.df.loc[
self.df[variable + 'ReclaimedWater'].notna(), investment_var + '_LCOW'] = income_tax_factor * np.divide(a,
b,
out=np.zeros_like(
a),
where=b != 0)
def calculate_lcow_opex(self, variable, op_cost_var, water_var, degradation_factor,
present_var, future_var, years, discount_rate, opex_data):
"""
Calculates the levelised cost of water for the opex
"""
if variable == 'Population':
growth = 'PopulationGrowthPerCluster'
elif variable == 'Irrigation':
growth = 'IrrigatedGrowthPerCluster'
year = np.arange(years + 1)
discount_factor = (1 / (1 + discount_rate)) ** year
water = np.array(
[x * (1 + y) ** year for x, y in np.array(self.df[[variable + 'ReclaimedWater', growth]].dropna())])
self.df[op_cost_var + '_LCOW'] = None
a = np.array([sum(x * discount_factor) for x in opex_data])
b = np.array([sum(x * discount_factor) for x in water])
self.df.loc[self.df[variable + 'ReclaimedWater'].notna(), op_cost_var + '_LCOW'] = np.divide(a, b, out=np.zeros_like(a),
where=b != 0)
def calculate_lcow(self, name):
"""
Calculates the total LCOW for a technology
"""
self.df[name + 'LCOW'] = self.df[name + 'OPEX_LCOW'] + self.df[name + 'CAPEX_LCOW']
def least_cost_technology(self, systems, variable):
"""
Chooses the least-cost system in the cluster
"""
systems_list = []
for system in systems:
self.df[system] = pd.to_numeric(self.df[system])
self.df[variable + 'Technology'] = self.df[systems].idxmin(axis=1)
class_name = {}
for i, system in enumerate(systems):
class_name[str(i + 1)] = system.split('LCOW')[0]
systems_list.append(self.df[system])
bool_vector = self.df[variable + 'Technology'] == system
self.df.loc[bool_vector, variable + 'Technology'] = str(i + 1)
self.df[variable] = reduce(lambda a, b: np.minimum(a, b), systems_list)
self.df.loc[self.df[variable].isna(), variable + 'Technology'] = str(0)
class_name['0'] = 'Na'
return class_name
def least_cost_system(self, variables, dic_1, dic_2):
"""
        Gets the least-cost system
"""
class_name = {}
self.df['LeastCostSystem'] = self.df[variables[0]] + self.df[variables[1]]
for system in set(self.df['LeastCostSystem'].dropna()):
class_name[system] = dic_1[system[0]] + ', ' + dic_2[system[1]]
return class_name
def calculate_reclaimed_water(self, pop_water_fraction, time=''):
"""
Calculates the potential reused water per cluster
"""
not_na = self.df[f'IrrigationWater{time}PerCluster'].notna()
self.df[f'Population{time}ReclaimedWater'] = None
self.df[f'Irrigation{time}ReclaimedWater'] = None
self.df[f'Population{time}ReclaimedWater'] = self.df[f'PopulationWater{time}PerCluster'].dropna() * pop_water_fraction
self.df.loc[not_na, f'Irrigation{time}ReclaimedWater'] = self.df.loc[not_na].set_index('Cluster').index.map(
self.df.loc[not_na].groupby('Cluster')[f'Irrigation{time}ReusedWater'].sum())
self.df.loc[self.df[f'Irrigation{time}ReclaimedWater'] == 0, f'Irrigation{time}ReclaimedWater'] = None
def get_storage(self, leakage, area_percent, storage_depth, agri_water_req, agri_non_recoverable, time=''):
"""
Calculate the losses in the on-farm storage through the year, based on
a water balance (leakage + evaporation)
Parameters
----------
leakage : float
Leakage in mm per day of water percolated in the on-farm storage.
area_percent : float
Percentage of area covered by the on-farm storage.
storage_depth : float
Depth of the on-farm storage in meters.
"""
not_na = self.df[f'IrrigationWater{time}PerCluster'].notna()
self.df.loc[not_na, 'available_storage'] = area_percent * storage_depth * self.df.loc[
not_na, 'IrrigatedAreaFuture'] * 10000
self.df.loc[not_na, 'leakage_month'] = (leakage / 1000) * 30 * area_percent * self.df.loc[
not_na, 'IrrigatedAreaFuture'] * 10000
recoverable_water = (self.df.loc[not_na, f'IrrigationWater{time}'] - (
self.df.loc[not_na, agri_water_req] * self.df.loc[not_na, 'IrrigatedAreaFuture'] / (
1 - agri_non_recoverable)))
recoverable_water[recoverable_water < 0] = 0
for i in range(1, 13):
self.df.loc[not_na, f'stored_{i}'] = recoverable_water / 12 - \
(self.df.loc[not_na, 'leakage_month'] + \
((self.df.loc[not_na, f'eto_{i}'] / 1000) * area_percent *
self.df.loc[not_na, 'IrrigatedAreaFuture'] * 10000))
self.df.loc[not_na & (self.df[f'stored_{i}'] < 0), f'stored_{i}'] = 0
self.df.loc[not_na, f'stored_percentage_{i}'] = self.df.loc[not_na, f'stored_{i}'] / self.df.loc[
not_na, 'available_storage']
self.df.loc[not_na & (self.df[f'stored_percentage_{i}'] > 1), f'stored_{i}'] = self.df.loc[
not_na & (self.df[f'stored_percentage_{i}'] > 1), 'available_storage']
def reused_water(self, pop_water_fraction, pop_percentage_of_reuse, time=''):
"""
Calculates the total final amount of water extracted for irrigation after reuse
"""
not_na = self.df[f'IrrigationWater{time}PerCluster'].notna()
self.df.loc[not_na,
f'Irrigation{time}ReusedWater'] = self.df.loc[not_na].filter(regex='stored_[1-9]').sum(axis=1)
self.calculate_reclaimed_water(pop_water_fraction, time=time)
# self.df['IrrigationReclaimedWater'] = self.df.set_index('Cluster').index.map(
# self.df.groupby('Cluster')['IrrigationReusedWater'].sum())
self.df[f'Final{time}IrrigationWater'] = 0
self.df.loc[not_na, f'Final{time}IrrigationWater'] = self.df.loc[not_na, f'IrrigationWater{time}'] - self.df.loc[
not_na, f'Irrigation{time}ReusedWater']
self.losses = 0
self.df[f'Population{time}ReusedWater'] = 0
for cluster in set(self.df['Cluster'].dropna()):
is_cluster = self.is_region(cluster, 'Cluster')
count = self.df.loc[is_cluster, f'IrrigationWater{time}PerCluster'].dropna().count()
pop_water = self.df.loc[is_cluster, f'Population{time}ReclaimedWater'].dropna().mean() * pop_percentage_of_reuse
while (count > 0) and (pop_water > 0):
self.df.loc[
(is_cluster) & (not_na) & (self.df[f'Final{time}IrrigationWater'] > 0), f'Final{time}IrrigationWater'] -= (
pop_water / count)
self.df.loc[
(is_cluster) & (not_na) & (self.df[f'Final{time}IrrigationWater'] > 0), f'Population{time}ReusedWater'] += (
pop_water / count)
remaining_water = self.df.loc[
(is_cluster) & (self.df[f'Final{time}IrrigationWater'] < 0), f'Final{time}IrrigationWater'].dropna().sum()
self.df.loc[(is_cluster) & (self.df[f'Final{time}IrrigationWater'] < 0), f'Final{time}IrrigationWater'] = 0
count = self.df.loc[
(is_cluster) & (not_na) & (self.df[f'Final{time}IrrigationWater'] > 0), f'Final{time}IrrigationWater'].count()
pop_water = remaining_water * (-1)
self.df[f'Final{time}WaterWithdrawals'] = self.df[[f'Final{time}IrrigationWater', f'PopulationWater{time}']].sum(axis=1)
def get_water_stats(self):
"""
        Calculates basic water use statistics
"""
df = self.df.loc[self.df.Cluster.notna()]
withdrawals_per_cluster = df.groupby('Cluster').agg({
'FinalAverageWaterWithdrawals': 'sum',
'TotalWithdrawals': 'sum',
'IrrigationReusedWater': 'sum',
'PopulationReusedWater': 'sum',
'PopulationWater': 'sum',
'FinalIrrigationWater': 'sum'})
withdrawals_total = pd.DataFrame({'Irrigation extractions': df['FinalIrrigationWater'].sum(),
'Population extractions': df['PopulationWater'].sum(),
'Reused water from irrigation': df['IrrigationReusedWater'].sum(),
'Reused water from population': df['PopulationReusedWater'].sum(),
'Final withdrawals': df['FinalWaterWithdrawals'].sum(),
'Baseline withdrawals': df['TotalWithdrawals'].sum()}, index=[0])
withdrawals_baseline = pd.DataFrame({'Irrigation extractions': df['IrrigationWater'].sum(),
'Population extractions': df['PopulationWater'].sum(),
'Reused water from irrigation': 0,
'Reused water from population': 0,
'Baseline withdrawals': df['TotalWithdrawals'].sum()}, index=[0])
return withdrawals_per_cluster, withdrawals_total, withdrawals_baseline
def calculate_final_energy(self, treatment_systems_pop, treatment_systems_agri, time=''):
"""
Calculates the energy requirements for pumping, desalinating and treatment for each cell area
"""
self.df[f'Final{time}PumpingEnergy'] = self.df['GWPumpingEnergy'] * self.df[f'Final{time}WaterWithdrawals']
self.df[f'Final{time}DesalinationEnergy'] = self.df['DesalinationEnergy'] * self.df[f'Final{time}WaterWithdrawals']
self.df[f'Final{time}Energy'] = self.df[f'Final{time}PumpingEnergy'] + self.df[f'Final{time}DesalinationEnergy']
systems_vector_pop = self.df['PopulationLeastCostTechnology'].apply(
lambda row: treatment_systems_pop[row] + f'{time}Energy')
systems_vector_agri = self.df['IrrigationLeastCostTechnology'].apply(
lambda row: treatment_systems_agri[row] + f'{time}Energy')
self.df[f'Final{time}PopTreatmentEnergy'] = None
self.df[f'Final{time}AgriTreatmentEnergy'] = None
systems_vector_pop.loc[systems_vector_pop == f'Na{time}Energy'] = None
systems_vector_agri.loc[systems_vector_agri == f'Na{time}Energy'] = None
for value in set(systems_vector_pop.dropna()):
index_vec = systems_vector_pop == value
self.df.loc[index_vec, f'Final{time}PopTreatmentEnergy'] = self.df.loc[index_vec, value]
for value in set(systems_vector_agri.dropna()):
index_vec = systems_vector_agri == value
self.df.loc[index_vec, f'Final{time}AgriTreatmentEnergy'] = self.df.loc[index_vec, value]
self.df[f'Final{time}TreatmentEnergy'] = self.df[[f'Final{time}PopTreatmentEnergy', f'Final{time}AgriTreatmentEnergy']].sum(axis=1)
def least_cost_option(self):
"""
        Gets the best option for each cell between the conventional and the
least-cost evaluated system
"""
self.df['IrrigationWaterPerCluster'] = self.df['IrrigationWaterPerCluster'].fillna(0)
self.df['PopulationWaterPerCluster'] = self.df['PopulationWaterPerCluster'].fillna(0)
self.df['IrrigationLeastCost'] = self.df['IrrigationLeastCost'].fillna(0)
self.df['PopulationLeastCost'] = self.df['PopulationLeastCost'].fillna(0)
self.df['PotentialReusedWater'] = self.df['IrrigationWaterPerCluster'] + self.df['PopulationWaterPerCluster']
self.df['PotentialTotalCost'] = self.df['IrrigationLeastCost'] * self.df['IrrigationWaterPerCluster'] + \
self.df['PopulationLeastCost'] * self.df['PopulationWaterPerCluster']
self.df['CombinedLeastCost'] = pd.to_numeric(self.df['PotentialTotalCost'] / self.df['PotentialReusedWater'])
self.df['LeastCostOption'] = self.df[['CombinedLeastCost', 'IrrigationWaterCost']].idxmin(axis=1)
self.df.loc[self.df['LeastCostOption'] == 'CombinedLeastCost', 'LeastCostOption'] = self.df.loc[
self.df['LeastCostOption'] == 'CombinedLeastCost', 'LeastCostSystem']
self.df.loc[self.df['LeastCostOption'] == 'IrrigationWaterCost', 'LeastCostOption'] = '-1'
class PipeSystem:
"""
Creates an object for the piping system used in and specific region
"""
def __init__(self, diameter, roughness):
"""
Stores the information of the technology into parameters
"""
self.diameter = diameter
self.roughness = roughness / 1000
self.area = pi * (self.diameter / 2) ** 2
def calculate_velocity(self, flow):
"""
Calculates the fluid velocity
"""
        return flow / self.area
def calculate_reynolds(self, velocity, viscosity):
"""
Calculates the Reynolds number
"""
viscosity = viscosity / (1000 ** 2)
return velocity * self.diameter / viscosity
def calculate_friction_factor(self, velocity, viscosity):
"""
Calculates the friction factor
"""
Re = self.calculate_reynolds(velocity, viscosity)
return 8 * ((8 / Re) ** 12 + (
(2.457 * (1 / ((7 / Re) ** 0.9) + 0.27 * self.roughness / self.diameter)) ** 16 + (
37530 / Re) ** 16) ** (-1.5)) ** (1 / 12)
def calculate_pressure_drop(self, density, flow, viscosity, length):
"""
Calculates the pressure drop due to friction of the fluid against the walls of the pipe
"""
velocity = self.calculate_velocity(flow)
return self.calculate_friction_factor(velocity, viscosity) * (length / self.diameter) * (
density * (velocity ** 2) / 2)
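# Hypothetical usage sketch (added): head loss over 1 km of a 0.5 m pipe carrying
# 1 m3/s of water. The units are assumptions read off the methods above: roughness
# in mm, kinematic viscosity in mm2/s (about 1.0 for water), density in kg/m3,
# giving a pressure drop in Pa.
def _example_pipe_pressure_drop():
    pipe = PipeSystem(diameter=0.5, roughness=0.045)
    return pipe.calculate_pressure_drop(density=1000.0, flow=1.0, viscosity=1.0, length=1000.0)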
class ReverseOsmosis:
"""
Creates an object to model the reverse osmosis energy needs
"""
def __init__(self, osmotic_coefficient, efficiency):
"""
Stores the information of the technology into parameters
"""
self.efficiency = efficiency
self.osmotic_coefficient = osmotic_coefficient
self.solutes_dissociation = {'NaCl': 2, 'SrSO4': 2, 'glucose': 1}
self.solutes_molar_mass = {'NaCl': 58.4, 'SrSO4': 183.6, 'glucose': 180}
def molar_concentration(self, solute, concentration):
"""
Calculates the molar concentration of ions
"""
solutes_dissociation = np.array([self.solutes_dissociation[x] for x in solute])
solutes_molar_mass = np.array([self.solutes_molar_mass[x] for x in solute])
return solutes_dissociation * concentration / (10 ** 3 * solutes_molar_mass)
def osmotic_pressure(self, solutes, concentration, temperature):
"""
Calculate the osmotic pressure of the feed water
"""
return self.osmotic_coefficient * self.molar_concentration(solutes, concentration) * 0.083145 * (
temperature + 273)
def minimum_energy(self, solutes, concentration, temperature):
"""
        Calculates the minimum energy (in kWh/m3) required for desalination
"""
return self.osmotic_pressure(solutes, concentration, temperature) / 36
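# Sketch (added): minimum desalination energy (kWh/m3) for brackish NaCl water at a
# TDS of 5,000 mg/L and 25 degC; the osmotic coefficient of 0.95 is an assumed value.
def _example_minimum_desalination_energy():
    osmosis = ReverseOsmosis(osmotic_coefficient=0.95, efficiency=1)
    return osmosis.minimum_energy(['NaCl'], np.array([5000.0]), np.array([25.0]))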
def convert_m3_to_mm(cell_area, path, df, *layers):
"""
Convert water withdrawals from cubic meters to mm, based on the cell area, and saves it
"""
for layer in layers:
print(' - Saving {} layer...'.format(layer))
temp_layer = df.loc[:, ['X', 'Y', layer]]
temp_layer[layer] = temp_layer[layer] / (1000 ** 2) * 100
temp_layer.to_csv(path + "/CSV/" + layer + ".csv", index=False)
def save_layers(path, df, *layers):
"""
Saves the specified results layers in separate csv files
"""
try:
for layer in layers:
print(' - Saving {} layer...'.format(layer))
temp_layer = df[['X', 'Y', layer]]
temp_layer.to_csv(path + "/CSV/" + layer + ".gz", index=False)
except:
print(layer + ' layer not found')
def delete_files(folder):
"""
Delete file from folder
"""
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
def multiple_sheet_excel(xls, input_string):
"""
Read multiple sheets from excel file into a dictionary
"""
if input_string != 'all':
sheets = [xls.sheet_names[int(x.strip()) - 1] for x in input_string.split(',')]
else:
sheets = xls.sheet_names
xls_dfs = {}
for sheet in sheets:
xls_dfs[sheet] = xls.parse(sheet)
return xls_dfs
def create_function(treatment_system, parameter, values):
"""
Creates a function based on user input
"""
func = str(treatment_system.loc[0, parameter])
func = func.replace(' ', '')
for key, value in values.items():
func = func.replace(key, str(value))
return func
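# Sketch (added): `create_function` is plain string substitution on a formula read
# from a treatment-system table; the one-row table below is purely hypothetical.
def _example_create_function():
    system = pd.DataFrame({'CAPEX': ['1000*population+50*water']})
    expr = create_function(system, 'CAPEX', {'population': 200, 'water': 30.0})
    return eval(expr)  # 1000*200 + 50*30.0 = 201500.0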
def calculate_lcows(data, clusters, variable, water_fraction, degradation_factor,
income_tax_factor, years, discount_rate, treatment_systems, variable_present, opex_data):
"""
Loops through the given treatment systems and calls for the lcow functions
"""
for name, system in treatment_systems.items():
print('\nCalculating {} treatment system LCOW...'.format(name))
data.df[name + 'LCOW_CAPEX'] = None
data.df[name + 'LCOW_OPEX'] = None
data.df[name + 'LCOW'] = None
data.calculate_lcow_capex(variable=variable, investment_var=name + 'CAPEX',
water_var=variable + 'Water',
degradation_factor=degradation_factor, income_tax_factor=income_tax_factor,
future_var=variable_present + 'Future', years=years, discount_rate=discount_rate)
data.calculate_lcow_opex(variable=variable, op_cost_var=name + 'OPEX',
water_var=variable + 'Water',
degradation_factor=degradation_factor, present_var=variable_present,
future_var=variable_present + 'Future', years=years, discount_rate=discount_rate,
opex_data=opex_data[name])
data.calculate_lcow(name)
def gws_plot_mathplot(gws_values, file_name):
"""
Creates the groundwater stress indicator plot
"""
color_list, color_legend = create_gws_color_list(gws_values)
fig = plt.figure()
text_color = to_rgb(80, 80, 80)
p = fig.add_subplot(111)
box = p.get_position()
p.set_position([box.x0, box.y0, box.width * 0.6, box.height])
bar = p.bar(np.arange(len(gws_values)), gws_values, color=color_list,
edgecolor=[tuple(x) for x in np.array(color_list) * 0.8], linewidth=3)
plt.xticks(np.arange(len(gws_values)), ['Baseline', 'Reusing Water'], color=text_color)
plt.ylabel('Groundwater Stress Indicator', color=text_color)
legend = plt.legend(handles=color_legend, edgecolor=text_color,
facecolor=to_rgb(240, 240, 240), loc='lower left',
bbox_to_anchor=(1, 0))
plt.setp(legend.get_texts(), color=text_color)
autolabel(bar, [tuple(x) for x in np.array(color_list) * 0.8])
p.spines['right'].set_visible(False)
p.spines['top'].set_visible(False)
p.spines['left'].set_color(text_color)
p.spines['bottom'].set_color(text_color)
p.tick_params(colors=text_color)
    plt.savefig(file_name, format='pdf')
    plt.show()
def create_gws_color_list(values):
"""
    Creates the color list for the GWS indicator
"""
color_values = [to_rgb(255, 254, 187),
to_rgb(255, 202, 110),
to_rgb(255, 139, 76),
to_rgb(245, 55, 43),
to_rgb(193, 0, 41)]
color_list = []
for value in values:
if value < 1:
color_list.append(color_values[0])
elif value < 5:
color_list.append(color_values[1])
elif value < 10:
color_list.append(color_values[2])
elif value < 20:
color_list.append(color_values[3])
else:
color_list.append(color_values[4])
color_legend = [mpatches.Patch(color=color_values[0], label='Low (<1)'),
mpatches.Patch(color=color_values[1], label='Low to medium (1-5)'),
mpatches.Patch(color=color_values[2], label='Medium to high (5-10)'),
mpatches.Patch(color=color_values[3], label='High (10-20)'),
mpatches.Patch(color=color_values[4], label='Extremely high (>20)')]
return color_list, color_legend
def autolabel(rects, colors):
"""
Attach a text label above each bar displaying its height
"""
for i, rect in enumerate(rects):
height = rect.get_height()
plt.text(rect.get_x() + rect.get_width() / 2., height + 0.1,
str(round(height, 2)),
ha='center', va='bottom', color=colors[i],
weight='bold')
def to_rgb(*values):
"""
Creates an RGB color tuple (0-1 scale) from RGB values on the 0-255 scale
"""
return tuple(np.array(values) / 255)
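# Illustrative example (not part of the original script): to_rgb(255, 0, 0)
# returns (1.0, 0.0, 0.0), i.e. RGB scaled to the 0-1 range matplotlib expects.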
def legend_generator(values):
"""
Maps groundwater stress values to legend category codes ('1' to '5')
"""
color_list = []
for value in values:
if value < 1:
color_list.append('1')
elif value < 5:
color_list.append('2')
elif value < 10:
color_list.append('3')
elif value < 20:
color_list.append('4')
else:
color_list.append('5')
return color_list
def gws_plot(gws_values, names, order):
"""
Creates a graph showing the groundwater stress indicator for the different scenarios
"""
color_values = [to_rgb(255, 254, 187),
to_rgb(255, 202, 110),
to_rgb(255, 139, 76),
to_rgb(245, 55, 43),
to_rgb(193, 0, 41)]
color_list = [colors.to_hex(x) for x in color_values]
border_color = '#E8E8E8'
df = pd.DataFrame(gws_values, columns=['GWS'])
df['Legend'] = legend_generator(gws_values)
df['X'] = names
new_df = df.append(pd.DataFrame({'GWS': [0, 0, 0, 0, 0], 'Legend': ['1', '2', '3', '4', '5'], 'X': 'Baseline'}))
point_df = df.copy()
point_df['GWS'] += 0.3
df['GWS'] = round(df['GWS'], 2)
new_df['X'] = new_df['X'].astype('category')
new_df['X_cat'] = new_df['X'].cat.reorder_categories(order, ordered=True)
p = (ggplot() +
geom_bar(new_df, aes(x='X_cat', y='GWS', fill='Legend'), stat='identity', size=0.5, color='gray') +
geom_text(df, aes(x='X', y='GWS/2', label='GWS'), color='black', nudge_y=0, size=8) +
scale_fill_manual(labels=['Low (<1)', 'Low to medium (1-5)', 'Medium to high (5-10)', 'High (10-20)',
'Extremely high (>20)'], values=color_list) +
scale_y_continuous(expand=[0, 0]) +
coord_flip() +
theme_classic() +
labs(y='Groundwater Stress Indicator', x='Scenario') +
theme(legend_title=element_blank(),
axis_title_x=element_text(color='black'),
axis_title_y=element_text(color='black'))
)
p.save('GWS.pdf', height=4, width=2.5)
def energy_plot(energy_start, energy_end, sensitivity_energy, order):
"""
Creates a graph summarizing the energy uses in the entire region for the different scenarios
"""
energy_start[1].index = ['Desalination energy', 'Pumping energy']
df_end = pd.DataFrame(columns=list('XYZ'))
for i, value in energy_end.items():
energy_end[i].index = ['Desalination energy', 'Pumping energy', 'Treatment energy']
temp_df = pd.DataFrame({'X': i, 'Y': value.values, 'Z': energy_end[i].index})
df_end = df_end.append(temp_df)
df_start = | pd.DataFrame({'X': energy_start[0], 'Y': energy_start[1].values, 'Z': energy_start[1].index}) | pandas.DataFrame |
import numpy as np
import pandas as pd
from itertools import combinations
class LabelEncoder:
"""
This class encodes categorical values to either numerical values or the labels specified by the user.
Encoding categorical values is necessary because ML models work only with numbers
"""
def __init__(self,labels=None):
"""
Simple initialization. Takes in only the class labels.
:param labels: The labels the user wants to encode with. Must have the same length as the number of unique values in the column; list or numpy array.
"""
self.labels = labels
def fit(self,X):
"""
Forms the labels for the classes.
:param X: input X.
"""
self.classes = np.unique(X)
if self.labels:
if len(self.labels)!=len(self.classes):
raise ValueError("Length mismatch: The number of unique elements in the input is not equal to the output")
self.classes_and_labels = {self.classes[x]:self.labels[x] for x in range(len(self.labels))}
else:
self.classes_and_labels = {self.classes[x]:x for x in range(len(self.classes))}
return self
def transform(self,X):
"""
Transforms the input array using the labels already acquired in the fit() function
:param X: input array or series or list
:return:
"""
if len(self.classes_and_labels) != len(np.unique(X)):
raise ValueError("Previously unseen values")
enc_arr = np.zeros(len(X),dtype=object)
for i in range(len(X)):
enc_arr[i] = self.classes_and_labels[X[i]]
return enc_arr
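# Usage sketch (illustrative only, not part of the original module; the sample
# data and labels below are made up):
#   enc = LabelEncoder(labels=['low', 'high'])
#   enc.fit(['a', 'b', 'a'])
#   enc.transform(['a', 'b'])   # -> array(['low', 'high'], dtype=object)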
class StandardScaler:
"""
StandardScaler is used to scale the input array or dataframe such that the array values have a mean of 0 and a standard deviation of 1.
Note: This version of StandardScaler only accepts a dataframe or a numpy array as input.
:return: It returns a multidimensional array
"""
def fit(self,X):
if type(X)!=type(pd.DataFrame()) and type(X)!=type(np.array([1,2])): # checks for the datatype
raise TypeError(f"StandardScaler accepts either a dataframe or a numpy array as input. It does not accept {type(X)} as input dtype")
if type(X)==type(pd.DataFrame()):
X = X.values # gets the numpy array from the DataFrame
self.mean_X,self.std_X = np.zeros(X.shape[1]),np.zeros(X.shape[1])
for i in range(X.shape[1]):
req_arr = np.squeeze(X[:,i])
self.mean_X[i],self.std_X[i] = np.mean(req_arr,axis=0),np.std(req_arr,axis=0) # computes the mean and std of each feature.
else:
req_arr = np.squeeze(X)
self.mean_X,self.std_X = np.mean(req_arr,axis=0),np.std(req_arr,axis=0)
return self
def transform(self,X):
"""
:param X: input array or dataframe
:return: returns a scaled multidimensional numpy array.
Important note: StandardScaler assumes that the input dataframe has its columns in the same order as the one passed to the fit method.
"""
if type(X)==type(pd.DataFrame()):
if X.shape[1]!=len(self.mean_X):
raise ValueError("Length mismatch: The transformer was trained on a different length") # checks for the number of features and if they don't match it outputs an error
if type(X)==type(pd.DataFrame()):
new_X = np.zeros(X.shape)
X = X.values
for i in range(X.shape[1]):
new_X[:,i] = (np.squeeze(X[:,i])-self.mean_X[i])/self.std_X[i]
else:
X = np.squeeze(X)
new_X = (X-self.mean_X)/self.std_X
return new_X
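# Usage sketch (illustrative only; the column names and values are made up):
#   df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
#   scaled = StandardScaler().fit(df).transform(df)
#   # each column of `scaled` now has mean 0 and unit standard deviation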
class MinMaxScaler:
"""
MinMaxScaler is used to normalize the input array or dataframe such that the array values fall into the range [0, 1].
Note: This version of MinMaxScaler only accepts a dataframe or a numpy array as input.
:return: It returns a multidimensional array
"""
def fit(self,X):
if type(X)!=type(pd.DataFrame()) and type(X)!=type(np.array([1,2])): # checks for the datatype
raise TypeError(f"MinMaxScaler accepts either a dataframe or a numpy array as input. It does not accept {type(X)} as input dtype")
if type(X)==type(pd.DataFrame()):
X = X.values # gets the numpy array from the DataFrame
self.min_X,self.max_X = np.zeros(X.shape[1]),np.zeros(X.shape[1])
for i in range(X.shape[1]):
self.min_X[i],self.max_X[i] = np.min(np.squeeze(X[:,i])),np.max(np.squeeze(X[:,i]))
else:
req_arr = np.squeeze(X)
self.min_X,self.max_X = np.min(req_arr,axis=0),np.max(req_arr,axis=0)
return self
def transform(self,X):
"""
:param X: input array or dataframe
:return: returns a normalized multidimensional numpy array.
Important note: MinMaxScaler assumes that the input dataframe has its columns in the same order as the one passed to the fit method.
"""
if type(X)==type(pd.DataFrame()):
if X.shape[1]!=len(self.min_X):
raise ValueError("Length mismatch: The transformer was trained on a different length") # checks for the number of features and if they don't match it outputs an error
if type(X)==type( | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
import os
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meirio', 'Takao', 'IPAexGothic', 'IPAPGothic', 'VL PGothic', 'Noto Sans CJK JP']
# In[2]:
# Make directory
#
# df_hospital_beds = pd.read_csv('data_Koro/hospital_beds.csv',index_col=0)
# dirnames = (df_hospital_beds['japan_prefecture_code']+df_hospital_beds['都道府県名']).values
# for i in range(len(dirnames)):
# path = 'resultD_transport_strategy_hospital/' + dirnames[i]
# os.makedirs(path, exist_ok=True)
# In[3]:
# MODE = 'all'
MODE = 'normal'
filenames = glob.glob('data_hospital/x_*')
filenames.sort()
forecast_dates = [filename.split('_')[-1].split('.')[0] for filename in filenames]
# In[ ]:
# In[18]:
def visualization(gamma,x_type,forecast_date):
print("forcasted date ={0}".format(f_date))
# Load the weights
df_w = pd.read_csv('data_Kokudo/w_distance.csv',index_col=0)
W= df_w.values
w_pulp = W.T.reshape(-1)
# Also compute x and x_q0025
df_x0975 = pd.read_csv('data_hospital/x0975_{0}.csv'.format(forecast_date),index_col=0 )
df_x0025 = pd.read_csv('data_hospital/x0025_{0}.csv'.format(forecast_date),index_col=0 )
df_xmean = pd.read_csv('data_hospital/x_{0}.csv'.format(forecast_date),index_col=0 )
gammas = np.load('data_hospital_transport/gammas_{0}_{1:03}_{2}.npy'.format(x_type,int(gamma*100),forecast_date))
x_mean = df_xmean.values
x_q0975 = df_x0975.values
x_q0025 = df_x0025.values
N = x_mean.shape[1]
T = x_mean.shape[0]
L = np.kron(np.ones((1,N)),np.eye(N)) - np.kron(np.eye(N),np.ones((1,N)))
uv = np.load('data_hospital_transport/u_{0}_{1:03}_{2}.npy'.format(x_type,int(gamma*100),forecast_date))
y_mean = np.zeros(x_mean.shape)
y_q0975 = np.zeros(x_mean.shape)
y_q0025 = np.zeros(x_mean.shape)
y_mean[0] = x_mean[0]
y_q0975[0] = x_q0975[0]
y_q0025[0] = x_q0025[0]
sum_u = np.zeros(T)
sum_cost = np.zeros(T)
for k in range(T-1):
y_mean[k+1] = y_mean[k] + x_mean[k+1] - x_mean[k] + L.dot(uv[k])
y_q0975[k+1] = y_q0975[k] + x_q0975[k+1] - x_q0975[k] + L.dot(uv[k])
y_q0025[k+1] = y_q0025[k] + x_q0025[k+1] - x_q0025[k] + L.dot(uv[k])
sum_u[k+1] = np.sum(uv[k])
sum_cost[k+1] = np.sum(w_pulp*uv[k])
# Load the number of hospital beds
df_hospital_beds = pd.read_csv('data_Koro/hospital_beds.csv',index_col=0)
dirnames = (df_hospital_beds['japan_prefecture_code']+df_hospital_beds['都道府県名']).values
names = df_hospital_beds['都道府県名'].values
weeks = df_hospital_beds.columns[2:].values
new_week = max(weeks)
M = df_hospital_beds[new_week].values
times = pd.to_datetime(df_xmean.index)
date_s = min(times)
date_e = max(times)
# Nationwide forecast of the number of hospitalized patients
plt.figure(figsize = (6,4))
plt.fill_between(times,x_q0025.sum(axis=1),x_q0975.sum(axis=1),facecolor = 'lime',alpha = 0.3,label = '95%信頼区間')
plt.plot(times,x_mean.sum(axis=1),'*-',color = 'lime',label = '平均値')
plt.plot([date_s,date_e],np.ones(2)*0.8*M.sum(),"--",label = '病床使用率 80%',color = 'red',linewidth = 2.0)
plt.plot([date_s,date_e],np.ones(2)*M.sum(),"--",label = '病床使用率 100%',color = 'purple',linewidth = 2.0)
plt.gca().tick_params(axis='x', rotation= -60)
plt.title('全国の入院者数の予測値, 予測日={0}'.format(forecast_date),fontsize = 15)
plt.xlim([date_s,date_e])
plt.ylim([0, 1.5* M.sum(),])
plt.ylabel('入院者数 [人]')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.grid()
plt.savefig('resultB_google_prediction/all_hospital_{0}.png'.format(forecast_date),bbox_inches='tight',dpi = 100)
if MODE == 'normal':
plt.savefig('resultB_google_prediction/all_hospital.png',bbox_inches='tight',dpi = 100)
plt.close()
# Number of hospitalized patients per prefecture
plt.figure(figsize = (50,25))
plt.subplots_adjust(wspace=0.1, hspace=0.5)
for i in range(47):
plt.subplot(10,5,i+1)
plt.fill_between(times,x_q0025[:,i],x_q0975[:,i],facecolor = 'lime',alpha = 0.3,label = '95%信頼区間')
plt.plot(times,x_mean[:,i],'*-',color = 'lime',label = '平均値')
plt.plot([date_s,date_e],np.ones(2)*0.8*M[i],"--",label = '病床使用率 80%',color = 'red',linewidth = 2.0)
plt.plot([date_s,date_e],np.ones(2)*M[i],"--",label = '病床使用率 100%',color = 'purple',linewidth = 2.0)
plt.gca().tick_params(axis='x', rotation= -60)
plt.title(names[i],fontsize = 20)
plt.xlim([date_s,date_e])
plt.ylim([0, 1.5* M[i]])
plt.grid()
if i < 42:
plt.tick_params(labelbottom=False)
if i == 0:
plt.legend()
plt.savefig('resultB_google_prediction/each_hospital_{0}.png'.format(forecast_date),bbox_inches='tight',dpi = 100)
if MODE == 'normal':
plt.savefig('resultB_google_prediction/each_hospital.png',bbox_inches='tight',dpi = 100)
plt.close()
# Forecast results for infected patients per prefecture
plt.figure(figsize = (50,25))
plt.subplots_adjust(wspace=0.1, hspace=0.5)
for i in range(47):
plt.subplot(10,5,i+1)
max_beds = M[i]
# Bed capacity limit
plt.plot([date_s,date_e],[0.8*max_beds,0.8*max_beds],'--',label = '病床使用率80%',color = 'red',linewidth = 2.0)
plt.plot([date_s,date_e],[max_beds,max_beds],'--',label = '病床使用率100%',color = 'purple',linewidth = 2.0)
# Without patient transfers
plt.fill_between(times,x_q0025[:,i],x_q0975[:,i],facecolor = 'lime',alpha = 0.5,label = '医療シェアリングなし',)
plt.plot(times,x_mean[:,i],"*-",linewidth = 2,color= 'lime')
# With patient transfers
plt.fill_between(times,y_q0025[:,i],y_q0975[:,i],facecolor = 'orange',alpha = 0.5,label = '医療シェアリングあり',)
plt.plot(times,y_mean[:,i],"*-",linewidth = 2,color = 'orange')
plt.xlim([date_s,date_e])
plt.ylim([0,1.5*max_beds])
plt.grid()
plt.gca().tick_params(axis='x', rotation= -60)
plt.title(names[i],fontsize = 20)
if i < 42:
plt.tick_params(labelbottom=False)
if i == 0:
plt.legend()
if MODE == 'normal':
plt.savefig('resultD_transport_strategy_hospital/main/each_severe_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
plt.savefig('resultD_transport_strategy_hospital/main/each_severe_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
plt.close()
# Cost evaluation
times = pd.to_datetime(df_xmean.index)[:-1]
date_s = min(times)
date_e = max(times)
max_beds = M.sum()
# Number of transferred patients
plt.plot(times,sum_u[:-1],"*-",linewidth = 2,color= 'black',label = '入院者数')
plt.xlim([date_s,date_e])
plt.gca().tick_params(axis='x', rotation= -60)
# plt.title('',fontsize = 20)
plt.ylabel('毎日の医療シェアが必要な入院者の合計 [人]')
plt.legend()
if MODE == 'normal':
plt.savefig('resultD_transport_strategy_hospital/cost/num_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
plt.savefig('resultD_transport_strategy_hospital/cost/num_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
plt.close()
times = | pd.to_datetime(df_xmean.index) | pandas.to_datetime |
import pickle
from io import BytesIO
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
from rdt.transformers import (
CategoricalTransformer, LabelEncodingTransformer, OneHotEncodingTransformer)
def test_categorical_numerical_nans():
"""Ensure CategoricalTransformer works on numerical + nan only columns."""
data = pd.DataFrame([1, 2, float('nan'), np.nan], columns=['column_name'])
transformer = CategoricalTransformer()
transformer.fit(data, list(data.columns))
transformed = transformer.transform(data)
reverse = transformer.reverse_transform(transformed)
| pd.testing.assert_frame_equal(reverse, data) | pandas.testing.assert_frame_equal |
import sox
import random
import yaml
import os
import numpy as np
from inspect import getmembers, signature, isclass, isfunction, ismethod
import librosa
import librosa.display
import yaml
import tempfile
import glob
import logging
import pandas as pd
import itertools
import sys
from collections import OrderedDict
import soloact
random.seed(666) # seed is set for the (hidden) global Random()
def flatten(x, par = '', sep ='.'):
"""
Recursively flatten dictionary with parent key separator
Use to flatten augmentation labels in DataFrame output
Args:
x (dict): with nested dictionaries.
par (str): parent key placeholder for subsequent levels from root
sep (str: '.', '|', etc): separator
Returns:
Flattened dictionary
Example:
x = {'Cookies' : {'milk' : 'Yes', 'beer' : 'No'}}
par, sep = <defaults>
output = {'Cookies.milk' : 'Yes', 'Cookies.beer' : 'No'}
"""
store = {}
for k,v in x.items():
if isinstance(v,dict):
store = {**store, **flatten(v, par = par + sep + k if par else k)}
else:
store[par + sep + k] = v
return store
def load_track(path, sr = 44100):
"""Librosa load to numpy array
Args:
path (str): filepath
sr (int): sampling rate - 44100 default and preferred
"""
x, sr = librosa.load(path, sr = sr) # default
return x
def rand(x,y):
"""
Randomizer for augmentation pipepline
Args:
x (int, float): lower_bound
y (int, float): upper_bound
Returns:
random number between bounds
"""
# use uniform if parameters passed are below 1
if all([v < 1 for v in [x,y]]):
return random.uniform(x, y)
else:
return random.randint(x,y)
def validate_reduce_fx(effects):
"""
Function crossvalidating existence of effects
between pysox and configuration
Args:
effects (dict): desired effects for augmentation
Returns:
Bool & dict
True - all effects present, return original effects dictionary
False - one or more not present, return effects not in pysox
"""
FX = sox.Transformer()
sox_arsenal = dict(getmembers(FX, predicate=lambda x: ismethod(x)))
try:
assert all(f in sox_arsenal for f in effects), 'Invalid methods provided'
return True, effects
except Exception as e:
invalid = {f for f in effects if f not in sox_arsenal}
return False, invalid
logger = logging.getLogger()
logger.setLevel('CRITICAL')
def feature_pipeline(arr, **kwargs):
"""
Current feature pipeline supporting mfcc only
Args:
arr(np array): row vector generated by librosa.load
kwargs (dict): arguments to feature.mfcc
Returns:
vector of shape (n_mfcc, )
"""
mfcc = librosa.feature.mfcc(arr,
sr = 44100,
n_mfcc = 26, **kwargs)
mfcc_mean = np.mean(mfcc, axis = 0)
return mfcc_mean
def pad(l_arrays):
"""
Naive padding using max shape from list of numpy arrays
to normalize shapes for all
Args:
l_arrays (list of np arrays): arrays to pad to a common shape
Returns:
np.matrix of shape (length of array, features, 1)
"""
# Retrieve max
max_shape = max([x.shape for x in l_arrays], key = lambda x: x[0])
def padder(inp, max_shape):
zero_grid = np.zeros(max_shape)
x,y = inp.shape
zero_grid[:x, :y] = inp
return zero_grid
# Pad with zero grid skeleton
reshapen = [padder(x, max_shape) for x in l_arrays]
# Make ndarray
batch_x = np.array(reshapen)
return batch_x
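# Illustrative sketch (made-up shapes): padding two feature arrays of different
# lengths to a common shape.
#   batch = pad([np.zeros((3, 1)), np.zeros((5, 1))])
#   batch.shape   # -> (2, 5, 1)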
def augment_track(file, n, effects,
exercise = 'regression',
sustain = ['overdrive', 'reverb'],
write = False
):
"""
Track-wise augmentation procedure
- Classification: randomize effect on or off - no randomization at parameter level
- Regression: Persistent effects with randomization at parameter level
dependent on config state ('default', 'random')
Args:
file (str): filepath to .wav file
effects (dict): candidate effects defined by config state
state (str):
'random' : requires upper and lower bounds
'constant': will take upper if default is False, otherwise effect default
exercise (str): regression or classification
sustain: effect to persist despite randomized on/off in classification
write (bool: False, str: Path):
Path: if directory not present will make
"""
# Init transformer
FX = sox.Transformer()
labels = {}
for effect, parameters in effects.items():
if exercise.lower() == 'classification' and effect not in sustain:
# turn effects on or off randomly
if int(random.choice([True, False])) == 0:
# print ('{} skipped!'.format(effect))
continue # skip effect
effect_f = getattr(FX, effect)
f_defaults = signature(effect_f).parameters
# store defaults, could be done out of scope
f_defaults = {k: f_defaults[k].default for k in f_defaults.keys()}
used = {}
for param, val in parameters.items():
state = val.get('state')
default = val.get('default') # boolean whether to use default or not
if state == 'constant':
used[param] = f_defaults.get(param) if default is True else val.get('upper') # upper can be a list
elif state == 'random':
# retrieve bounds
if not isinstance(f_defaults.get(param), list):
lower, upper = [val.get(bound) for bound in ['lower', 'upper']]
assert upper > lower, \
'Upper bound for {} must be greater than its lower bound'.format(effect + '.' + param)
used[param] = rand(lower, upper)
continue
raise TypeError('Will not parse random list values!')
effect_f(**used)
labels[effect] = used
if write is not False:
# outputs augmented tracks to desired folder ignoring feature extraction pipleine
model = file.split('/')[-2] if '/audio/' not in file else file.split('/')[-3]
outfile = os.path.join(write, model, str(n) + '_' + file.split('/')[-1])
# print (outfile)
FX.build(file, outfile)
with tempfile.NamedTemporaryFile(suffix = '.wav') as tmp:
# pysox doesn't have output to array, save to temp file and reload as array with librosa
FX.build(file, tmp.name)
array, sr = librosa.load(tmp.name, sr = 41000)
FX.clear_effects()
# return data with feature extraction
flattened_labels = flatten(labels)
flattened_labels['group'] = n
return flattened_labels, feature_pipeline(array)
def augment_data(SOURCES, subsample = False, n_augment = 1,
write_with_effects_to = False, make_training_set = False, source = 'power'):
"""
Augmentation pipeline with options to subsample or write augmented data
Args:
subsample (bool, int): if not False augment only k files
write_with_effects (bool, str: path): if not False write augmented .wav files without feature extraction
write_training (bool):
True -> write to data/processed folder with feature extraction (intended to mimic pipleine structure)
False -> return ndarray of features and dataframe of labels
n_augment(int): number to augment per file
Returns:
if no write_with_effects path provided:
ndarray of features and dataframe of labels
otherwise:
files written to provided directory
"""
# SOURCES = soloact.make_source_paths()
TRACK_KIND = SOURCES[source] # note or chord
SOURCE_DIR = TRACK_KIND['trace']
# must be a yaml file
# config = 'config.yaml' if config is None else config
config = yaml.load(open(SOURCES['config'], 'r'))
# SPLIT CONFIGURATIONS
augmentation_config = config['DataAugmentation']
pipeline_config = config['pipeline_config']
# PREDETERMINED MODEL GUITAR SPLIT
use_models_train = pipeline_config['train_models']
# NOT GENERATING THIS, HOLDING OUT UNTIL WE HAVE USEFUL WORKING MODELS
use_models_test = pipeline_config['test_models']
train_soundfiles = [glob.glob(os.path.join(SOURCE_DIR, mod, TRACK_KIND.get('ext')) + '/*.wav')
for mod in use_models_train]
train_soundfiles = list(itertools.chain.from_iterable(train_soundfiles))
#
if subsample is not False:
print ('Subsampling {} files from {} available'.format(subsample, len(train_soundfiles)))
train_soundfiles = random.choices(population = train_soundfiles, k = subsample)
else:
print ('Using all available data, {} files'.format(len(train_soundfiles)))
# VALIDATE EFFECTS BEFORE STARTING AUGMENTATION CHAIN
effects = augmentation_config.get('effects')
# Independent of the next step
valid, effects = validate_reduce_fx(effects)
# REDUCE LIST TO ACTIVE ONLY
effects = {k:v for k,v in effects.items() if k in augmentation_config.get('active')}
# NOT A KEYWORD TO AUGMENTATION FUNCTION
augmentation_config.pop('active')
# FIXED ORDER
order = ['overdrive'] + [f for f in effects.keys() if f not in ['reverb', 'overdrive']] + ['reverb']
ordered_effects = OrderedDict.fromkeys(order)
# ADD EFFECTS BACK TO ORDERED DICT
for k,v in effects.items():
ordered_effects[k] = v
# REPLACE
augmentation_config['effects'] = ordered_effects
if write_with_effects_to:
OUT_DIR = os.path.join(INTERIM_DIR, write_with_effects_to) + '_' + source.upper()
print ('Are you sure you want to {} files to "{}"?'.format(
len(train_soundfiles) * n_augment, OUT_DIR))
print ('1 to proceed, any other key to terminate')
if int(input()) == 1:
# can't take relative path with join here
augmentation_config['write'] = OUT_DIR
for m in use_models_train:
# make directories for each model!
os.makedirs(os.path.dirname(os.path.join(OUT_DIR, m) + '/'), exist_ok=True)
else:
sys.exit('Operation cancelled')
store_all = []
for sf in train_soundfiles:
for i in range(n_augment):
store_all.append(augment_track(sf, n = i, **augmentation_config))
labels, features = zip(*store_all)
all_features = [np.expand_dims(x, axis = 1) for x in features]
X_train = pad(all_features)
Y_train = pd.DataFrame(list(labels))
# add guitar kind and chord
def gt(x, ix): return x.split('/')[ix]
# get models and chords (ordered)
file_meta = [(gt(x, ix = -2 if source != 'sn' else -3), gt(x, ix = -1).rstrip('.wav')) for x in train_soundfiles]
models, chordnames = zip(*file_meta)
# repeat sequence by number of augmentations
models = list(itertools.chain.from_iterable([[m] * n_augment for m in models]))
chordnames = list(itertools.chain.from_iterable([[m] * n_augment for m in chordnames]))
Y_train['model'] = pd.Series(models)
Y_train['chords'] = | pd.Series(chordnames) | pandas.Series |
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import feature_extraction, model_selection, naive_bayes, metrics, svm
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.model_selection import KFold
import pandas as pd
import numpy as np
import sklearn
# path to data folder
path = '/home/irina/winterwell/egbot/data/raw'
#========== reading data
df = pd.DataFrame()
cols = ['egbot_answer_body','egbot_answer_id','egbot_answer_label']
try:
with open(path + '/d127+labelled.json', 'r') as read_file:
df = | pd.read_json(read_file, encoding='utf-8') | pandas.read_json |
"""Module to read, check and write a HDSR meetpuntconfiguratie."""
__title__ = "histTags2mpt"
__description__ = "to evaluate a HDSR FEWS-config with a csv with CAW histTags"
__version__ = "0.1.0"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__license__ = "MIT License"
from meetpuntconfig.fews_utilities import FewsConfig, xml_to_dict
from pathlib import Path
import json
import numpy as np
import pandas as pd
import logging
from openpyxl import load_workbook
from openpyxl.styles import Font, PatternFill
import os
import sys
import re
from shapely.geometry import Point
pd.options.mode.chained_assignment = None
def idmap2tags(row, idmap):
"""Add FEWS-locationIds to hist_tags in df.apply() method."""
exloc, expar = row["serie"].split("_", 1)
fews_locs = [
col["internalLocation"]
for col in idmap
if col["externalLocation"] == exloc and col["externalParameter"] == expar
]
if len(fews_locs) == 0:
fews_locs = np.NaN
return fews_locs
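# Illustrative sketch (values are made up): given an idmap entry such as
# {'externalLocation': '610', 'externalParameter': 'HB1',
#  'internalLocation': 'OW000001'}, a row whose "serie" is '610_HB1' maps to
# ['OW000001']; rows without any match return np.NaN.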
def get_validation_attribs(validation_rules, int_pars=None, loc_type=None):
"""Get attributes from validationRules."""
if int_pars is None:
int_pars = [rule["parameter"] for rule in validation_rules]
result = []
for rule in validation_rules:
if "type" in rule.keys():
if rule["type"] == loc_type:
if any(re.match(rule["parameter"], int_par) for int_par in int_pars):
for key, attribute in rule["extreme_values"].items():
if isinstance(attribute, list):
result += [value["attribute"] for value in attribute]
else:
result += [attribute]
elif any(re.match(rule["parameter"], int_par) for int_par in int_pars):
for key, attribute in rule["extreme_values"].items():
if isinstance(attribute, list):
result += [value["attribute"] for value in attribute]
else:
result += [attribute]
return result
def update_hlocs(row, h_locs, mpt_df):
"""Add startdate and enddate op hoofdloc dataframe with df.apply() method."""
loc_id = row.name
start_date = row["STARTDATE"]
end_date = row["ENDDATE"]
if loc_id in h_locs:
start_date = (
mpt_df[mpt_df.index.str.contains(loc_id[0:-1])]["STARTDATE"].dropna().min()
)
end_date = (
mpt_df[mpt_df.index.str.contains(loc_id[0:-1])]["ENDDATE"].dropna().max()
)
return start_date, end_date
def update_date(row, mpt_df, date_threshold):
"""Return start and end-date in df.apply() method."""
int_loc = row["LOC_ID"]
if int_loc in mpt_df.index:
start_date = mpt_df.loc[int_loc]["STARTDATE"].strftime("%Y%m%d")
end_date = mpt_df.loc[int_loc]["ENDDATE"]
if end_date > date_threshold:
end_date = pd.Timestamp(year=2100, month=1, day=1)
end_date = end_date.strftime("%Y%m%d")
else:
start_date = row["START"]
end_date = row["EIND"]
return start_date, end_date
def update_histtag(row, grouper):
"""Assign last histTag to waterstandsloc in df.apply method."""
return next(
(
df.sort_values("total_max_end_dt", ascending=False)["serie"].values[0]
for loc_id, df in grouper
if loc_id == row["LOC_ID"]
),
None,
)
def _sort_validation_attribs(rule):
result = {}
for key, value in rule.items():
if isinstance(value, str):
result[key] = [value]
elif isinstance(value, list):
periods = [val["period"] for val in value]
attribs = [val["attribute"] for val in value]
result[key] = [attrib for _, attrib in sorted(zip(periods, attribs))]
return result
class MeetpuntConfig:
"""Meetpuntconfig class."""
def __init__(self, config_path, log_level="INFO"):
self.paths = dict()
self.fews_config = None
self.location_sets = dict()
self.hist_tags = None
self.hist_tags_ignore = None
self.fixed_sheets = None
self.idmap_files = None
self.idmap_sections = None
self.external_parameters_allowed = None
self.consistency = None
self.parameter_mapping = None
self.validation_rules = None
self.logging = logging
self.hoofdloc = None
self.subloc = None
self.waterstandloc = None
self.mswloc = None
self.mpt_hist_tags = None
self._locs_mapping = dict(
hoofdlocaties="hoofdloc",
sublocaties="subloc",
waterstandlocaties="waterstandloc",
mswlocaties="mswloc",
)
self.logging.basicConfig(level=os.environ.get("LOGLEVEL", log_level))
self._read_config(Path(config_path))
def _read_config(self, config_json):
if config_json.exists():
with open(config_json) as src:
config = json.load(src)
workdir = Path(config_json).parent
else:
self.logging.error(f"{config_json} does not exist")
sys.exit()
# add paths to config
for key, path in config["paden"].items():
path = Path(path)
if not path.is_absolute():
path = workdir.joinpath(path).resolve()
if path.exists():
self.paths[key] = path
else:
if path.suffix == "":
logging.warning(f"{path} does not exist. Folder will be created")
path.mkdir()
else:
self.logging.error(
(
f"{path} does not exist. "
f"Please define existing file "
f"in {config_json}."
)
)
sys.exit()
# add fews_config
self.fews_config = FewsConfig(self.paths["fews_config"])
# add location_sets
for key, value in config["location_sets"].items():
if value in self.fews_config.locationSets.keys():
if "csvFile" in self.fews_config.locationSets[value].keys():
self.location_sets[key] = {
"id": value,
"gdf": self.fews_config.get_locations(value),
}
else:
self.logging.error((f"{key} not a csvFile location-set"))
else:
self.logging.error(
(
f"locationSet {key} specified in {config_json} "
f"not in fews-config"
)
)
# add rest of config
self.idmap_files = config["idmap_files"]
self.idmap_sections = config["idmap_sections"]
self.external_parameters_allowed = config["external_parameters_allowed"]
self.parameter_mapping = config["parameter_mapping"]
self.validation_rules = config["validation_rules"]
self.fixed_sheets = config["fixed_sheets"]
# read consistency df from input-excel
self.consistency = pd.read_excel(
self.paths["consistency_xlsx"], sheet_name=None, engine="openpyxl"
)
self.consistency = {
key: value
for key, value in self.consistency.items()
if key in self.fixed_sheets
}
def _read_hist_tags(self, force=False):
if (not self.hist_tags) or force:
if "hist_tags_csv" in self.paths.keys():
self.logging.info(f"reading histags: {self.paths['hist_tags_csv']}")
dtype_cols = ["total_min_start_dt", "total_max_end_dt"]
self.hist_tags = pd.read_csv(
self.paths["hist_tags_csv"],
parse_dates=dtype_cols,
sep=None,
engine="python",
)
for col in dtype_cols:
if not pd.api.types.is_datetime64_dtype(self.hist_tags[col]):
self.logging.error(
(
f"col '{col}' in '{self.paths['hist_tags_csv']} "
"can't be converted to np.datetime64 format. "
"Check if values are dates."
)
)
sys.exit()
def _read_hist_tags_ignore(self, force=False):
if (not self.hist_tags_ignore) or force:
if "mpt_ignore_csv" in self.paths.keys():
self.logging.info(
f"Reading hist tags to be ingored from "
f"{self.paths['mpt_ignore_csv']}"
)
self.hist_tags_ignore = pd.read_csv(
self.paths["mpt_ignore_csv"], sep=None, header=0, engine="python"
)
elif "histTag_ignore" in self.consistency.keys():
self.hist_tags_ignore = self.consistency["histTag_ignore"]
self.logging.info(
f"Reading hist tags to be ignored from "
f"{self.paths['consistency_xlsx']}"
)
else:
self.logging.error(
(
f"specify a histTag_ignore worksheet in "
f"{self.paths['consistency_xlsx']} or a csv-file "
"in the config-json"
)
)
sys.exit()
self.hist_tags_ignore["UNKNOWN_SERIE"] = self.hist_tags_ignore[
"UNKNOWN_SERIE"
].str.replace("#", "")
def _get_idmaps(self, idmap_files=None):
if not idmap_files:
idmap_files = self.idmap_files
idmaps = [
xml_to_dict(self.fews_config.IdMapFiles[idmap])["idMap"]["map"]
for idmap in idmap_files
]
return [item for sublist in idmaps for item in sublist]
def _read_locs(self):
self.hoofdloc = self.fews_config.get_locations("OPVLWATER_HOOFDLOC")
self.subloc = self.fews_config.get_locations("OPVLWATER_SUBLOC")
self.waterstandloc = self.fews_config.get_locations(
"OPVLWATER_WATERSTANDEN_AUTO"
)
self.mswloc = self.fews_config.get_locations("MSW_STATIONS")
def _update_staff_gauge(self, row):
"""Assign upstream and downstream staff gauges to subloc."""
result = {"HBOV": "", "HBEN": ""}
for key in result.keys():
df = self.waterstandloc.loc[self.waterstandloc["LOC_ID"] == row[key]]
if not df.empty:
result[key] = df["PEILSCHAAL"].values[0]
return result["HBOV"], result["HBEN"]
def hist_tags_to_mpt(self, sheet_name="mpt"):
"""Convert histTag-ids to mpt-ids."""
if self.hist_tags is None:
self._read_hist_tags()
idmaps = self._get_idmaps()
hist_tags_df = self.hist_tags.copy()
hist_tags_df["fews_locid"] = hist_tags_df.apply(
idmap2tags, args=[idmaps], axis=1
)
hist_tags_df = hist_tags_df[hist_tags_df["fews_locid"].notna()]
mpt_hist_tags_df = hist_tags_df.explode("fews_locid").reset_index(drop=True)
self.mpt_hist_tags = mpt_hist_tags_df
mpt_df = pd.concat(
[
mpt_hist_tags_df.groupby(["fews_locid"], sort=False)[
"total_min_start_dt"
].min(),
mpt_hist_tags_df.groupby(["fews_locid"], sort=False)[
"total_max_end_dt"
].max(),
],
axis=1,
)
mpt_df = mpt_df.sort_index(axis=0)
mpt_df.columns = ["STARTDATE", "ENDDATE"]
mpt_df.index.name = "LOC_ID"
kw_locs = list(mpt_df[mpt_df.index.str.contains("KW", regex=False)].index)
h_locs = np.unique(["{}0".format(loc[0:-1]) for loc in kw_locs])
h_locs_missing = [loc for loc in h_locs if loc not in list(mpt_df.index)]
h_locs_df = pd.DataFrame(
data={
"LOC_ID": h_locs_missing,
"STARTDATE": [pd.NaT] * len(h_locs_missing),
"ENDDATE": [pd.NaT] * len(h_locs_missing),
}
)
h_locs_df = h_locs_df.set_index("LOC_ID")
mpt_df = pd.concat([mpt_df, h_locs_df], axis=0)
mpt_df[["STARTDATE", "ENDDATE"]] = mpt_df.apply(
update_hlocs, args=[h_locs, mpt_df], axis=1, result_type="expand"
)
mpt_df = mpt_df.sort_index()
self.consistency["mpt"] = mpt_df
def check_idmap_sections(self, sheet_name="idmap section error"):
"""Check if all KW/OW locations are in the correct section."""
self.consistency[sheet_name] = pd.DataFrame(
columns=[
"bestand",
"externalLocation",
"externalParameter",
"internalLocation",
"internalParameter",
]
)
for idmap, subsecs in self.idmap_sections.items():
for section_type, sections in subsecs.items():
for section in sections:
if section_type == "KUNSTWERKEN":
prefix = "KW"
if section_type == "WATERSTANDLOCATIES":
prefix = "OW"
if section_type == "MSWLOCATIES":
prefix = "(OW|KW)"
pattern = fr"{prefix}\d{{6}}$"
idmapping = xml_to_dict(
self.fews_config.IdMapFiles[idmap], **section
)["idMap"]["map"]
idmap_wrong_section = [
idmap
for idmap in idmapping
if not bool(re.match(pattern, idmap["internalLocation"]))
]
if idmap_wrong_section:
section_start = (
section["section_start"]
if "section_start" in section.keys()
else ""
)
section_end = (
section["section_end"]
if "section_end" in section.keys()
else ""
)
self.logging.warning(
(
f"{len(idmap_wrong_section)} "
f"internalLocations not {prefix}XXXXXX "
f"between {section_start} and {section_end} "
f"in {idmap}."
)
)
df = | pd.DataFrame(idmap_wrong_section) | pandas.DataFrame |
#!/usr/bin/env python
"""
aperturephot.py - <NAME> (<EMAIL>) - Dec 2014
Contains aperture photometry routines for HATPI. Needs reduced frames.
The usual sequence is:
1. run parallel_extract_sources on all frames with threshold ~ 10000 to get
bright stars for astrometry.
2. run parallel_anet to get WCS headers for all frames.
3. run make_fov_catalog to get a FOV source catalog for the field.
4. run reform_fov_catalog to cut this down to the columns needed for magfit
only.
5. run parallel_fitsdir_photometry for photometry on all frames
6. run get_magfit_frames to select a single magfit photometry reference and set
up per-CCD work directories, symlinks, etc. for the next steps.
7. run make_magfit_config to generate magfit config files for
MagnitudeFitting.py
8. run make_fiphot_list to make lists of fiphot files for each CCD.
9. run MagnitudeFitting.py in single reference mode.
10. run do_masterphotref.py to get the master mag fit reference.
11. run MagnitudeFitting.py in master reference mode.
12. run parallel_collect_lightcurves to collect all lightcurves into .rlc files.
13. run serial_run_epd or parallel_run_epd to do EPD on all LCs.
14. run parallel_lc_statistics to collect stats on .epdlc files.
15. run choose_tfa_template to choose TFA template stars using the .epdlc stats.
16. run parallel_run_tfa for TFA to get .tfalc files (and .tfalc.TF{1,2,3}
files).
17. run parallel_lc_statistics to collect stats on .tfalc files.
18. run parallel_bin_lightcurves to bin LCs to desired time-bins.
19. run parallel_binnedlc_statistics to collect stats for the binned LCs.
20. run plot_stats_file to make MAD vs. mag plots for all unbinned and binned
LCs.
21. run plot_magrms_comparison to compare the mag-RMS relation for various CCDs.
22. run plot_ismphot_comparison to compare against ISM photometry statistics for
the same field (requires common stars).
"""
#############
## IMPORTS ##
#############
import os, os.path, glob, sys
import multiprocessing as mp
try:
import subprocess32 as subprocess
except:
import subprocess
import shlex
from datetime import datetime
import re, json, shutil, random
try:
import cPickle as pickle
except:
import pickle
import sqlite3, gzip
import numpy as np, pandas as pd
from scipy.spatial import cKDTree as kdtree
from scipy.signal import medfilt
from scipy.linalg import lstsq
from scipy.stats import sigmaclip as stats_sigmaclip
from scipy.optimize import curve_fit
import scipy.stats
import numpy.random as nprand
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
import astropy.io.fits as pyfits
import imageutils
from imageutils import get_header_keyword, read_fits, extract_img_background
from shared_variables import FITS_TAIL
import shared_variables as sv
from astropy.table import Table
from astropy.io import fits
# get fiphot binary reader
try:
from HATpipepy.Common.BinPhot import read_fiphot
HAVEBINPHOT = True
except:
print("can't import binary fiphot reading functions from "
"HATpipe, binary fiphot files will be unreadable!")
HAVEBINPHOT = False
########################
## USEFUL DEFINITIONS ##
########################
# set this to show extra info
DEBUG = True
# CCD minimum and maximum X,Y pixel coordinates
# used to strip things outside FOV from output of make_frame_sourcelist
CCDEXTENT = {'x':[0.0,2048.0],
'y':[0.0,2048.0]}
# zeropoint mags for the HATPI lenses given exp time of 30 seconds
# from Chelsea's src directory on phs3: run_phot_astrom.py (2014-12-15)
# FIXME: check where these came from and fix if out of date, especially if
# cameras moved around
ZEROPOINTS = {3:17.11,
5:17.11,
6:17.11,
7:17.11,
8:16.63}
# used to get the station ID, frame number, and CCD number from a FITS filename
FRAMEREGEX = re.compile(r'(\d{1})\-(\d{6}\w{0,1})_(\d{1})')
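# e.g. FRAMEREGEX.search('1-377741e_5.fits').groups() -> ('1', '377741e', '5')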
# command string to use gaia2read for specified FOV
GAIADR2READCMD = ("gaia2read -r {ra:f} -d {dec:f} -s {boxlen:f} "
"--mR {brightrmag:f} --MR {faintrmag:f} "
"--xieta-coords --header --extra "
"--idrequest {idrequest:s} -o {outfile}")
# command string to do a 2massread for a specified FOV
TWOMASSREADCMD = ("2massread -r {ra:f} -d {dec:f} -s {boxlen:f} "
"--cat {catalogpath} -mr {brightrmag:f} -Mr {faintrmag:f} "
"--xieta-coords {ra:f} {dec:f} -o {outfile}")
# command string to do a 2massread for a specified FOV
UCAC4READCMD = ("ucac4read -r {ra:f} -d {dec:f} -s {boxlen:f} "
"--cat {catalogpath} -mr {brightrmag:f} -Mr {faintrmag:f} "
"-o {outfile}")
# locations of catalogs
CATALOGS = {
'2MASS':{'cmd':TWOMASSREADCMD,
'path':sv.TWOMASSPATH},
'UCAC4':{'cmd':UCAC4READCMD,
'path':sv.UCAC4PATH},
'GAIADR2': {'cmd':GAIADR2READCMD,
'path':sv.GAIADR2PATH}
}
# command string to run fistar
# parameters:
# {frame}
# {extractedlist}
# {ccdgain}
# {zeropoint}
# {exptime}
# {fluxthreshold}
# {ccdsection}
FISTARCMD = ("{fistarexec} -i {frame} -o {extractedlist} "
"--model elliptic --iterations symmetric=4,general=2 "
"--algorithm uplink --format id,x,y,bg,amp,s,d,k,flux,s/n "
"-g {ccdgain} --mag-flux {zeropoint},{exptime} --sort flux "
"--flux-threshold {fluxthreshold} --section {ccdsection} "
"--comment")
# command string to run transformation from RA/Dec in 2MASS catalogs to X/Y in
# FITs image to eventually run photometry at those locations using the
# transformations noted in each frame's .wcs file. we do the transform, then
# remove any objects outside the [0:2048, 0:2048] box for the CCD. we then use
# the resulting source list as input to fiphot. the parameters are:
# {transformer}: 'anrd2xy' or similar, coord -> pix converter executable
# {framewcsfile}: tWCS transformation file associated with frame
# {catalogsourcelist}: 2MASS catalog file associated with camera FOV
# {outfile}: temporary output file
TRANSFORMCMD = ("{transformer} -w {framewcsfile} "
"-o {outputfile} "
"-c 2,3 "
"{catalogsourcelist} ")
WCSRD2XYCMD = ( "wcs-rd2xy -w {framewcsfile} -o {outputfile} "
"-i {rdlsfile} -R 'ra' -D 'dec' " )
# fiphot command string to run fiphot. requires a sourcelist obtained from
# running TRANSFORMCMD and removing objects outside the CCD. the parameters are:
# {fits}: name of input FITS frame
# {sourcelist}: name of the source list file
# {zeropoint}: zeropoint magnitude from ZEROPOINTS above
# {xycols}: comma-separated 1-indexed column numbers of x and y coords
# {ccdgain}: gain of the CCD
# {ccdexptime}: exposure time of the CCD in seconds
# {aperturelist}: aperture list in the following format (all units are pixels):
# aper1rad:sky1inner:sky1rad,...,aperNrad,skyNinner,skyNrad
# {fitsbase}: the FITS base filename without any extensions
# e.g. for 1-377741e_5.fits, this is 1-377741e_5
# (used for later collection of fiphot files into an LC)
# {outfile}: name of the output .phot file (binary format)
# {format}: for text output, e.g.,'ISXY,BbMms', see fiphot manpage
#
# single-background: a fiphot I/O parameter for which the number sets different
# aspects of the output formatting for the .fiphot file that is produced. It
# is a HATpipebin-specific dependency; the public `fitsh` downloadable from
# Andras' webpage does not have this parameter. See `fiphot-io.c` and
# `fiphot.c` in the HATpipebin source. The corresponding `singlebg` parameter
# is a switch used in `write_comment`, `write_photometry_text`,
# `output_phot_columns`, and similar formatting functions.
#
FIPHOTCMD = ("fiphot --input {fits} --input-list {sourcelist} "
"--col-id 1 --col-xy {xycols} --gain {ccdgain:f} "
"--mag-flux {zeropoint:f},{ccdexptime:f} "
"--apertures {aperturelist} "
"--sky-fit 'mode,sigma=3,iterations=2' --disjoint-radius 2 "
"--serial {fitsbase} "
"--format {formatstr} --nan-string 'NaN' "
"--aperture-mask-ignore 'saturated' --comment '--comment' "
"--single-background 3 {binaryout} --output {outfile} -k")
# MagnitudeFitting.py commandline
# {magfitexec}: path to the executable to run for magfit (usually
# MagnitudeFitting.py)
# {network}: HATNet or HATSouth
# {fit_type}: single for single reference mag fit, master for
# master mag fit
# {sphotref_frame}: FITS to use as the single photometric reference
# {sphotref_phot} : fiphot file for the single photometric reference
# {nprocs} : number of processors to use
# {magfit_config_file}: path to the magfit config file
# {magfit_frame_list}: path to the magfit frame list produced by
# get_magfit_frames
MAGFITCMD = ("python {magfitexec} {network} {fit_type} "
"{sphotref_frame} {sphotref_phot} "
"-p {nprocs} --config-file={magfit_config_file} "
"--manual-frame-list={magfit_frame_list} --stat")
# command to generate master photometric reference
# {mphotrefexec}: path to executable to run (usually do_masterphotref.py)
# {network}: HATNet or HATSouth
# {sphotref_frame}: FITS used for the single photometric reference
# {fiphot_list}: list of fiphot files containing all photometry
# {magfit_config_file}: path to the magfit config file
MPHOTREFCMD = ("python {mphotrefexec} {network} {sphotref_frame} "
"--manual-frame-list={fiphot_list} "
"--config-file={magfit_config_file} --nostat")
####################
## SQLITE SCHEMAS ##
####################
PHOTS_TABLE = 'create table phots (phot text, rjd double precision, frame text)'
HATIDS_TABLE = 'create table hatids (hatid text, phot text, photline integer)'
META_TABLE = 'create table metainfo (photdir text, framedir text)'
PRAGMA_CMDS = 'pragma journal_mode = WAL'
PHOTS_INDEX_CMD = 'create index phots_index on phots (phot)'
HATIDS_INDEX_CMD = 'create index hatid_index on hatids (hatid)'
HATIDS_PHOT_INDEX_CMD = 'create index hatid_phot_index on hatids (phot)'
PHOTS_INSERT_CMD = 'insert into phots values (?,?,?)'
HATIDS_INSERT_CMD = 'insert into hatids values (?,?,?)'
META_INSERT_CMD = 'insert into metainfo values (?,?)'
PHOT_SELECT_CMD = ('select a.rjd, a.phot, b.photline from '
'phots a join hatids b on (a.phot = b.phot) '
'where b.hatid = ? order by a.rjd')
META_SELECT_CMD = ('select * from metainfo')
DISTINCT_HATIDS_CMD = ('select distinct hatid from hatids')
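# Illustrative sketch (not in the original module) of how these commands are
# used together; the database path and HAT-ID below are placeholders:
#   import sqlite3
#   db = sqlite3.connect('photindex.sqlite3')
#   cur = db.cursor()
#   cur.execute(PHOT_SELECT_CMD, ('HAT-123-0001234',))
#   rows = cur.fetchall()   # [(rjd, phot, photline), ...] ordered by rjd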
###############################
## ANET ASTROMETRY FUNCTIONS ##
###############################
def reform_fistars(fistardir,
fistarglob='1-*_?.fistar',
linestokeep=300,
outpostfix='astrometry'):
"""
This truncates all fistars in the directory fistardir to linestokeep
lines. This is useful for astrometry since the fistar files we produce are
sorted by decreasing flux, and we only only need a couple of thousand bright
sources to do astrometry with anet.
Mostly so we don't need to do source extraction over again
"""
fistars = glob.glob(os.path.join(os.path.abspath(fistardir),
fistarglob))
for fistar in fistars:
inf = open(fistar,'rb')
outfname = os.path.join(os.path.dirname(fistar),
'%s-%s' % (os.path.basename(fistar),
outpostfix))
outf = open(outfname, 'wb')
for ind, line in enumerate(inf):
if ind < linestokeep:
outf.write(line.encode('utf-8'))
print('%s -> %s' % (fistar, outfname))
outf.close()
inf.close()
def fistarfile_to_xy(fistarfile):
"""
Takes a single fistar file and convert it to a binary fits table of the
source positions. The latter is readable by astrometry.net.
"""
if not isinstance(fistarfile, str):
raise AssertionError('called fistarfile_to_xy on not a path')
if sys.version_info[0] == 2:
# used for python 2.X
df = pd.read_csv(fistarfile, comment='#',
names=['ident', 'x', 'y', 'bg', 'amp', 's', 'd', 'k',
'flux', 's/n'],
delimiter=r"\s*", engine='python')
elif sys.version_info[0] == 3:
# used for python 3.X
df = pd.read_csv(fistarfile, comment='#',
names=['ident', 'x', 'y', 'bg', 'amp', 's', 'd', 'k',
'flux', 's/n'],
delim_whitespace=True)
if not len(df) > 1:
print('skipping %s, did not get any sources' % fistarfile)
return 0
else:
col1 = fits.Column(name='ximage', format='D', array=np.array(df['x']))
col2 = fits.Column(name='yimage', format='D', array=np.array(df['y']))
coldefs = fits.ColDefs([col1, col2])
hdu = fits.BinTableHDU.from_columns(coldefs)
outfname = fistarfile.replace('.fistar','.fistar-fits-xy')
hdu.writeto(outfname, overwrite=True)
print('%s -> %s' % (fistarfile, outfname))
def fistardir_to_xy(fistardir, fistarglob='1-*_?.fistar'):
"""
Convert a directory of fistar outputs to binary fits table of x,y source
positions. The latter is readable by astrometry.net.
"""
fistars = glob.glob(
os.path.join(os.path.abspath(fistardir), fistarglob)
)
for fistar in fistars:
if not os.path.exists(fistar.replace('.fistar','.fistar-fits-xy')):
fistarfile_to_xy(fistar)
else:
print('found {:s}, continue.'.format(
fistar.replace('.fistar','.fistar-fits-xy'))
)
def astrometrydotnet_solve_frame(srclist,
wcsout,
ra,
dec,
radius=30,
scalelow=1,
scalehigh=30,
scaleunits='arcsecperpix',
tweakorder=6,
nobjs=200,
xpix=2048,
ypix=2048,
xcolname='ximage',
ycolname='yimage',
useimagenotfistar=False,
downsample=4,
pixelerror=1,
uniformize=10
):
"""
This uses astrometry.net to solve frame astrometry. This is the
free version of anet_solve_frame.
Uses the frame extracted sources (.fistar-fits-xy file, see
aperturephot.fistar_to_xy) and returns a .wcs file containing the
astrometric transformation between frame x,y and RA/DEC.
Optionally, if `useimagenotfistar` is true, uses the fits image
corresponding to the frame extracted sources and the astrometry.net
in-built source extraction to produce the solution, along with sick
constellation plots.
Example astrometry.net frame-solve command (using the fits table of x,y
positions):
solve-field --ra 274.5 --dec 58.0 --radius 30 --scale-low 1
--scale-high 30 --scale-units arcsecperpix --tweak-order 2
--wcs /dirname/tess2019135090826-4-2-0016_cal_img_bkgdsub.wcs
--overwrite --objs 200 -w 2048 -e 2048 --x-column ximage --y-column yimage
/dirname/tess2019135090826-4-2-0016_cal_img_bkgdsub.fistar-fits-xy
For astrometry.net to work, you need to install it, and get all the index
files. See http://astrometry.net/doc/readme.html.
If you care about your astrometric projection's precision at the < 1 pixel
level, you will likely need to tweak downsample, pixelerror, and
uniformize. `wcsqualityassurance.py` implements some relevant tests.
"""
if useimagenotfistar:
ASTROMETRYDOTNETCMD = (
"solve-field --ra {ra} --dec {dec} --radius {radius} "
"--scale-low {scalelow} --scale-high {scalehigh} "
"--scale-units {scaleunits} --tweak-order {tweakorder} "
"--wcs {wcsout} --downsample {downsample} "
"--overwrite --fits-image --no-verify "
"--pixel-error {pixelerror} "
"--uniformize {uniformize} "
"{srcimage}"
)
astrometrycmd = ASTROMETRYDOTNETCMD.format(
ra=ra,
dec=dec,
radius=radius,
scalelow=scalelow,
scalehigh=scalehigh,
scaleunits=scaleunits,
tweakorder=tweakorder,
downsample=downsample,
wcsout=wcsout,
pixelerror=pixelerror,
uniformize=uniformize,
srcimage=srclist.replace('.fistar-fits-xy','.fits')
)
else:
ASTROMETRYDOTNETCMD = (
"solve-field --ra {ra} --dec {dec} --radius {radius} "
"--scale-low {scalelow} --scale-high {scalehigh} "
"--scale-units {scaleunits} --tweak-order {tweakorder} "
"--wcs {wcsout} "
"--overwrite --objs {nobjs} --width {xpix} --height {ypix} "
" --x-column {xcolname} --y-column {ycolname} --no-plot "
"{srclist}"
)
astrometrycmd = ASTROMETRYDOTNETCMD.format(
ra=ra,
dec=dec,
radius=radius,
scalelow=scalelow,
scalehigh=scalehigh,
scaleunits=scaleunits,
tweakorder=tweakorder,
xpix=xpix,
ypix=ypix,
nobjs=nobjs,
xcolname=xcolname,
ycolname=ycolname,
wcsout=wcsout,
srclist=srclist
)
if DEBUG:
print(astrometrycmd)
# execute the anet shell command
anetproc = subprocess.Popen(shlex.split(astrometrycmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# get results
anet_stdout, anet_stderr = anetproc.communicate()
if DEBUG:
for l in anet_stdout.decode('utf-8').split('\n'):
print(l)
for l in anet_stderr.decode('utf-8').split('\n'):
print(l)
# get results if succeeded, log outcome, and return path of outfile
if (anetproc.returncode == 0 and
os.path.exists(os.path.abspath(wcsout)) and
os.stat(os.path.abspath(wcsout)).st_size > 0):
print('%sZ: astrometrydotnet %s generated for frame sourcelist %s' %
(datetime.utcnow().isoformat(),
os.path.abspath(wcsout), os.path.abspath(srclist)))
return os.path.abspath(wcsout)
# if astrometry did not succeed, complain and remove the zero-size wcs file
# anet inexplicably makes anyway
else:
print('{:s}Z: astrometrydotnet {:s} failed for frame sourcelist {:s}! '
.format(datetime.utcnow().isoformat(),
os.path.abspath(wcsout),
os.path.abspath(srclist)
)+
'Error was: {:s}'.format(repr(anet_stderr))
)
# remove the broken wcs if astrometry failed
if os.path.exists(os.path.abspath(wcsout)):
os.remove(os.path.abspath(wcsout))
return None
def anet_solve_frame(srclist,
wcsout,
ra,
dec,
infofromframe=False,
width=13,
tweak=6,
radius=13,
xpix=2048,
ypix=2048,
cols=(2,3),
scale=None,
usescalenotwidth=False):
"""
This uses anet to solve frame astrometry.
Uses the frame extracted sources (.fistar file) and returns a .wcs file
containing the astrometric transformation between frame x,y and RA/DEC.
Example anet command:
anet --ra 60. --dec -22.5 --radius 4 --width 13 --tweak 6 --cols 2,3 1-383272f_5.fistar --wcs 1-383272f_5.wcs
assuming an ~/.anetrc file with the following contents:
xsize = 2048 # the width of the frame in pixels
ysize = 2048 # the height of the frame in pixels
tweak = 3 # the order of polynomial fit to frame distortion
xcol = 2 # the column to be used for x coord (1-indexed)
ycol = 3 # the column to be used for y coord (1-indexed)
verify = 1 # number of verify iterations to run
log = 1 # set to 1 to log operations
indexpath = /P/HP0/CAT/ANET_INDEX/ucac4_2014/ # path to the indexes
otherwise, we'll need to provide these as kwargs to the anet executable.
If usescalenotwidth, instead executes
anet --ra 60. --dec -22 --radius 12 --scale 21.1 -s 2048,2048 --tweak 6 --cols 2,3 foo.fistar --wcs foo.wcs
The input sourcelist can come from fistar, with a fluxthreshold set to 10000
to just get the bright stars. This makes anet way faster.
"""
if infofromframe:
# find the frame
srcframe = os.path.basename(srclist)
srcframe = os.path.splitext(srcframe)[0] + '.fits'
srcframepath = os.path.join(os.path.dirname(srclist), srcframe)
srcframefz = os.path.splitext(srcframe)[0] + FITS_TAIL
srcframefzpath = os.path.join(os.path.dirname(srclist), srcframefz)
# get the RA, DEC, and FOV header keywords
if os.path.exists(srcframepath):
ra = get_header_keyword(srcframepath,'rac')
dec = get_header_keyword(srcframepath,'decc')
fov = get_header_keyword(srcframepath,'fov')
xpix = get_header_keyword(srcframepath,'naxis1')
ypix = get_header_keyword(srcframepath,'naxis2')
if fov is not None:
width = fov
ra = ra*360.0/24.0
elif os.path.exists(srcframefzpath):
# ext 1 is the header for the fpacked image
ra = get_header_keyword(srcframefzpath,'rac',ext=1)
dec = get_header_keyword(srcframefzpath,'decc',ext=1)
fov = get_header_keyword(srcframefzpath,'fov',ext=1)
xpix = get_header_keyword(srcframefzpath,'naxis1',ext=1)
ypix = get_header_keyword(srcframefzpath,'naxis2',ext=1)
if fov is not None:
width = fov
ra = ra*360.0/24.0
if usescalenotwidth:
ANETCMDSTR = ("anet -r {ra} -d {dec} --scale {scale} "
"--tweak {tweak} --radius {radius} -s {xpix},{ypix} "
"--cols {colx},{coly} --wcs {outwcsfile} {sourcelist}")
anetcmd = ANETCMDSTR.format(ra=ra,
dec=dec,
scale=scale,
tweak=tweak,
radius=radius,
xpix=xpix,
ypix=ypix,
colx=cols[0],
coly=cols[1],
outwcsfile=wcsout,
sourcelist=srclist)
else:
ANETCMDSTR = ("anet -r {ra} -d {dec} -w {width} "
"--tweak {tweak} --radius {radius} -s {xpix},{ypix} "
"--cols {colx},{coly} --wcs {outwcsfile} {sourcelist}")
anetcmd = ANETCMDSTR.format(ra=ra,
dec=dec,
width=width,
tweak=tweak,
radius=radius,
xpix=xpix,
ypix=ypix,
colx=cols[0],
coly=cols[1],
outwcsfile=wcsout,
sourcelist=srclist)
if DEBUG:
print(anetcmd)
# execute the anet shell command
anetproc = subprocess.Popen(shlex.split(anetcmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# get results
anet_stdout, anet_stderr = anetproc.communicate()
# get results if succeeded, log outcome, and return path of outfile
if (anetproc.returncode == 0 and
os.path.exists(os.path.abspath(wcsout)) and
os.stat(os.path.abspath(wcsout)).st_size > 0):
print('%sZ: anet WCS %s generated for frame sourcelist %s' %
(datetime.utcnow().isoformat(),
os.path.abspath(wcsout), os.path.abspath(srclist)))
return os.path.abspath(wcsout)
# if astrometry did not succeed, complain and remove the zero-size wcs file
# anet inexplicably makes anyway
else:
print('%sZ: anet WCS %s failed for frame sourcelist %s! Error was: %s' %
(datetime.utcnow().isoformat(),
os.path.abspath(wcsout),
os.path.abspath(srclist),
anet_stderr))
# remove the broken wcs if astrometry failed
if os.path.exists(os.path.abspath(wcsout)):
os.remove(os.path.abspath(wcsout))
return None
def parallel_anet_worker(task):
"""
This expands the task arg into the args and kwargs necessary for
anet_solve_frame.
"""
return (
task[0],
anet_solve_frame(
task[0], task[1], task[2], task[3], **task[4]
)
)
def parallel_astrometrydotnet_worker(task):
"""
This expands the task arg into the args and kwargs necessary for
astrometrydotnet_solve_frame.
"""
return (
task[0],
astrometrydotnet_solve_frame(
task[0], task[1], task[2], task[3], **task[4]
)
)
def parallel_anet(srclistdir,
outdir,
ra, dec,
fistarglob='?-*_?.fistar-astrometry',
nworkers=16,
maxtasksperworker=1000,
infofromframe=True,
width=13,
tweak=6,
radius=13,
xpix=2048,
ypix=2048,
cols=(2,3),
overwrite=True):
"""
This does parallel anet astrometry for all frames in srclistdir and
generates their wcs files.
"""
# get a list of all fits files in the directory
fistarlist = glob.glob(os.path.join(srclistdir, fistarglob))
if not overwrite:
existing = glob.glob(
os.path.join(srclistdir, fistarglob.replace('.fistar', '.wcs'))
)
requested = list(map(os.path.basename, fistarlist))
requested = [r.replace('.fistar','') for r in requested]
alreadyexists = list(map(os.path.basename, existing))
alreadyexists = [ae.replace('.wcs','') for ae in alreadyexists]
setdiff = np.setdiff1d(requested, alreadyexists)
if len(setdiff) == 0:
print("%sZ: astrometry already done for all .fistar files..." %
datetime.utcnow().isoformat())
return
        fistarlist = [os.path.join(srclistdir, sd+'.fistar') for sd in setdiff]
print('%sZ: found %s fistar files in %s, starting astrometry...' %
(datetime.utcnow().isoformat(),
len(fistarlist), srclistdir))
if outdir and not os.path.exists(outdir):
print('%sZ: making new output directory %s' %
(datetime.utcnow().isoformat(),
outdir))
os.mkdir(outdir)
# get the files for which astrometry hasn't already been done
fistarlist = check_files(fistarlist,
'astrometry',
outdir,
intailstr='.fistar',
outtailstr='.wcs',
skipifpartial=True)
if type(fistarlist) == int:
if fistarlist == -1:
return -1
pool = mp.Pool(nworkers, maxtasksperchild=maxtasksperworker)
inpostfix = os.path.splitext(fistarglob)[-1]
tasks = [
[x, os.path.join(
outdir, os.path.basename(
x.replace(inpostfix, '.wcs')
)
),
ra, dec, {'width':width,
'tweak':tweak,
'radius':radius,
'xpix':xpix,
'ypix':ypix,
'cols':cols,
'infofromframe':infofromframe}]
for x in fistarlist
]
# fire up the pool of workers
results = pool.map(parallel_anet_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
# this is the return dictionary
returndict = {x:y for (x,y) in results}
return returndict
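# A minimal usage sketch for parallel_anet. The paths, field center, and glob
# below are hypothetical placeholders, not values taken from a real reduction:
#
#   wcs_results = parallel_anet(
#       '/data/reduced/1-20190401',          # directory holding the .fistar lists
#       '/data/reduced/1-20190401/wcs',      # output directory for the .wcs files
#       122.5, 38.0,                         # approximate field center
#       fistarglob='1-*_5.fistar-astrometry',
#       nworkers=8
#   )
#   # wcs_results maps each fistar path to its .wcs path (None where anet failed)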
def parallel_astrometrydotnet(
srclistdir,
outdir,
ra, dec,
fistarfitsxyglob='tess2019135090826-4-2-0016_cal_img_bkgdsub.fistar-fits-xy',
nworkers=10,
maxtasksperworker=1000,
radius=30,
scalelow=1,
scalehigh=30,
scaleunits='arcsecperpix',
tweakorder=6,
nobjs=200,
xpix=2048,
ypix=2048,
xcolname='ximage',
ycolname='yimage',
useimagenotfistar=False,
downsample=4,
pixelerror=1,
uniformize=10
):
"""
Uses astrometrydotnet_solve_frame to do parallel astrometry for all frames
in srclistdir and generate their wcs files.
"""
# get a list of all fits files in the directory
if useimagenotfistar:
fistarfitsxylist = glob.glob(
os.path.join(srclistdir,
fistarfitsxyglob.replace('fistar-fits-xy','fits'))
)
intailstr = '.fits'
else:
fistarfitsxylist = glob.glob(
os.path.join(srclistdir, fistarfitsxyglob)
)
intailstr = '.fistar-fits-xy'
print('%sZ: found %s fistar files in %s, starting astrometry...' %
(datetime.utcnow().isoformat(),
len(fistarfitsxylist), srclistdir))
if outdir and not os.path.exists(outdir):
print('%sZ: making new output directory %s' %
(datetime.utcnow().isoformat(),
outdir))
os.mkdir(outdir)
# get the files for which astrometry hasn't already been done
fistarfitsxylist = check_files(fistarfitsxylist, 'astrometry', outdir,
intailstr=intailstr,
outtailstr='.wcs', skipifpartial=False)
if type(fistarfitsxylist) == int:
if fistarfitsxylist == -1:
return -1
inpostfix = os.path.splitext(fistarfitsxylist[0])[-1]
tasks = [
[x, os.path.join(
outdir, os.path.basename(
x.replace(inpostfix, '.wcs')
)
),
ra, dec, {'radius':radius,
'scalelow':scalelow,
'scalehigh':scalehigh,
'scaleunits':scaleunits,
'tweakorder':tweakorder,
'nobjs':nobjs,
'xpix':xpix,
'ypix':ypix,
'xcolname':xcolname,
'ycolname':ycolname,
'useimagenotfistar':useimagenotfistar,
'pixelerror':pixelerror,
'uniformize':uniformize
}
]
for x in fistarfitsxylist
]
pool = mp.Pool(nworkers, maxtasksperchild=maxtasksperworker)
# fire up the pool of workers
results = pool.map(parallel_astrometrydotnet_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
# this is the return dictionary
returndict = {x:y for (x,y) in results}
return returndict
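# A minimal usage sketch for parallel_astrometrydotnet. The directories,
# pointing, and scale bounds are hypothetical placeholders:
#
#   results = parallel_astrometrydotnet(
#       '/data/tess/s0012-1-2',              # directory with .fistar-fits-xy files
#       '/data/tess/s0012-1-2/wcs',          # output directory for the .wcs files
#       83.0, -5.0,                          # approximate field center [deg]
#       scalelow=20, scalehigh=22, scaleunits='arcsecperpix',
#       nworkers=10
#   )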
def parallel_anet_list(srclistlist,
outdir,
ra, dec,
fistarglob='?-*_?.fistar-astrometry',
nworkers=16,
maxtasksperworker=1000,
infofromframe=True,
width=13,
tweak=6,
radius=13,
xpix=2048,
ypix=2048,
cols=(2,3)):
"""
This runs anet on a list of frames in parallel.
"""
# get a list of all fits files in the directory
fistarlist = [x for x in srclistlist if os.path.exists(x)]
print('%sZ: found %s fistar files, starting astrometry...' %
(datetime.utcnow().isoformat(), len(fistarlist)))
if outdir and not os.path.exists(outdir):
print('%sZ: making new output directory %s' %
(datetime.utcnow().isoformat(),
outdir))
os.mkdir(outdir)
pool = mp.Pool(nworkers, maxtasksperchild=maxtasksperworker)
inpostfix = os.path.splitext(fistarglob)[-1]
tasks = [
[x, os.path.join(
outdir, os.path.basename(
x.replace(inpostfix, '.wcs')
)
),
ra, dec, {'width':width,
'tweak':tweak,
'radius':radius,
'xpix':xpix,
'ypix':ypix,
'cols':cols,
'infofromframe':infofromframe}]
for x in fistarlist
]
# fire up the pool of workers
results = pool.map(parallel_anet_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
# this is the return dictionary
returndict = {x:y for (x,y) in results}
return returndict
##########################
## PHOTOMETRY FUNCTIONS ##
##########################
def make_fov_catalog(ra=None, dec=None, size=None,
brightrmag=sv.FIELDCAT_BRIGHT,
faintrmag=sv.FIELDCAT_FAINT,
fits=None,
outfile=None,
outdir=None,
catalog='2MASS',
catalogpath=None,
columns=None,
observatory='hatpi',
gaiaidrequest='HAT'):
"""
This function gets all the sources in the field of view of the frame, given
its central pointing coordinates and plate-scale from either 2MASS or
UCAC4. Makes a catalog file that can then be used as input to project
catalog (ra,dec) to frame (x,y).
if ra, dec, size are None, fits must not be None. fits is the filename of
the FITS file to get the center RA, DEC, and platescale values from.
Kwargs:
ra, dec (float): field center, in degrees
size (float): size of box, in degrees
brightrmag (float): bright cutoff from catalog. If 2MASS is used,
"rmag" is 2MASS r. If GAIADR2 is used, it's Gaia R.
faintrmag (float): faint cutoff from catalog
fits (str): path to fits file containing center RA, DEC, and
platescale (see preamble).
outfile (str): path to write the output catalog
catalog (str): 'UCAC4', '2MASS', or 'GAIADR2'. You should usually use
GAIADR2.
gaiaidrequest (str): if catalog is GAIADR2, then you can request
either "GAIA", "HAT", or "TMASS" identifiers. These are collected from
a crossmatch; if you request "HAT" identifiers, the output catalog may
not include exclusively HAT-XXX-XXXXXXX ID's (there may also be some
GAIA ID's). The default is set to "HAT".
Returns:
path of the catalog file produced.
"""
    if (ra is not None) and (dec is not None) and (size is not None):
catra, catdec, catbox = ra, dec, size
elif fits:
frame, hdr = read_fits(fits)
catbox = sv.FIELDCAT_FOV
if observatory=='hatpi':
catra = float(hdr['RACA']) # RA [DECIMAL hr] (averaged field center)
catdec = float(hdr['DECCA']) # Dec [decimal deg] (averaged field center)
print('WRN! %sZ: converting decimal hour RA to decimal degree' %
(datetime.utcnow().isoformat()))
from astropy.coordinates import Angle
import astropy.units as units
tempra = Angle(str(catra)+'h')
catra = tempra.to(units.degree).value
else:
raise NotImplementedError
else:
print('%sZ: need a FITS file to work on, or center coords and size' %
(datetime.utcnow().isoformat(),))
return
if not outfile:
outfile = '%s-RA%s-DEC%s-SIZE%s.catalog' % (catalog,
catra,
catdec,
catbox)
if outdir:
outfile = os.path.join(outdir, outfile)
print('%sZ: making FOV catalog for '
'center RA, DEC = %.5f, %.5f with size = %.5f deg' %
(datetime.utcnow().isoformat(),
catra, catdec, catbox))
if catalog == 'GAIADR2':
if gaiaidrequest not in ['GAIA','HAT','TMASS']:
raise ValueError(
'expected gaiaidrequest one of "GAIA", "HAT", "TMASS"')
catalogcmd = CATALOGS[catalog]['cmd'].format(
ra=catra,
dec=catdec,
boxlen=catbox,
catalogpath=catalogpath if catalogpath else CATALOGS[catalog]['path'],
brightrmag=brightrmag,
faintrmag=faintrmag,
outfile=outfile,
idrequest=gaiaidrequest)
elif catalog in ['2MASS', 'UCAC4']:
catalogcmd = CATALOGS[catalog]['cmd'].format(
ra=catra,
dec=catdec,
boxlen=catbox,
catalogpath=catalogpath if catalogpath else CATALOGS[catalog]['path'],
brightrmag=brightrmag,
faintrmag=faintrmag,
outfile=outfile)
else:
raise ValueError('catalog must be one of GAIADR2,2MASS,UCAC4')
if DEBUG:
print(catalogcmd)
if os.path.exists(outfile):
print('%sZ: found FOV catalog %s, continuing... ' %
(datetime.utcnow().isoformat(), os.path.abspath(outfile)))
return os.path.abspath(outfile)
# execute the cataloger shell command
catalogproc = subprocess.Popen(shlex.split(catalogcmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# get results
catalog_stdout, catalog_stderr = catalogproc.communicate()
# get results if succeeded, log outcome, and return path of outfile
if catalogproc.returncode == 0:
print('%sZ: FOV catalog %s generated for '
'center RA, DEC = %.5f, %.5f with size = %.5f deg' %
(datetime.utcnow().isoformat(),
os.path.abspath(outfile), catra, catdec, catbox))
return os.path.abspath(outfile)
else:
print('%sZ: FOV catalog %s generation failed for '
'center RA, DEC = %.5f, %.5f with size = %.5f deg!' %
(datetime.utcnow().isoformat(),
os.path.abspath(outfile), catra, catdec, catbox))
return None
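# A minimal usage sketch for make_fov_catalog. The coordinates, magnitude
# limits, and output directory are hypothetical placeholders:
#
#   catpath = make_fov_catalog(ra=122.5, dec=38.0, size=14.0,
#                              brightrmag=6.0, faintrmag=13.0,
#                              catalog='GAIADR2',
#                              outdir='/data/catalogs',
#                              gaiaidrequest='HAT')
#   # returns the absolute path of the catalog file, or None if the query failed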
def reform_gaia_fov_catalog(
incat, outcat, columns='id,ra,dec,xi,eta,G,Rp,Bp,plx,pmra,pmdec,varflag'
):
"""
This converts the output catalog for gaia2read to the format required
for magfit.
columns is a CSV string containing the required columns.
"""
allcolumns = ['id','ra','dec','raerr','decerr','plx','plxerr','pmra',
'pmdec','pmraerr','pmdecerr','epoch','astexcnoise',
'astexcnoisesig','astpriflag','G_nobs','G_flux',
'G_fluxerr','G_fluxovererr','G','Bp_nobs','Bp_flux',
'Bp_fluxerr','Bp_fluxovererr','Bp','Rp_nobs','Rp_flux',
'Rp_fluxerr','Rp_fluxovererr','Rp','BpRp_excess','RV',
'RV_err','varflag','Teff','Teff_lowq','Teff_highq',
'extinction','extinction_lowq','extinction_highq',
'reddening','reddening_lowq','reddening_highq',
'Rstar','Rstar_lowq','Rstar_highq','L','L_lowq','L_highq',
'xi','eta']
columns = columns.split(',')
colstoget = [allcolumns.index(x) for x in columns]
    # use context managers so the output is flushed and both files are closed
    with open(incat, 'r') as inf, open(outcat, 'wb') as outf:
        for line in inf:
            if '#' not in line:
                sline = line.split()
                outcols = [sline[x] for x in colstoget]
                outline = ' '.join(outcols)
                outf.write('{:s}\n'.format(outline).encode('utf-8'))
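# A minimal usage sketch for reform_gaia_fov_catalog. The file names are
# hypothetical; the columns string must be a subset of allcolumns above:
#
#   reform_gaia_fov_catalog(
#       '/data/catalogs/GAIADR2-RA122.5-DEC38.0-SIZE14.0.catalog',
#       '/data/catalogs/GAIADR2-RA122.5-DEC38.0-SIZE14.0.reformed_catalog',
#       columns='id,ra,dec,xi,eta,G,Rp,Bp,plx,pmra,pmdec,varflag'
#   )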
def reform_fov_catalog(incat,
outcat,
columns='id,ra,dec,xi,eta,J,K,qlt,I,r,i,z'):
"""
This converts the full output catalog from 2massread, etc. to the format
required for magfit. Also useful for general reforming of the columns.
columns is a CSV string containing columns needed from allcolumns below.
"""
allcolumns = ['id','ra','dec','xi','eta','arcdis','J',
'Junc','H','Hunc','K','Kunc','qlt','B',
'V','R','I','u','g','r','i','z','field','num']
columns = columns.split(',')
colstoget = [allcolumns.index(x) for x in columns]
    # open both files in text mode (the 'rb'/'wb' combination mixed bytes and
    # str under Python 3) and close them when done
    with open(incat, 'r') as inf, open(outcat, 'w') as outf:
        for line in inf:
            if '#' not in line:
                sline = line.split()
                outcols = [sline[x] for x in colstoget]
                outline = ' '.join(outcols)
                outf.write('%s\n' % outline)
def extract_frame_sources(fits,
outfile,
fistarexec='fistar',
ccdextent='0:2048,0:2048',
ccdgain=2.725,
fluxthreshold=1000,
zeropoint=17.11,
exptime=30.0):
"""
This uses fistar to extract sources from the image.
fistar -i 1-377741e_5.fits -o test.fistar --model elliptic \
--iterations symmetric=4,general=2 --algorithm uplink \
--format id,x,y,bg,amp,s,d,k,flux,s/n -g 2.725 \
--mag-flux 17,30 --sort flux \
--flux-threshold 1000 --section 0:0,2048:2048
"""
if not os.path.exists(fits):
print('%sZ: no FITS file to work on!' %
(datetime.utcnow().isoformat(),))
return None
if not outfile:
outfile = re.sub(sv.FITS_TAIL, '.fistar', fits)
fistarcmd = FISTARCMD.format(
fistarexec=fistarexec, # assuming fistar is in the path
frame=fits,
extractedlist=outfile,
ccdgain=ccdgain,
zeropoint=zeropoint,
exptime=exptime,
fluxthreshold=fluxthreshold,
ccdsection=ccdextent
)
if DEBUG:
print(fistarcmd)
print('%sZ: starting fistar for %s...' %
(datetime.utcnow().isoformat(), fits))
# execute the fistar shell command
fistarproc = subprocess.Popen(shlex.split(fistarcmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# get results
fistar_stdout, fistar_stderr = fistarproc.communicate()
# get results if succeeded, log outcome, and return path of outfile
if fistarproc.returncode == 0:
print('%sZ: fistar completed for %s -> %s' %
(datetime.utcnow().isoformat(),fits, outfile))
return outfile
else:
print('%sZ: fistar failed for %s!' %
(datetime.utcnow().isoformat(), fits))
return None
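# A minimal usage sketch for extract_frame_sources. The frame path, output
# path, and flux threshold are hypothetical placeholders:
#
#   fistarfile = extract_frame_sources('/data/reduced/1-377741e_5.fits',
#                                      '/data/reduced/1-377741e_5.fistar',
#                                      fluxthreshold=1000)
#   # returns the .fistar path if fistar succeeded, otherwise None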
def parallel_sourceextract_worker(task):
"""
This expands the task arg into the args and kwargs necessary for
extract_frame_sources.
"""
return (task[0][0], extract_frame_sources(*task[0],**task[1]))
def check_files(inlist, operationstr, outdir, intailstr='.fits',
outtailstr='.fistar', skipifpartial=False):
"""
You have a list of files you think you want to run an operation on.
However, you do not want to repeat the operation if you have already run
it.
Further, sometimes you want to skip the operation entirely if it failed
last time you tried it.
input:
inlist: a list of paths that you want to perform an operation on
operationstr: a string for logging, e.g., "source extraction"
outdir: path of the directory being written to
        intailstr: the tail string of the input files
outtailstr: the tail string of the output files
skipifpartial: if we find ANY matches between the inlist and outlist,
then return -1 and do not perform any operations at all.
output:
default: the list of files on which to operate. If there are no files
on which to operate, or skipifpartial==True and there are some matches,
returns -1.
"""
# construct list of file names that would be created for the operation to
# be performed
outlist = [os.path.join(outdir,
os.path.basename(re.sub(intailstr, outtailstr, x))) for
x in inlist]
exists = np.array(outlist)[np.array([os.path.exists(f) for f in outlist])]
_exists = [re.sub(outtailstr,'',e) for e in exists]
_inlist = [re.sub(intailstr,'',w) for w in inlist]
if len(exists) > 0:
to_operate_on = np.array(inlist)[~np.in1d(_inlist, _exists)]
else:
to_operate_on = np.array(inlist)
print('%sZ: found %s FITS files to %s...' %
(datetime.utcnow().isoformat(), len(to_operate_on), operationstr))
if len(to_operate_on) == 0:
print('%sZ: escaping %s because no new files...' %
(datetime.utcnow().isoformat(), operationstr))
return -1
if skipifpartial and (len(to_operate_on) < len(inlist)):
print('%sZ: escaping %s because got partial list of files...' %
(datetime.utcnow().isoformat(), operationstr))
return -1
else:
return to_operate_on
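# A minimal usage sketch for check_files. The input frames and output
# directory are hypothetical placeholders:
#
#   towork = check_files(['/data/red/1-001_5.fits', '/data/red/1-002_5.fits'],
#                        'source extraction', '/data/red',
#                        intailstr='.fits', outtailstr='.fistar')
#   if type(towork) == int and towork == -1:
#       pass  # nothing new to do (or a partial run was found with skipifpartial)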
def parallel_extract_sources(fitsdir,
outdir,
nworkers=8,
maxtasksperworker=1000,
fistarexec='fistar',
ccdextent='0:0,2048:2048',
ccdgain=2.725,
fluxthreshold=1000,
zeropoint=17.11,
exptime=30.0,
tailstr=FITS_TAIL,
fnamestr='*_?.fits'):
"""
This does parallel source extraction from all FITS in fitsdir, and puts the
results in outdir.
"""
# get a list of all fits files in the directory
fitslist = glob.glob(os.path.join(fitsdir,fnamestr))
print('%sZ: found %s FITS files in %s, starting source extraction...' %
(datetime.utcnow().isoformat(),
len(fitslist), fitsdir))
if outdir and not os.path.exists(outdir):
print('%sZ: making new output directory %s' %
(datetime.utcnow().isoformat(),
outdir))
os.mkdir(outdir)
# get the files for which source extraction hasn't already been done
toextract = check_files(fitslist,
'source extraction',
outdir,
intailstr=tailstr,
outtailstr='.fistar')
if type(toextract) == int:
if toextract == -1:
return -1
pool = mp.Pool(nworkers, maxtasksperchild=maxtasksperworker)
tasks = [
[(x, os.path.join(outdir,
os.path.basename(re.sub(tailstr,'.fistar',x)))),
{'fistarexec':fistarexec,
'ccdextent':ccdextent,
'ccdgain':ccdgain,
'fluxthreshold':fluxthreshold,
'zeropoint':zeropoint,
'exptime':exptime,}]
for x in toextract
]
# fire up the pool of workers
results = pool.map(parallel_sourceextract_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
# this is the return dictionary
returndict = {x:y for (x,y) in results}
return returndict
def parallel_srcextract_list_worker(task):
"""
This is the worker for the function below.
task[0] = fits
task[1] = {'fistarexec','ccdextent','ccdgain','fluxthreshold',
'zeropoint', 'exptime'}
"""
try:
fits, kwargs = task
if not os.path.exists(fits):
return fits, None
# get the required header keywords from the FITS file
header = imageutils.get_header_keyword_list(fits,
['GAIN',
'GAIN1',
'GAIN2',
'EXPTIME'])
# handle the gain and exptime parameters
if 'GAIN1' in header and 'GAIN2' in header:
ccdgain = (header['GAIN1'] + header['GAIN2'])/2.0
elif 'GAIN' in header:
ccdgain = header['GAIN']
else:
ccdgain = None
ccdexptime = header['EXPTIME'] if 'EXPTIME' in header else None
        # both the gain and the exposure time are needed downstream by fistar
        if not (ccdgain and ccdexptime):
            print('ERR! %sZ: no GAIN or EXPTIME defined for %s' %
                  (datetime.utcnow().isoformat(),
                   fits))
            return fits, None
# figure out the outputfile
outfile = fits.replace('.fits','.fistar')
# figure out the input kwargs to fistar
kwargs['exptime'] = ccdexptime
kwargs['ccdgain'] = ccdgain
# figure out this frame's CCD and therefore zeropoint
frameinfo = FRAMEREGEX.findall(fits)
if frameinfo:
kwargs['zeropoint'] = ZEROPOINTS[int(frameinfo[0][-1])]
elif not frameinfo and not kwargs['zeropoint']:
print('ERR! %sZ: no zeropoint mag defined for %s' %
(datetime.utcnow().isoformat(),
fits))
return fits, None
# run fistar
fistar = extract_frame_sources(fits,
outfile,
**kwargs)
if fistar and os.path.exists(fistar):
return fits, fistar
else:
return fits, None
except Exception as e:
print('ERR! %sZ: could not extract sources for %s, error: %s' %
(datetime.utcnow().isoformat(),
fits, e))
return fits, None
def parallel_extract_sources_for_list(fitslist,
nworkers=16,
maxworkerstasks=1000,
fistarexec='fistar',
ccdextent='0:0,2048:2048',
ccdgain=2.725,
fluxthreshold=1000,
zeropoint=17.11,
exptime=30.0):
"""
This runs a parallel fistar operation on all sources in fitslist.
Puts the results in the same directories as the FITS themselves.
"""
    pool = mp.Pool(nworkers, maxtasksperchild=maxworkerstasks)
tasks = [
(x,
{'fistarexec':fistarexec,
'ccdextent':ccdextent,
'ccdgain':ccdgain,
'fluxthreshold':fluxthreshold,
'zeropoint':zeropoint,
'exptime':exptime,}) for x in fitslist
]
# fire up the pool of workers
results = pool.map(parallel_srcextract_list_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
# this is the return dictionary
returndict = {x:y for (x,y) in results}
return returndict
def match_fovcatalog_framesources(frame_extracted_sourcelist,
frame_projected_fovcatalog,
outfile,
srclist_cols=(0,1,2,5,6,7),
fovcat_cols=(0,1,2,12,13),
match_pixel_distance=0.5):
"""
Does frame_projected_fovcatalog and frame_extracted_sourcelist matching.
frame_extracted_sourcelist: *.fistar file
frame_projected_fovcatalog: *.projcatalog file
outfile: *.sourcelist file to be created by this function. Each line
looks like:
ID RA DEC x y phot_id x y s,d,k
HAT-381-0000008 249.39070 5.27754 1261.64440 517.39870 4620 1261.75400 517.42700 1.39400 0.24400 -0.17900
This matches the fovcatalog transformed to pixel coordinates to the
extracted source list pixel coordinates and gets the IDs of the sources to
be later used when creating the fiphot file.
    The procedure is:
    1. read the frame sourcelist (x,y) columns into an ndarray
    2. read the projected fovcatalog (x,y) columns into an ndarray
    3. build a cKDTree on the fovcatalog (x,y) positions
    4. query that tree with every extracted source position to get its nearest
    fovcatalog neighbour and the distance to it
    5. keep only the pairs closer than match_pixel_distance pixels
    6. for each kept pair, write the fovcatalog ID, RA, DEC, x, y and the
    extracted source ID, x, y, S, D, K to the output .sourcelist file.
"""
srclist = np.genfromtxt(frame_extracted_sourcelist,
dtype='S17,f8,f8,f8,f8,f8',
names=['id','x','y','s','d','k'],
usecols=srclist_cols)
fovcat = np.genfromtxt(frame_projected_fovcatalog,
dtype='S17,f8,f8,f8,f8',
names=['id','ra','dec','x','y'],
usecols=fovcat_cols)
srclist_xys = np.column_stack((srclist['x'],srclist['y']))
fovcat_xys = np.column_stack((fovcat['x'],fovcat['y']))
fovcat_tree = kdtree(fovcat_xys)
distances, fovcat_indices = fovcat_tree.query(srclist_xys)
# now that we have matches, put the two files together based on the indices
# the output format will be:
# srcext = source extracted frame list from fistar
# fovcat = frame projected FOV catalog object list from
# make_frame_sourcelist
# fovcat HAT-ID, fovcat RA, fovcat DEC, fovcat x, fovcat y,
# srcext id, srcext x, srcext y, srcext S, srcext D, srcext K
if not outfile:
outfile = (re.sub(sv.FITS_TAIL, '.matched-sources',
frame_extracted_sourcelist))
    outf = open(outfile, 'w')
outlinestr = '%s %.5f %.5f %.5f %.5f %s %.5f %.5f %.5f %.5f %.5f\n'
for dist, fovcat_ind, srclist_ind in zip(distances,
fovcat_indices,
range(len(srclist))):
if dist < match_pixel_distance:
outf.write(outlinestr %
                       (fovcat['id'][fovcat_ind].decode('utf-8'),
fovcat['ra'][fovcat_ind],
fovcat['dec'][fovcat_ind],
fovcat['x'][fovcat_ind],
fovcat['y'][fovcat_ind],
                        srclist['id'][srclist_ind].decode('utf-8'),
srclist['x'][srclist_ind],
srclist['y'][srclist_ind],
srclist['s'][srclist_ind],
srclist['d'][srclist_ind],
srclist['k'][srclist_ind]))
outf.close()
return outfile
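# A minimal usage sketch for match_fovcatalog_framesources. The file names are
# hypothetical placeholders:
#
#   sourcelist = match_fovcatalog_framesources(
#       '/data/red/1-377741e_5.fistar',       # extracted sources
#       '/data/red/1-377741e_5.projcatalog',  # catalog projected to pixel coords
#       '/data/red/1-377741e_5.sourcelist',   # matched output written here
#       match_pixel_distance=0.5
#   )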
def make_frameprojected_catalog(fits,
catalog,
catalogxycols=(12,13),
transformer='anrd2xy',
wcs=None,
ccdextent=None,
out=None,
removetemp=True,
pixborders=0.0,
observatory='hatpi'):
"""
This makes the projected catalog for the frame to use with fiphot using the
anet rdtoxy transform stored in the filename wcs and projects the catalog
into pixel coordinates of the image. If wcs is None, a file with .wcs
extension in the same directory as fits will be used as input. catalog is
the path to the FOV catalog generated by 2massread or ucac4read. out is the
name of the output sourcelist. Uses the executable defined in the
transformer kwarg (usually a variant of the rd2xy binary from
astrometry.net). ccdextent is a dictionary like CCDEXTENT above noting the
extent of the CCD and tells us which objects are outside the FOV so they're
removed from the output source list.
Returns the path of the source list file produced if successful, otherwise
returns None.
"""
fitspath = os.path.abspath(fits)
if wcs:
framewcsfile = wcs
else:
wcspath = re.sub(sv.FITS_TAIL,'',fitspath)
wcspath = wcspath + '.wcs'
framewcsfile = wcspath
if out:
outfile = out
temppath = out + '.projcattemp'
else:
outpath = re.sub(sv.FITS_TAIL,'',fitspath)
temppath = outpath + '.projcattemp'
outpath = outpath + '.projcatalog'
outfile = outpath
# format the transformer shell command
transformcmd = TRANSFORMCMD.format(transformer=transformer,
framewcsfile=framewcsfile,
catalogsourcelist=catalog,
outputfile=temppath)
if DEBUG:
print(transformcmd)
# make sure the wcs file makes sense before trying the transform
if (framewcsfile and
os.path.exists(os.path.abspath(framewcsfile)) and
os.stat(os.path.abspath(framewcsfile)).st_size > 0):
# execute the transformer shell command
transformproc = subprocess.Popen(shlex.split(transformcmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print('%sZ: %s' % (datetime.utcnow().isoformat(), transformcmd))
# get results
transform_stdout, transform_stderr = transformproc.communicate()
# get results if succeeded, log outcome, and return path of outfile
if transformproc.returncode == 0:
if observatory=='hatpi':
# now we need to take out sources outside the CCD extent
sourcelist_x, sourcelist_y = np.loadtxt(temppath,
usecols=catalogxycols,
unpack=True)
# get the extent of the CCD
if not ccdextent:
ccdextent = CCDEXTENT
# get indices for the lines to be kept optionally, remove all
# sources within pixborders pixels of the edges of the image.
keep_ind = np.where(
(sourcelist_x > (ccdextent['x'][0] + pixborders)) &
(sourcelist_x < (ccdextent['x'][1] - pixborders)) &
(sourcelist_y > (ccdextent['y'][0] + pixborders)) &
(sourcelist_y < (ccdextent['y'][1] - pixborders))
)[0].tolist()
# output the lines to be kept
outf = open(outfile, 'wb')
with open(temppath,'rb') as tempf:
templines = tempf.readlines()
templines = [x.decode('utf-8') for x in templines if '#' not in
x.decode('utf-8')]
for ind in keep_ind:
outf.write(templines[ind].encode('utf-8'))
outf.close()
elif observatory=='tess':
df = pd.read_csv(
temppath,
delim_whitespace=True,
names='id,ra,dec,xi,eta,G,Rp,Bp,plx,pmra,pmdec,varflag,x_proj,y_proj'.split(',')
)
sourcelist_x, sourcelist_y = (np.array(df['x_proj']),
np.array(df['y_proj']))
# get the extent of the CCD
if not ccdextent:
ccdextent = CCDEXTENT
# do a hack for TESS-SPOC WCS. the WCS is derived pre-trim of the
# rows and columns. this corrects it.
if ('proj' in fitspath and 's0' in fitspath and 'cam' in fitspath
and 'ccd' in fitspath and 'photref' in fitspath):
print('WRN! doing hack for TESS/SPOC WCS to fix the trim '+
'x/y CRPIX offset')
hdulist = pyfits.open(fitspath)
hdr = hdulist[0].header
sourcelist_x -= (hdr['SCCSA']-1)
sourcelist_y -= (hdr['SCIROWS']-1)
hdulist.close()
# the WCS-project and measured positions differ by this because
# of a coordinate offset problem, somewhere. this is
# CHECKED formally by wcsqualityassurance, and visually by
# inspecting tessutils.plot_apertures_on_frame
sourcelist_x -= 0.5
sourcelist_y -= 0.5
df['x_proj'] = sourcelist_x
df['y_proj'] = sourcelist_y
# get indices for the lines to be kept optionally, remove all
# sources within pixborders pixels of the edges of the image.
keep_ind = np.where(
(sourcelist_x > (ccdextent['x'][0] + pixborders)) &
(sourcelist_x < (ccdextent['x'][1] - pixborders)) &
(sourcelist_y > (ccdextent['y'][0] + pixborders)) &
(sourcelist_y < (ccdextent['y'][1] - pixborders))
)[0].tolist()
outdf = df.iloc[keep_ind]
outdf.to_csv(outfile, index=False, header=False, sep=' ')
print('TESS wrote {}'.format(outfile))
print(outdf.describe())
if removetemp:
os.remove(temppath)
print('%sZ: frame source list generation OK for %s' %
(datetime.utcnow().isoformat(),
fits))
return outfile
else:
print('%sZ: frame source list generation '
'failed for %s: error was %s' %
(datetime.utcnow().isoformat(),
fits,
transform_stderr))
if removetemp:
try:
os.remove(temppath)
except:
pass
return None
# the wcs file doesn't make sense for this FITS image, complain and return
else:
print('%sZ: WCS transform file does not work for %s, '
'skipping this frame...' %
(datetime.utcnow().isoformat(), fits))
return None
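# A minimal usage sketch for make_frameprojected_catalog. The paths are
# hypothetical; the .wcs file is assumed to sit next to the frame:
#
#   projcat = make_frameprojected_catalog(
#       '/data/red/1-377741e_5.fits',
#       '/data/catalogs/2MASS-RA122.5-DEC38.0-SIZE14.0.reformed_catalog',
#       removetemp=True,
#       pixborders=0.0
#   )
#   # returns the .projcatalog path on success, None otherwise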
def run_fiphot(fits,
sourcelist=None,
xycols='25,26', # set for full 2MASS fov catalog output format
ccdgain=None,
zeropoint=None,
ccdexptime=None,
aperturelist='1.95:7.0:6.0,2.45:7.0:6.0,2.95:7.0:6.0',
formatstr='ISXY,BbMms',
outfile=None,
removesourcelist=False,
binaryoutput=True,
observatory='hatpi'):
"""
    This runs fiphot for a single frame. Only the fits filename is required. If
other parameters are not provided, they will be obtained from the image
header and defaults.
Returns the path of the .fiphot file produced if successful, otherwise
returns None.
"""
# get the required header keywords from the FITS file
if observatory=='hatpi':
headerlist = ['GAIN', 'GAIN1', 'GAIN2', 'EXPTIME', 'RAC', 'DECC',
'FOV']
elif observatory=='tess':
headerlist = ['GAINA', 'TELAPSE', 'CRVAL1', 'CRVAL2']
header = imageutils.get_header_keyword_list(fits, headerlist)
# handle the gain and exptime parameters
if not ccdgain:
# FIXME: is this right? should we use separate gain values for each side
# of the CCD? what about stars that straddle the middle?
if 'GAIN1' in header and 'GAIN2' in header:
ccdgain = (header['GAIN1'] + header['GAIN2'])/2.0
elif 'GAIN' in header:
ccdgain = header['GAIN']
else:
ccdgain = None
if not ccdexptime:
ccdexptime = header['EXPTIME'] if 'EXPTIME' in header else None
    # fiphot needs both the gain and the exposure time, so bail out if either
    # is missing
    if not (ccdgain and ccdexptime):
        print('%sZ: no GAIN or EXPTIME defined for %s' %
              (datetime.utcnow().isoformat(),
               fits))
        return None
# figure out the fitsbase from the fits filename
fitsbase = re.sub(sv.FITS_TAIL,'',os.path.basename(fits))
# handle the zeropoints
if not zeropoint:
# if the zeropoint isn't provided and if this is a HAT frame, the ccd
# number will get us the zeropoint in the ZEROPOINTS dictionary
frameinfo = FRAMEREGEX.findall(fits)
if frameinfo:
zeropoint = ZEROPOINTS[int(frameinfo[0][-1])]
else:
print('%sZ: no zeropoint magnitude defined for %s' %
(datetime.utcnow().isoformat(),
fits))
return None
# figure out the output path
if not outfile:
outfile = os.path.abspath(re.sub(sv.FITS_TAIL,'.fiphot',fits))
# figure out the sourcelist path
if not sourcelist:
sourcelist = os.path.abspath(re.sub(sv.FITS_TAIL,'.sourcelist',fits))
if not os.path.exists(sourcelist):
print("%sZ: can't find a source list for %s" %
(datetime.utcnow().isoformat(),
fits))
return None
# figure out if we want binary output or not
if binaryoutput:
binaryout = '--binary-output'
else:
binaryout = ''
# assemble the fiphot command string
fiphotcmd = FIPHOTCMD.format(fits=fits,
sourcelist=sourcelist,
xycols=xycols,
zeropoint=zeropoint,
ccdgain=ccdgain,
ccdexptime=ccdexptime,
aperturelist=aperturelist,
fitsbase=fitsbase,
formatstr=formatstr,
binaryout=binaryout,
outfile=outfile)
if DEBUG:
print(fiphotcmd)
# execute the fiphot command
fiphotproc = subprocess.Popen(shlex.split(fiphotcmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# get results
fiphot_stdout, fiphot_stderr = fiphotproc.communicate()
# get results if succeeded, log outcome, and return path of outfile
if fiphotproc.returncode == 0:
print('%sZ: photometry using fiphot OK for %s' %
(datetime.utcnow().isoformat(),
fits))
if removesourcelist:
os.remove(sourcelist)
return outfile
else:
print('%sZ: fiphot failed for %s: error was %s' %
(datetime.utcnow().isoformat(),
fits,
               fiphot_stderr))
return None
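# A minimal usage sketch for run_fiphot. The frame and source list paths are
# hypothetical; the gain and exposure time fall back to the frame header, and
# the zeropoint to the HAT frame-name convention, when not given explicitly:
#
#   fiphot = run_fiphot('/data/red/1-377741e_5.fits',
#                       sourcelist='/data/red/1-377741e_5.sourcelist',
#                       xycols='7,8',
#                       aperturelist='1.95:7.0:6.0,2.45:7.0:6.0,2.95:7.0:6.0',
#                       binaryoutput=True)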
def do_photometry(fits,
reformedfovcatalog,
extractsources=True,
fluxthreshold=500.0,
fovcat_xycols=(12,13),
projcat_xycols=(24,25),
fiphot_xycols='7,8', # set for matched source list
outdir=None,
ccdextent=None,
ccdgain=None,
zeropoint=None,
ccdexptime=None,
removesourcetemp=True,
pixborders=0.0,
aperturelist='1.95:7.0:6.0,2.45:7.0:6.0,2.95:7.0:6.0',
formatstr='ISXY,BbMms',
removesourcelist=False,
binaryoutput=True,
minsrcbgv=100.0,
maxmadbgv=20.0,
maxframebgv=2000.0,
minnstars=500,
observatory='hatpi',
extractforsdk=False,
overwrite=True):
"""
This rolls up the sourcelist and fiphot functions above.
Runs both stages on fits, and puts the output in outdir if it exists. If it
doesn't or is None, then puts the output in the same directory as fits.
kwargs:
reformedfovcatalog is the path to the 2MASS/UCAC4 catalog for all sources in the
observed field. (12 columns!)
extractforsdk (bool): if you want to run fistar source extraction on
the frame to get SDK values, even though you're really forcing the
photometry from a projected catalog.
"""
outprojcat = re.sub(sv.FITS_TAIL,'.projcatalog',os.path.basename(fits))
outsourcelist = re.sub(sv.FITS_TAIL,'.sourcelist',os.path.basename(fits))
outfiphot = re.sub(sv.FITS_TAIL,'.fiphot',os.path.basename(fits))
if outdir and os.path.exists(outdir):
outprojcat = os.path.join(outdir, outprojcat)
outsourcelist = os.path.join(outdir,outsourcelist)
outfiphot = os.path.join(outdir, outfiphot)
elif outdir and not os.path.exists(outdir):
print('%sZ: making new output directory %s' %
(datetime.utcnow().isoformat(),
outdir))
os.mkdir(outdir)
outprojcat = os.path.join(outdir, outprojcat)
outsourcelist = os.path.join(outdir,outsourcelist)
outfiphot = os.path.join(outdir, outfiphot)
else:
outprojcat, outsourcelist, outfiphot = None, None, None
if DEBUG:
print('output projected catalog will be %s' % outprojcat)
print('output source list will be %s' % outsourcelist)
print('output fiphot will be %s' % outfiphot)
# make the .projcatalog files (projects catalog onto the frame)
if overwrite or not os.path.exists(outprojcat):
projcatfile = make_frameprojected_catalog(fits,
reformedfovcatalog,
ccdextent=ccdextent,
out=outprojcat,
removetemp=removesourcetemp,
pixborders=pixborders,
observatory=observatory)
else:
print('%sZ: %s already exists, skipping making projcatalog.' %
(datetime.utcnow().isoformat(), outprojcat))
projcatfile = outprojcat
if projcatfile:
# if we're supposed to extract sources and run photometry on them
# instead of just the sources in the projected fovcatalog, do so
if extractsources:
outfistar = os.path.join(outdir, re.sub(sv.FITS_TAIL, '.fistar',
os.path.basename(fits)))
# extract sources
if observatory=='hatpi':
if overwrite or not os.path.exists(outfistar):
framesources = extract_frame_sources(
fits, outfistar,
fluxthreshold=fluxthreshold
)
else:
print('%sZ: %s already exists, skipping fistar.' %
(datetime.utcnow().isoformat(), outfistar))
framesources = outfistar
elif observatory=='tess':
framesources = extract_frame_sources(
fits,
os.path.join(
outdir,
re.sub(sv.FITS_TAIL,'.fistar',os.path.basename(fits))
),
fluxthreshold=fluxthreshold,
ccdgain=ccdgain,
zeropoint=zeropoint,
exptime=ccdexptime
)
if framesources:
# match extracted frame sources to the projected fovcatalog.
# this makes a .sourcelist file, named "outsourcelist".
matchedsources = match_fovcatalog_framesources(
framesources, # *.fistar file
projcatfile, # *.projcatalog file
outsourcelist # *.sourcelist file, created by this function
)
fiphot_xycols = '7,8'
else:
print('%sZ: extracting sources failed for %s!' %
(datetime.utcnow().isoformat(), fits))
return None, None
else:
# even if you don't want to extract sources for *photometry*, you
# might want to extract them for their SDK values. for example,
# this is needed so that you can select photometric reference
# frames, while still running forced photometry from a base
# catalog.
if extractforsdk:
_ = extract_frame_sources(
fits,
os.path.join(
outdir,
re.sub(sv.FITS_TAIL,'.fistar',os.path.basename(fits))
),
fluxthreshold=fluxthreshold,
ccdgain=ccdgain,
zeropoint=zeropoint,
exptime=ccdexptime
)
outsourcelist = projcatfile
fiphot_xycols = '13,14'
# run fiphot on the source list
fiphotfile = run_fiphot(fits,
sourcelist=outsourcelist,
aperturelist=aperturelist,
outfile=outfiphot,
xycols=fiphot_xycols,
ccdgain=ccdgain,
zeropoint=zeropoint,
formatstr=formatstr,
ccdexptime=ccdexptime,
removesourcelist=removesourcelist,
binaryoutput=binaryoutput,
observatory=observatory)
if fiphotfile:
return fiphotfile, None
else:
print('%sZ: photometry failed for %s!' %
(datetime.utcnow().isoformat(), fits))
else:
print('%sZ: creating a projected source catalog failed for %s!' %
(datetime.utcnow().isoformat(), fits))
return None, None
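# A minimal usage sketch for do_photometry. The frame, reformed catalog, and
# output directory are hypothetical placeholders:
#
#   fiphot, frameinfo = do_photometry(
#       '/data/red/1-377741e_5.fits',
#       '/data/catalogs/2MASS-RA122.5-DEC38.0-SIZE14.0.reformed_catalog',
#       outdir='/data/phot',
#       extractsources=True,
#       fluxthreshold=500.0
#   )
#   # fiphot is the .fiphot path on success, or None if any stage failed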
def parallel_photometry_worker(task):
"""
This is the parallel photometry worker function for use with
parallel_fitsdir_photometry below. Just calls do_photometry with expanded
args and kwargs from the two element task list. task[0] is a tuple of args,
and task[1] is a dictionary of kwargs. task[2] is a boolean indicating if we
should kill bad frames.
Returns a tuple of form: (fits, fiphot)
"""
try:
# first, do the photometry
framephot, frameinfo = do_photometry(*task[0], **task[1])
badframesdir = os.path.join(task[1]['outdir'],'badframes')
# make sure all is OK with this frame
if framephot:
result = (framephot, frameinfo)
# if the frame photometry is bad or the frame isn't OK, delete its
# fiphot so we don't have to deal with it later
else:
# if the fiphot exists and we're allowed to kill it, do so
            if framephot and os.path.exists(framephot) and task[2]:
filestomove = glob.glob(
os.path.join(
os.path.dirname(framephot),
os.path.basename(framephot).replace('.fiphot','.*')
)
)
for filetomove in filestomove:
shutil.move(filetomove, badframesdir)
# tell the user what happened
print('WRN! frame %s rejected, %s. %s' %
(task[0][0],
'files moved' if task[2] else 'fiphot %s' % framephot,
frameinfo if frameinfo else 'photometry failed!'))
result = (framephot, frameinfo)
except Exception as e:
print('ERR! photometry failed! reason: %s' % e)
result = (None, None)
return result
def parallel_fitsdir_photometry(
fitsdir,
outdir,
fovcatalog,
fluxthreshold=500.0,
ccdextent=None,
pixborders=0.0,
aperturelist='1.95:7.0:6.0,2.45:7.0:6.0,2.95:7.0:6.0',
removesourcetemp=True,
removesourcelist=False,
binaryoutput=True,
nworkers=16,
maxtasksperworker=1000,
saveresults=True,
rejectbadframes=True,
minsrcbgv=200.0,
maxmadbgv=150.0,
maxframebgv=2000.0,
minnstars=500,
formatstr='ISXY,BbMms',
ccdgain=None,
ccdexptime=None,
zeropoint=None,
fitsglob='?-*_?.fits',
extractsources=True,
fovcat_xycols=(12,13),
projcat_xycols=(24,25),
fiphot_xycols='7,8',
observatory='hatpi',
overwrite=True
):
"""
This does photometry for all FITS files in a directory using nworkers
parallel workers.
"""
# get a list of all fits files in the directory
fitslist = glob.glob(os.path.join(fitsdir,fitsglob))
if not overwrite:
existing = glob.glob(
os.path.join(fitsdir, fitsglob.replace('.fits', '.fiphot'))
)
requested = list(map(os.path.basename, fitslist))
requested = [r.replace('.fits','') for r in requested]
alreadyexists = list(map(os.path.basename, existing))
alreadyexists = [ae.replace('.fiphot','') for ae in alreadyexists]
setdiff = np.setdiff1d(requested, alreadyexists)
if len(setdiff) == 0:
print("%sZ: aperture photometry already done for all FITS files..." %
datetime.utcnow().isoformat())
return
fitslist = [os.path.join(fitsdir, sd+'.fits') for sd in setdiff]
print('%sZ: found %s FITS files in %s, starting photometry...' %
(datetime.utcnow().isoformat(),
len(fitslist), fitsdir))
if outdir and not os.path.exists(outdir):
print('%sZ: making new output directory %s' %
(datetime.utcnow().isoformat(),
outdir))
os.mkdir(outdir)
pool = mp.Pool(nworkers,maxtasksperchild=maxtasksperworker)
tasks = [[(x, fovcatalog),
{'outdir':outdir,
'ccdextent':ccdextent,
'pixborders':pixborders,
'aperturelist':aperturelist,
'removesourcetemp':removesourcetemp,
'removesourcelist':removesourcelist,
'fluxthreshold':fluxthreshold,
'binaryoutput':binaryoutput,
'minsrcbgv':minsrcbgv,
'maxmadbgv':maxmadbgv,
'maxframebgv':maxframebgv,
'minnstars':minnstars,
'formatstr':formatstr,
'ccdgain':ccdgain,
'ccdexptime':ccdexptime,
'zeropoint':zeropoint,
'extractsources':extractsources,
'observatory':observatory,
'fovcat_xycols':fovcat_xycols,
'projcat_xycols':projcat_xycols,
'fiphot_xycols':fiphot_xycols,
'overwrite':overwrite}, rejectbadframes] for x in fitslist]
# if the badframes directory doesn't exist, make it
badframesdir = os.path.join(outdir, 'badframes')
if not os.path.exists(badframesdir):
os.mkdir(badframesdir)
# fire up the pool of workers
results = pool.map(parallel_photometry_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
# this is the return dictionary
returndict = {x:y for (x,y) in results}
if saveresults:
resultsfile = open(os.path.join(outdir,'TM-photometry.pkl'),'wb')
pickle.dump(returndict, resultsfile)
resultsfile.close()
return returndict
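# A minimal usage sketch for parallel_fitsdir_photometry. The directories and
# catalog path are hypothetical placeholders:
#
#   photresults = parallel_fitsdir_photometry(
#       '/data/red', '/data/phot',
#       '/data/catalogs/2MASS-RA122.5-DEC38.0-SIZE14.0.reformed_catalog',
#       fitsglob='1-*_5.fits',
#       nworkers=16,
#       extractsources=True
#   )
#   # per-frame results are also pickled to /data/phot/TM-photometry.pkl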
def parallel_fitslist_photometry(
fitslist,
outdir,
photokey,
fovcatalog,
fluxthreshold=500.0,
ccdextent=None,
pixborders=0.0,
aperturelist='1.95:7.0:6.0,2.45:7.0:6.0,2.95:7.0:6.0',
removesourcetemp=True,
removesourcelist=False,
binaryoutput=True,
nworkers=16,
maxtasksperworker=1000,
saveresults=True,
rejectbadframes=True,
formatstr='ISXY,BbMms',
minsrcbgv=200.0,
maxmadbgv=150.0,
maxframebgv=2000.0,
minnstars=500
):
"""
This does photometry for all FITS files in the given list using nworkers
parallel workers.
photokey is required to set the name of the output photometry info pickle.
"""
# get a list of all fits files in the directory
goodlist = [x for x in fitslist if os.path.exists(x)]
# if we have no files, then bail out
if not goodlist:
print('%sZ: no good FITS in list, bailing out...' %
(datetime.utcnow().isoformat(),))
return
print('%sZ: found %s FITS files in input list, starting photometry...' %
(datetime.utcnow().isoformat(),
len(goodlist)))
if outdir and not os.path.exists(outdir):
print('%sZ: making new output directory %s' %
(datetime.utcnow().isoformat(),
outdir))
os.mkdir(outdir)
pool = mp.Pool(nworkers,maxtasksperchild=maxtasksperworker)
tasks = [[(x, fovcatalog),
{'outdir':outdir,
'ccdextent':ccdextent,
'pixborders':pixborders,
'aperturelist':aperturelist,
'removesourcetemp':removesourcetemp,
'removesourcelist':removesourcelist,
'fluxthreshold':fluxthreshold,
'binaryoutput':binaryoutput,
'minsrcbgv':minsrcbgv,
'formatstr':formatstr,
'maxmadbgv':maxmadbgv,
'maxframebgv':maxframebgv,
'minnstars':minnstars}, rejectbadframes] for x in goodlist]
# if the badframes directory doesn't exist, make it
badframesdir = os.path.join(outdir, 'badframes')
if not os.path.exists(badframesdir):
os.mkdir(badframesdir)
# fire up the pool of workers
results = pool.map(parallel_photometry_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
# this is the return dictionary
returndict = {x:y for (x,y) in results}
if saveresults:
resultsfile = open(os.path.join(outdir,
'TM-photometry-%s.pkl' % photokey),'wb')
pickle.dump(returndict, resultsfile)
resultsfile.close()
return returndict
##############################
## FRAME INFO AND FILTERING ##
##############################
def collect_image_info(fits, fistar,
minsrcbgv=100.0,
minsrcsval=1.5,
maxframebgv=2000.0,
maxmadbgv=150.0,
minnstars=500):
"""
This collects the following info about a frame.
- nstars detected
- median background for all source detections
- MAD of the background for all source detections
- the overall image background
bad frames are usually those with:
- large MAD for the background
- median source background < 0
- nstars < 500
furthermore, a running average over, say, 10 frames will reject large
deviations in:
- nstars
- median source background
- overall image background
"""
frame, hdr = read_fits(fits)
# the overall image background
# assume this is the same as the median for now
# previously, we were assuming that this is 100 ADU below the median
imgbackg = extract_img_background(frame,median_diffbelow=0.0)
# at some point this used to return an array, now it doesn't?
# guard against this madness
    if isinstance(imgbackg,np.ndarray):
        framebgv = float(imgbackg[0])
    elif isinstance(imgbackg, (int, float)):
        framebgv = float(imgbackg)
    else:
        # make sure framebgv is always defined; NaN fails the frameok
        # comparison below, so such frames are flagged as bad
        framebgv = np.nan
# get the fistar file columns we need
framecols = np.genfromtxt(fistar,
usecols=(3,5),
names=['bgv','sval'],
dtype='f8,f8',comments='#')
finitesrcbgvs = framecols['bgv'][np.isfinite(framecols['bgv'])]
nstars = len(finitesrcbgvs)
mediansrcbgv = np.median(finitesrcbgvs)
madsrcbgv = np.median(np.abs(finitesrcbgvs - mediansrcbgv))
mediansval = np.median(framecols['sval'][np.isfinite(framecols['sval'])])
# check if the frame was aborted in the middle of the exposure
    if 'ABORTED' in hdr and hdr['ABORTED'] == 1:
        frameaborted = True
    elif 'ABORTED' in hdr and hdr['ABORTED'] == 0:
        frameaborted = False
    else:
        frameaborted = None
frameok = ((mediansrcbgv > minsrcbgv) and
(madsrcbgv < maxmadbgv) and
(-2*minsrcbgv < framebgv < maxframebgv) and
(nstars >= minnstars) and
(mediansval > minsrcsval) and
(frameaborted is not True))
frameinfo = {'fits':fits,
'fistar':fistar,
'nstars':nstars,
'medsrcbgv':mediansrcbgv,
'madsrcbgv':madsrcbgv,
'medsrcsval':mediansval,
'framebgv':framebgv,
'frameok':frameok}
return frameinfo
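# A minimal usage sketch for collect_image_info. The frame and fistar paths
# are hypothetical placeholders:
#
#   info = collect_image_info('/data/red/1-377741e_5.fits',
#                             '/data/red/1-377741e_5.fistar',
#                             minsrcbgv=100.0, maxmadbgv=150.0,
#                             maxframebgv=2000.0, minnstars=500)
#   if not info['frameok']:
#       print('frame fails the background / detection-count cuts')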
def frame_filter_worker(task):
"""
This wraps collect_image_info above and removes the fiphot for image if it's
rejected.
task[0] = fits
task[1] = fistar
    task[2] = {'minsrcbgv', 'maxframebgv', 'maxmadbgv',
               'minnstars', 'minsrcsval'}
this returns:
True: if the frame was not filtered out
False: if the frame was filtered out
None: if the frame filtering failed
"""
try:
# first, make sure that fits and fistar both exist
if (task[0] and task[1] and
os.path.exists(task[0]) and os.path.exists(task[1])):
frameinfo = collect_image_info(task[0],
task[1],
**task[2])
# get rid of the frame if we're allowed to do so
if not frameinfo['frameok']:
returnval = False
else:
returnval = True
return returnval
else:
print("ERR! fits/fiphot don't exist for this frame: %s" % task[0])
return None
except Exception as e:
print("ERR! frame stats collection failed for %s, reason: %s" %
(task[0], e))
return None
def parallel_frame_filter(fitsdir,
fitsglob='?-*_?.fits',
fistarext='.fistar',
fiphotext='.fiphot',
removebadframes=False,
badframesdir=None,
minsrcbgv=100.0,
maxmadbgv=200.0,
minsrcsval=1.5,
maxframebgv=2000.0,
minnstars=500,
nworkers=16,
maxworkertasks=1000):
"""
This goes through a fitsdir and removes bad frames.
"""
# find all the fits files
fitslist = glob.glob(os.path.join(os.path.abspath(fitsdir),
fitsglob))
# make sure all of these have accompanying fistar and fiphot files
tasks = []
print('%s total FITS, finding good FITS files...' % len(fitslist))
for fits in fitslist:
fistar = fits.replace('.fits',fistarext)
if os.path.exists(fistar):
tasks.append((fits, fistar, {'minsrcbgv':minsrcbgv,
'minsrcsval':minsrcsval,
'maxmadbgv':maxmadbgv,
'maxframebgv':maxframebgv,
'minnstars':minnstars}))
print('%s FITS to work on.' % len(tasks))
if len(tasks) > 0:
if not badframesdir:
badframesdir = os.path.join(fitsdir, 'badframes')
if not os.path.exists(badframesdir):
os.mkdir(badframesdir)
print('checking FITS files...')
# now start up the workers
pool = mp.Pool(nworkers,maxtasksperchild=maxworkertasks)
results = pool.map(frame_filter_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
outdict = {}
# now remove the fiphots if we're asked to do so
for x, result in zip(tasks, results):
fits = x[0]
if (result is False or result is None) and removebadframes:
filestomove = glob.glob(fits.replace('.fits','.*'))
for deadfile in filestomove:
shutil.move(deadfile, badframesdir)
print('moved all files for %s to %s' % (fits, badframesdir))
elif (result is False or result is None) and not removebadframes:
print('bad frame %s, not moving' % fits)
else:
print('frame %s is OK' % fits)
outdict[fits] = (result, fits.replace('.fits',fiphotext))
resultsfile = open(os.path.join(fitsdir,
'TM-framerejection.pkl'),'wb')
pickle.dump(outdict, resultsfile)
resultsfile.close()
return outdict
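# A minimal usage sketch for parallel_frame_filter. The directory, glob, and
# badframes location are hypothetical placeholders:
#
#   filterdict = parallel_frame_filter('/data/red',
#                                      fitsglob='1-*_5.fits',
#                                      removebadframes=True,
#                                      badframesdir='/data/red/badframes',
#                                      nworkers=16)
#   # filterdict maps each frame to (frameok flag, its .fiphot path)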
#################################
## MAGNITUDE FITTING FUNCTIONS ##
#################################
def get_magfit_frames(fitsdir,
fitsglob,
photdir,
workdir=None,
headerfilters=None,
selectreference=True,
framestats=False,
linkfiles=True,
outlistfile=None,
observatory='hatpi'):
"""
fitsdir = directory where the FITS object frames are
fitsglob = glob to select a subset of the FITS object frames
photdir = directory where the TEXT fiphot files are (to select a
singlephotref)
workdir = directory where to create FITS and fiphot symlinks so
MagnitudeFitting.py can work there
This does the following:
1. gets a list of frames in fitsdir that match fitsglob
2. associates these files with their photometry files in photdir
3. optionally filters them by the definitions in headerfilters (TBD)
4. optionally creates symlinks to the chosen frame files in the workdir
(photdir by default, directory will be created if non-existent)
5. optionally selects a frame that might be a good reference for single
reference magfit
6. optionally writes the frame list to outlistfile.
headerfilters is a list of string elements of the form:
'<FITS header key> <operator> <FITS header value>'
where <operator> is a standard binary operator. This string will be evaled
to get the filter results.
if selectreference is True, the following algorithm chooses a reference
frame:
1. from frames, get zenith distances (Z), moon distance (MOONDIST), moon
elevation (MOONELEV).
2. from (text) fiphot files, get nsources with G flags, MAD of magnitudes in
largest aperture, median magnitude error in largest aperture.
3. sort Z asc, MOONDIST desc, MOONELEV asc, nsources desc, good nsources
desc, mag MAD asc, magerr asc, use these sort indices to sort framelists
4. create sets of first 200 frames in each framelist above (or all frames if
total nframes with phot < 200), and then intersect them all to find a set
of frames that have the best properties. pick the first one out of the
final intersection set as the reference frame.
Returns a dict of the form:
{'framelist':<list of frames passing fitsglob and headerfilters>,
'photlist':<list of fitphot files associated with selected frames>,
'framelistfile':<path to the outlistfile if created>,
'referenceframe':<path to the chosen reference frame>,
'referencestats':<stats dictionary for the reference frame>,
'framestats':<a stats dictionary for all the frames>}
"""
# first, get the frames
fitslist = glob.glob(os.path.join(fitsdir, fitsglob))
print('%sZ: %s FITS files found in %s matching glob %s' %
(datetime.utcnow().isoformat(),
len(fitslist), fitsdir, fitsglob))
# TODO: add filtering by header keywords using headerfilters
photfits = []
photlist = []
# associate the frames with their fiphot files
for fits in fitslist:
searchpath = os.path.join(
photdir,
re.sub(sv.FITS_TAIL,'.fiphot',os.path.basename(fits))
)
if os.path.exists(searchpath):
photfits.append(fits)
photlist.append(searchpath)
# remove all frames with no fiphot files
fitslist = photfits
# check if the workdir exists. if it doesn't, create it
if workdir and not os.path.exists(workdir):
os.mkdir(workdir)
print('%sZ: made work directory %s' %
(datetime.utcnow().isoformat(),
workdir))
# if the workdir is not provided, use the photdir as workdir
if not workdir:
workdir = photdir
# make sure the workdir has a stats and MPHOTREF directory
if not os.path.exists(os.path.join(workdir, 'stats')):
os.mkdir(os.path.join(workdir,'stats'))
if not os.path.exists(os.path.join(workdir,'MPHOTREF')):
os.mkdir(os.path.join(workdir,'MPHOTREF'))
workfitslist, workphotlist = [], []
# link the frames in fitsdir to workdir
if linkfiles:
# temporarily change the directory so symlinking works
cwd = os.getcwd()
try:
os.chdir(workdir)
for fits in fitslist:
os.symlink(fits, os.path.basename(fits))
workfitslist.append(
os.path.join(workdir, os.path.basename(fits))
)
# change back at the end
os.chdir(cwd)
print('%sZ: linked frames to workdir %s' %
(datetime.utcnow().isoformat(),
workdir))
except Exception as e:
print('%sZ: linking fiphot files to workdir %s failed!' %
(datetime.utcnow().isoformat(),
workdir))
os.chdir(cwd)
raise
# if the workdir != photdir, then link the fiphot files too
if workdir != photdir and linkfiles:
# temporarily change the directory so symlinking works
cwd = os.getcwd()
try:
os.chdir(workdir)
for fiphot in photlist:
os.symlink(fiphot, os.path.basename(fiphot))
workphotlist.append(
os.path.join(workdir, os.path.basename(fiphot))
)
# change back at the end
os.chdir(cwd)
print('%sZ: linked fiphot files to workdir %s' %
(datetime.utcnow().isoformat(),
workdir))
except Exception as e:
print('%sZ: linking fiphot files to workdir %s failed!' %
(datetime.utcnow().isoformat(),
workdir))
os.chdir(cwd)
raise
if not workfitslist:
workfitslist = fitslist
if not workphotlist:
workphotlist = photlist
# collect the minimum returndict
returndict = {'origframelist':fitslist,
'origphotlist':photlist,
'workframelist':workfitslist,
'workphotlist':workphotlist,
'framelistfile':outlistfile}
# find a reference frame
goodframes, goodphots = [], []
if selectreference:
print('%sZ: selecting a reference frame...' %
(datetime.utcnow().isoformat()))
if observatory=='hatpi':
zenithdist, moondist, moonelev = [], [], []
elif observatory=='tess':
pass
ngoodobjects, medmagerr, magerrmad = [], [], []
# for each FITS and fiphot combo, collect stats
for fits, fiphot in zip(workfitslist, workphotlist):
if observatory=='hatpi':
headerdata = imageutils.get_header_keyword_list(
fits, ['Z','MOONDIST','MOONELEV'])
            # decide if the fiphot file is binary or not. read the first 1000
            # bytes, decode them, and look for the '--binary-output' text
            with open(fiphot,'rb') as fiphotf:
                header = fiphotf.read(1000).decode('utf-8', errors='ignore')
if '--binary-output' in header and HAVEBINPHOT:
photdata_f = read_fiphot(fiphot)
if photdata_f:
photdata = {
'mag':np.array(photdata_f['per aperture'][2]['mag']),
'err':np.array(photdata_f['per aperture'][2]['mag err']),
'flag':np.array(
photdata_f['per aperture'][2]['status flag']
)
}
else:
print('no photdata in %s, skipping...' % fiphot)
continue
del photdata_f
elif '--binary-output' in header and not HAVEBINPHOT:
print('%sZ: %s is a binary fiphot file, '
'but no binary fiphot reader is present, skipping...' %
(datetime.utcnow().isoformat(), fiphot))
continue
else:
# read in the phot file
photdata = np.genfromtxt(
fiphot,
usecols=(12,13,14),
dtype='f8,f8,S5',
names=['mag','err','flag']
)
# calculate stats for photometry
# find good frames
if '--binary-output' in header:
goodind = np.where(photdata['flag'] == 0)
else:
                goodind = np.where(photdata['flag'] == b'G')
ngood = len(goodind[0])
median_mag = np.median(photdata['mag'][goodind])
median_magerr = np.median(photdata['err'][goodind])
medabsdev_mag = np.median(
np.abs(photdata['mag'][goodind] - median_mag)
)
# append to result lists
goodframes.append(fits)
goodphots.append(fiphot)
ngoodobjects.append(ngood)
medmagerr.append(median_magerr)
magerrmad.append(medabsdev_mag)
if observatory=='hatpi':
zenithdist.append(headerdata['Z'])
moondist.append(headerdata['MOONDIST'])
moonelev.append(headerdata['MOONELEV'])
if DEBUG:
if observatory=='hatpi':
print('frame = %s, phot = %s, Z = %s, MOONDIST = %s, '
'MOONELEV = %s, ngood = %s, medmagerr = %.5f, '
'magerrmad = %.5f' %
(fits, fiphot, headerdata['Z'],
headerdata['MOONDIST'], headerdata['MOONELEV'],
ngood, median_magerr, medabsdev_mag))
elif observatory=='tess':
print('frame = %s, phot = %s, '
'ngood = %s, medmagerr = %.5f, '
'magerrmad = %.5f' %
(fits, fiphot, ngood, median_magerr, medabsdev_mag))
# now that we're done collecting data, sort them in orders we want
goodframes = np.array(goodframes)
goodphots = np.array(goodphots)
ngood_ind = np.argsort(ngoodobjects)[::-1]
mederr_ind = np.argsort(medmagerr)
magmad_ind = np.argsort(magerrmad)
if observatory=='hatpi':
zenithdist_ind = np.argsort(zenithdist)
moondist_ind = np.argsort(moondist)[::-1]
moonelev_ind = np.argsort(moonelev)
        # keep at most the top 500 frames from each ranking (or all of them if
        # there are 200 or fewer good frames)
if len(goodframes) > 200:
ngood_ind = ngood_ind[:500]
mederr_ind = mederr_ind[:500]
magmad_ind = magmad_ind[:500]
if observatory=='hatpi':
zenithdist_ind = zenithdist_ind[:500]
moondist_ind = moondist_ind[:500]
moonelev_ind = moonelev_ind[:500]
# intersect all arrays to find a set of common indices that belong to
# the likely reference frames
photgood_ind = np.intersect1d(np.intersect1d(ngood_ind,
magmad_ind,
assume_unique=True),
mederr_ind,assume_unique=True)
if observatory=='hatpi':
headergood_ind = np.intersect1d(np.intersect1d(moondist_ind,
moonelev_ind,
assume_unique=True),
zenithdist_ind,assume_unique=True)
allgood_ind = np.intersect1d(photgood_ind, headergood_ind,
assume_unique=True)
elif observatory=='tess':
allgood_ind = photgood_ind
else:
raise NotImplementedError
# if the headers and photometry produce a good reference frame, use
# that. if they don't, use the photometry to choose a good reference
# frame
if len(allgood_ind) > 0:
selectedreference = goodframes[allgood_ind[0]]
selectedind = allgood_ind[0]
elif len(photgood_ind) > 0:
selectedreference = goodframes[photgood_ind[0]]
selectedind = photgood_ind[0]
        elif observatory=='hatpi' and len(headergood_ind) > 0:
selectedreference = goodframes[headergood_ind[0]]
selectedind = headergood_ind[0]
else:
selectedreference = None
selectedind = None
print('%sZ: selected reference frame = %s' %
(datetime.utcnow().isoformat(), selectedreference))
print('%sZ: selected reference phot = %s' %
(datetime.utcnow().isoformat(), goodphots[selectedind]))
# update the returndict with reference frame and stats
returndict['referenceframe'] = selectedreference
returndict['referencephot'] = goodphots[selectedind]
if selectedreference and observatory=='hatpi':
returndict['referencestats'] = {
'zenithdist':zenithdist[selectedind],
'moondist':moondist[selectedind],
'moonelev':moonelev[selectedind],
'ngood':ngoodobjects[selectedind],
'magmad':magerrmad[selectedind],
'mederr':medmagerr[selectedind],
}
elif selectedreference and observatory=='tess':
returndict['referencestats'] = {
'ngood':ngoodobjects[selectedind],
'magmad':magerrmad[selectedind],
'mederr':medmagerr[selectedind],
}
else:
returndict['referencestats'] = None
# add all frame stats to the returndict
if framestats:
if observatory=='tess':
raise NotImplementedError
returndict['framestats'] = {'frame':goodframes,
'phot':goodphots,
'zenithdist':zenithdist,
'moondist':moondist,
'moonelev':moonelev,
'ngood':ngoodobjects,
'magmad':magerrmad,
'mederr':medmagerr}
# done with reference frame selection #
# update the workfitslist and workphotlist if we did reference frame
# selection
if (len(goodframes) > 0 and len(goodphots) > 0 and
len(goodframes) == len(goodphots)):
returndict['workframelist'] = goodframes
returndict['workphotlist'] = goodphots
# write the framelistfile using the new frame locations
if outlistfile:
            outf = open(outlistfile,'w')
for fitsline, photline in zip(workfitslist,workphotlist):
outf.write('%s %s\n' % (photline, fitsline))
outf.close()
print('%sZ: wrote good frame list to %s' %
(datetime.utcnow().isoformat(), os.path.abspath(outlistfile)))
print('%sZ: done with fitsdir = %s' %
(datetime.utcnow().isoformat(), fitsdir))
# at the end, return returndict
return returndict
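# A minimal usage sketch for get_magfit_frames. The directories and glob are
# hypothetical placeholders:
#
#   mfinfo = get_magfit_frames('/data/red', '1-*_5.fits', '/data/phot',
#                              workdir='/data/magfit',
#                              selectreference=True,
#                              linkfiles=True,
#                              outlistfile='/data/magfit/framelist.txt')
#   print(mfinfo['referenceframe'], mfinfo['referencestats'])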
def textphot_links_to_binphot_links(workdir,
binphotdir):
"""
This is used to convert the fiphot links in workdir (which are text fiphot
files) to the equivalent binary fiphot links in the same directory. Useful
only for MagnitudeFitting.py.
"""
# get a list of text fiphot links
text_fiphot_links = glob.glob(os.path.join(workdir,'*.fiphot'))
print('%sZ: found %s text fiphot links in %s' %
(datetime.utcnow().isoformat(),
len(text_fiphot_links), workdir))
# temporarily change working directory to the workdir
cwd = os.getcwd()
os.chdir(workdir)
for textphot in text_fiphot_links:
binphot = os.path.join(os.path.abspath(binphotdir),
os.path.basename(textphot))
if os.path.exists(binphot):
print('%sZ: converting textphot link %s -> binphot link for %s' %
(datetime.utcnow().isoformat(),
os.path.basename(textphot), binphot))
os.remove(os.path.abspath(textphot))
os.symlink(binphot, os.path.basename(textphot))
else:
            print('%sZ: textphot link %s has no '
                  'equivalent binphot file, removing it!' %
                  (datetime.utcnow().isoformat(),
                   os.path.basename(textphot),))
            os.remove(os.path.abspath(textphot))
    # change back to the original working directory (saved in cwd above)
    os.chdir(cwd)
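# A minimal usage sketch for textphot_links_to_binphot_links above; the two
# directory paths are hypothetical stand-ins for a real MagnitudeFitting.py
# work directory and its binary-fiphot output directory.
#
#   textphot_links_to_binphot_links('/path/to/G557-ccd5-work',
#                                   '/path/to/ccd5-binphot')
#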
def make_magfit_config(configoutfile,
phot_apertures=3,
phot_band='r',
phot_brightmag=0.0,
phot_faintmag=16.0,
phot_fovcatalog='',
singlephot_statdir='',
masterphot_statdir='',
masterphot_outdir=''):
"""
This creates a magfit config file.
"""
# open the output file object
outf = open(configoutfile,'wb')
# write the top of the config file
outf.write('num_apertures=%i\n' % phot_apertures)
#
# [mcat] section
#
outf.write('\n[mcat]\n')
outf.write('rawphot_ver=0\n')
# the mcat stat template entry
templatestr = phot_fovcatalog
outf.write("template=Template('%s')\n" % templatestr)
outf.write('faint_mag=%.1f\n' % phot_faintmag)
outf.write('bright_mag=%.1f\n' % phot_brightmag)
outf.write('round_pointing=1\n')
outf.write(("columns="
"['id','ra','dec','xi','eta','J','K','qlt','I','%s']\n") %
phot_band)
outf.write('fov_alarm=20.0\n')
outf.write('fov_safety_fac=1.1\n')
#
# [magfit.single] section
#
outf.write('\n[magfit.single]\n')
# the magfit.single stat template entry
templatestr = '/'.join(
[singlephot_statdir,'SPHOTREFSTAT_object_${OBJECT}_${CMPOS}']
)
outf.write("stat_template=Template('%s')\n" % templatestr)
outf.write("first_column=15\n")
outf.write("reference_subpix=True\n")
outf.write("file_extension='.sphotref'\n")
outf.write("column_precision=1.0e-5\n")
outf.write("column_name='sprmag'\n")
outf.write("version=0\n")
outf.write("count_weight=1.0\n")
outf.write("error_avg='weightedmean'\n")
outf.write("max_rej_iter=20\n")
outf.write("rej_level=3.0\n")
outf.write("max_mag_err=0.03\n")
outf.write("noise_offset=0.0005\n")
outf.write("ref_frame_stars=10000\n")
outf.write("max_JmK=1.0\n")
outf.write("min_JmK=0.0\n")
outf.write("bright_mag_min=8.5\n")
outf.write("AAAonly=True\n")
outf.write("param_str='spatial:4;r:2,2;JmK:1,2;subpix:1,2'\n")
outf.write("fntype='linear'\n")
#
# [magfit.master] section
#
outf.write('\n[magfit.master]\n')
# the magfit.master stat template entry
templatestr = '/'.join(
[masterphot_statdir,'MPHOTREFSTAT_object_${OBJECT}_${CMPOS}']
)
outf.write("stat_template=Template('%s')\n" % templatestr)
outf.write("first_column=15\n")
outf.write("reference_subpix=False\n")
outf.write("file_extension='.mphotref'\n")
outf.write("column_precision=1.0e-5\n")
outf.write("column_name='mprmag'\n")
outf.write("version=0\n")
outf.write("count_weight=1.0\n")
outf.write("error_avg='weightedmean'\n")
outf.write("max_rej_iter=20\n")
outf.write("rej_level=3.0\n")
outf.write("max_mag_err=0.03\n")
outf.write("noise_offset=0.0005\n")
outf.write("ref_frame_stars=10000\n")
outf.write("max_JmK=1.0\n")
outf.write("min_JmK=0.0\n")
outf.write("bright_mag_min=8.5\n")
outf.write("AAAonly=True\n")
outf.write("param_str='spatial:4;r:2,2;JmK:1,2;subpix:1,2'\n")
outf.write("fntype='linear'\n")
#
# [mphotref] section
#
outf.write('\n[mphotref]\n')
outf.write('version=0\n')
# the mphotref template entry
templatestr = '/'.join(
[masterphot_outdir,'mphotref_${CMPOS}.AP${APERTURE}']
)
outf.write("template=Template('%s')\n" % templatestr)
outf.write("rms_fit_bright_mag_min=9.0\n")
outf.write("max_rms_quantile=0.1\n")
outf.write("max_rms_above_fit=4\n")
outf.write("rms_fit_rej_lvl=3.0\n")
outf.write("rms_fit_err_avg=median\n")
outf.write("rms_fit_param='spatial:2;r:2,2'\n")
outf.write("grcollect_tempdir='/dev/shm'\n")
outf.write("min_meas_med=0.9\n")
outf.write("min_measurements=0.01\n")
outf.write("rej_iterations=20\n")
outf.write("rej_outliers='iterations=20,median,meddev=8'\n")
outf.close()
print('%sZ: wrote magfit config to %s' %
(datetime.utcnow().isoformat(),
os.path.abspath(configoutfile),))
return configoutfile
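# A minimal usage sketch for make_magfit_config above. All paths below are
# hypothetical; the FOV catalog, the stat directories, and the master photref
# output directory must exist in a real reduction.
#
#   magfitcfg = make_magfit_config(
#       '/path/to/ccd5-magfit.cfg',
#       phot_apertures=3,
#       phot_band='r',
#       phot_fovcatalog='/path/to/field.fovcat',
#       singlephot_statdir='/path/to/sphotref-stats',
#       masterphot_statdir='/path/to/mphotref-stats',
#       masterphot_outdir='/path/to/mphotref-out'
#   )
#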
def make_fiphot_list(searchdirs,
searchglob,
listfile):
"""
This makes a list of fiphot files in the list of directories specified in
searchdirs, using the searchglob to filter by filename. Returns a list of
absolute paths to the fiphot files and writes this to the file specified in
listfile.
"""
fiphotlist = []
for fdir in searchdirs:
fiphotfiles = glob.glob(os.path.join(fdir, searchglob))
fiphotlist.extend([os.path.abspath(x) for x in fiphotfiles])
# write to the output file
outf = open(listfile,'wb')
for fdir in fiphotlist:
outf.write('%s\n' % fdir)
outf.close()
return listfile
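# A minimal usage sketch for make_fiphot_list above; directory names are
# hypothetical. The resulting list file can be used, for example, as the
# fiphot_list argument of get_master_photref below.
#
#   make_fiphot_list(['/path/to/ccd5-work', '/path/to/ccd6-work'],
#                    '*.fiphot',
#                    '/path/to/ccd56-fiphot.list')
#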
def run_magfit(sphotref_frame,
sphotref_phot,
magfit_frame_list,
magfit_config_file,
magfit_type,
nprocs=16,
hatnetwork='HATSouth',
magfitexec='MagnitudeFitting.py'):
"""
This runs magfit in single/master photometric reference mode.
lcohpsrv1 invocation:
single photref
python /home/hatuser/wbhatti/src/MagnitudeFittingOrig.py HATSouth single /nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid16/photometry-ap/G557-ccd5-work/1-470798a_5.fits /nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid16/photometry-ap/G557-ccd5-work/1-470798a_5.fiphot -p 8 --log-config=/home/hatuser/wbhatti/src/logging.conf --config-file=/nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid16/photometry-ap/ccd5-magfit.cfg --manual-frame-list=/nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid16/photometry-ap/ccd5-magfit-frames.list --stat
master photref:
nohup python /home/hatuser/wbhatti/src/MagnitudeFittingOrig.py HATSouth master /nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid16/photometry-ap/G557-ccd6-work/1-470789a_6.fits /nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid16/photometry-ap/G557-ccd6-work/1-470789a_6.fiphot -p 8 --log-config=/home/hatuser/wbhatti/src/logging.conf --config-file=/nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid16/photometry-ap/ccd6-magfit.cfg --manual-frame-list=/nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid16/photometry-ap/ccd6-magfit-frames.list --stat > ccd6-mmagfit.log
"""
if not (os.path.exists(magfit_frame_list) and
os.path.exists(sphotref_frame) and
os.path.exists(sphotref_phot) and
os.path.exists(magfit_config_file)):
print('%sZ: some required files are missing!' %
(datetime.utcnow().isoformat(),))
return None
magfitcmd = MAGFITCMD.format(
magfitexec=os.path.abspath(magfitexec),
network=hatnetwork,
fit_type=magfit_type,
sphotref_frame=sphotref_frame,
sphotref_phot=sphotref_phot,
nprocs=nprocs,
magfit_config_file=magfit_config_file,
magfit_frame_list=magfit_frame_list
)
if DEBUG:
print(magfitcmd)
print('%sZ: starting %s magfit with %s processes...' %
(datetime.utcnow().isoformat(), magfit_type, nprocs))
# execute the magfit shell command
magfitproc = subprocess.Popen(shlex.split(magfitcmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# get results
magfit_stdout, magfit_stderr = magfitproc.communicate()
# get results if succeeded, log outcome, and return path of outfile
if magfitproc.returncode == 0:
print('%sZ: %s magfit completed.' %
(datetime.utcnow().isoformat(), magfit_type))
return True
else:
print('%sZ: %s magfit failed!' %
(datetime.utcnow().isoformat(), magfit_type))
        print('%sZ: error returned was %s' %
              (datetime.utcnow().isoformat(), magfit_stderr))
return False
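# A minimal usage sketch for run_magfit above, mirroring the lcohpsrv1
# invocation in its docstring; all paths are hypothetical.
#
#   ok = run_magfit('/path/to/1-470798a_5.fits',
#                   '/path/to/1-470798a_5.fiphot',
#                   '/path/to/ccd5-magfit-frames.list',
#                   '/path/to/ccd5-magfit.cfg',
#                   'single',
#                   nprocs=8,
#                   hatnetwork='HATSouth',
#                   magfitexec='/path/to/MagnitudeFitting.py')
#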
def get_master_photref(sphotref_frame,
fiphot_list,
magfit_config_file,
hatnetwork='HATSouth',
photrefexec='do_masterphotref.py'):
"""
Generates the master photometric reference from single ref photometry.
lcohpsrv1 invocation:
nohup python /home/hatuser/wbhatti/src/do_masterphotref.py HATSouth /nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid8/ccd5-fits/1-404411d_5.fits --manual-frame-list=/nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid8/photometry-ap/ccd5-fiphot.list --config-file=/nfs/lcohpsrv1/ar1/scratch/PHOT_WB/projid8/photometry-ap/ccd5-magfit.cfg --log-config=/home/hatuser/wbhatti/src/logging.conf --nostat > ccd5-masterphotref.log &
"""
if not (os.path.exists(fiphot_list) and
os.path.exists(sphotref_frame) and
os.path.exists(magfit_config_file)):
print('%sZ: some required files are missing!' %
(datetime.utcnow().isoformat(),))
return None
photrefcmd = MPHOTREFCMD.format(
mphotrefexec=os.path.abspath(photrefexec),
network=hatnetwork,
sphotref_frame=sphotref_frame,
fiphot_list=fiphot_list,
magfit_config_file=magfit_config_file
)
if DEBUG:
print(photrefcmd)
print('%sZ: starting photref...' %
(datetime.utcnow().isoformat(),))
# execute the photref shell command
photrefproc = subprocess.Popen(shlex.split(photrefcmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# get results
photref_stdout, photref_stderr = photrefproc.communicate()
# get results if succeeded, log outcome, and return path of outfile
if photrefproc.returncode == 0:
print('%sZ: photref completed.' %
(datetime.utcnow().isoformat(),))
return True
else:
print('%sZ: photref failed!' %
(datetime.utcnow().isoformat(),))
return False
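# A minimal usage sketch for get_master_photref above, again mirroring the
# docstring invocation; paths are hypothetical.
#
#   ok = get_master_photref('/path/to/1-404411d_5.fits',
#                           '/path/to/ccd5-fiphot.list',
#                           '/path/to/ccd5-magfit.cfg',
#                           hatnetwork='HATSouth',
#                           photrefexec='/path/to/do_masterphotref.py')
#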
###########################
## FIPHOT DUMP FUNCTIONS ##
###########################
def dump_binary_fiphot(fiphot,
sourcelist,
outfile):
"""
This dumps all columns from a fiphot binary format file to a text fiphot
file. This also needs the sourcelist file for the same frame to get the S,
D, K values correctly for each detection. This assumes that the sourcelist
source detections are ordered in the same way as the source detections in
    the fiphot file (this appears to be valid, but deserves a more robust check).
keys to dump and in which order:
HAT-field-sourceid
serial
x
y
bg
bg err
per aperture[0] mag
per aperture[0] mag err
per aperture[0] status flag
per aperture[1] mag
per aperture[1] mag err
per aperture[1] status flag
per aperture[2] mag
per aperture[2] mag err
per aperture[2] status flag
mprmag[0]
mprmag[1]
mprmag[2]
    NOTE: each line has a fixed length of 249 characters (this matches the
    fiphotlinechars=249 default used by the fast parallel LC collection
    functions below and in imagesubphot.py).
"""
# first, read the fiphot in
binphot = read_fiphot(fiphot)
srclist = np.genfromtxt(sourcelist,
usecols=(8,9,10),
dtype='f8,f8,f8',
names=['fsv','fdv','fkv'])
# get all the columns
source = binphot['source']
serial = binphot['serial']
field = binphot['field']
srcx = binphot['x']
srcy = binphot['y']
bkg = binphot['bg']
bkgerr = binphot['bg err']
im1 = binphot['per aperture'][0]['mag']
ie1 = binphot['per aperture'][0]['mag err']
iq1 = binphot['per aperture'][0]['status flag']
im2 = binphot['per aperture'][1]['mag']
ie2 = binphot['per aperture'][1]['mag err']
iq2 = binphot['per aperture'][1]['status flag']
im3 = binphot['per aperture'][2]['mag']
ie3 = binphot['per aperture'][2]['mag err']
iq3 = binphot['per aperture'][2]['status flag']
rm1 = binphot['mprmag[0]'] if 'mprmag[0]' in binphot else [np.nan for x in srcx]
rm2 = binphot['mprmag[1]'] if 'mprmag[1]' in binphot else [np.nan for x in srcx]
rm3 = binphot['mprmag[2]'] if 'mprmag[2]' in binphot else [np.nan for x in srcx]
# format the output line
lineform = (
'HAT-%3i-%07i %12s ' # hatid, rstfc
'%12.5f %12.5f ' # srcx, srcy
'%12.5f %12.5f ' # bkg, bkgerr
'%12.5f %12.5f %12.5f ' # fsv, fdv, fkv
'%12.5f %12.5f %3i ' # im1, ie1, iq1
'%12.5f %12.5f %3i ' # im2, ie2, iq2
'%12.5f %12.5f %3i ' # im3, ie3, iq3
'%12.5f %12.5f %12.5f\n' # rm1, rm2, rm3
)
# open the outfile
outf = open(outfile, 'wb')
for ind in xrange(len(srcx)):
outf.write(lineform % (field[ind], source[ind], serial,
srcx[ind], srcy[ind],
bkg[ind], bkgerr[ind],
srclist['fsv'][ind],
srclist['fdv'][ind],
srclist['fkv'][ind],
im1[ind], ie1[ind], iq1[ind],
im2[ind], ie2[ind], iq2[ind],
im3[ind], ie3[ind], iq3[ind],
rm1[ind], rm2[ind], rm3[ind]))
outf.close()
return outfile
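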
def dump_binary_worker(task):
"""
This is a worker for parallelization of binary fiphot dumping.
task[0] -> path to input binary fiphot
task[1] -> path to accompanying sourcelist file
task[2] -> output directory
task[3] -> output fiphot extension to use
"""
try:
outbasename = task[0].replace('fiphot',task[3])
outfile = os.path.join(os.path.abspath(task[2]), outbasename)
print('%sZ: binary fiphot %s -> text fiphot %s OK' %
(datetime.utcnow().isoformat(), task[0], outfile))
return task[0], dump_binary_fiphot(task[0], task[1], outfile)
except Exception as e:
print('ERR! %sZ: could not dump '
'binary fiphot %s to text fiphot, error was: %s' %
(datetime.utcnow().isoformat(), task[0], e))
return task[0], None
def parallel_dump_binary_fiphots(fiphotdir,
fiphotglob='*.fiphot',
sourcelistdir=None,
sourcelistext='.sourcelist',
outdir=None,
textfiphotext='text-fiphot',
nworkers=16,
maxworkertasks=1000):
"""
This dumps all binary fiphots found in fiphotdir (we check if the file is
binary or not) to text fiphots with all the same row lengths in outdir. This
is needed if we want to use the fast LC collection method implemented in
imagesubphot.py.
"""
if not sourcelistdir:
sourcelistdir = fiphotdir
fiphotlist = glob.glob(os.path.join(os.path.abspath(fiphotdir), fiphotglob))
fiphotext = os.path.splitext(fiphotglob)[-1]
sourcelistlist = [x.replace(fiphotext, sourcelistext) for x in fiphotlist]
print('%sZ: %s files to process in %s' %
(datetime.utcnow().isoformat(), len(fiphotlist), fiphotdir))
pool = mp.Pool(nworkers,maxtasksperchild=maxworkertasks)
if not outdir:
outdir = fiphotdir
tasks = [(x, y, outdir, textfiphotext)
for (x,y) in zip(fiphotlist, sourcelistlist)]
# fire up the pool of workers
results = pool.map(dump_binary_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
return {x:y for (x,y) in results}
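# A minimal usage sketch for parallel_dump_binary_fiphots above; the photometry
# directory is hypothetical. With the defaults, the sourcelists are assumed to
# live next to the fiphots and the text fiphots are written back to fiphotdir.
#
#   results = parallel_dump_binary_fiphots('/path/to/ccd5-work',
#                                          fiphotglob='*.fiphot',
#                                          sourcelistext='.sourcelist',
#                                          textfiphotext='text-fiphot',
#                                          nworkers=16)
#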
#############################
## NEW STYLE LC COLLECTION ##
#############################
def make_photometry_indexdb(framedir,
outfile,
frameglob='*_5.fits', # avoid ISM FITS products
photdir=None,
photext='text-fiphot',
maxframes=None,
overwrite=False):
"""
    This is like make_photometry_index below, but uses an sqlite3 database on
    disk instead of an in-memory index.
"""
# make sure we don't overwrite anything unless we're supposed to
if os.path.exists(outfile) and not overwrite:
print('WRN! %sZ: a photometry index DB by this name already exists!' %
(datetime.utcnow().isoformat(),))
return outfile
if overwrite and os.path.exists(outfile):
os.remove(outfile)
db = sqlite3.connect(outfile)
cur = db.cursor()
# make the database tables
# cur.execute(PRAGMA_CMDS) # not sure if we want WAL mode or not
cur.execute(PHOTS_TABLE)
cur.execute(HATIDS_TABLE)
cur.execute(META_TABLE)
db.commit()
# first, figure out the directories
if not photdir:
photdir = framedir
# send these to the database
cur.execute(META_INSERT_CMD, (photdir, framedir))
db.commit()
# first, find all the frames
framelist = glob.glob(os.path.join(os.path.abspath(framedir),
frameglob))
# restrict to maxframes max frames
if maxframes:
framelist = framelist[:maxframes]
# go through all the frames
for frame in framelist:
print('%sZ: working on frame %s' %
(datetime.utcnow().isoformat(), frame))
# generate the names of the associated phot and sourcelist files
frameinfo = FRAMEREGEX.findall(os.path.basename(frame))
phot = '%s-%s_%s.%s' % (frameinfo[0][0],
frameinfo[0][1],
frameinfo[0][2],
photext)
originalframe = '%s-%s_%s.fits' % (frameinfo[0][0],
frameinfo[0][1],
frameinfo[0][2])
phot = os.path.join(os.path.abspath(photdir), phot)
originalframe = os.path.join(os.path.abspath(framedir),
originalframe)
# check these files exist, and populate the dict if they do
if os.path.exists(phot) and os.path.exists(originalframe):
# get the JD from the FITS file.
# NOTE: this is the ORIGINAL FITS frame, since the subtracted one
# contains some weird JD header (probably inherited from the photref
# frame)
framerjd = get_header_keyword(originalframe, 'JD')
# update the DB with this info
cur.execute(PHOTS_INSERT_CMD,
(os.path.basename(phot),
framerjd,
os.path.basename(originalframe)))
# get the phot file
photf = open(phot, 'rb')
phothatids = [x.split()[0] for x in photf]
photf.close()
for ind, hatid in enumerate(phothatids):
# update the DB with phot info
cur.execute(HATIDS_INSERT_CMD,
(hatid,
os.path.basename(phot),
ind))
# if some associated files don't exist for this frame, ignore it
else:
print('WRN! %sZ: ignoring frame %s, '
'photometry for this frame is not available!' %
(datetime.utcnow().isoformat(), frame))
# make the indices for fast lookup
print('%sZ: making photometry index DB indices...' %
(datetime.utcnow().isoformat(),))
cur.execute(PHOTS_INDEX_CMD)
cur.execute(HATIDS_INDEX_CMD)
cur.execute(HATIDS_PHOT_INDEX_CMD)
# commit the DB at the end of writing
db.commit()
print('%sZ: done. photometry index DB written to %s' %
(datetime.utcnow().isoformat(), outfile))
return outfile
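# A minimal usage sketch for make_photometry_indexdb above; paths are
# hypothetical. The text fiphots are assumed to be the output of
# parallel_dump_binary_fiphots and to live in photdir.
#
#   photdb = make_photometry_indexdb('/path/to/ccd5-frames',
#                                    '/path/to/TM-aperturephot-index.sqlite',
#                                    frameglob='*_5.fits',
#                                    photdir='/path/to/ccd5-work',
#                                    photext='text-fiphot')
#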
def get_fiphot_line(fiphot, linenum, fiphotlinechars=249):
"""
    This gets the requested line (by line number) out of the text fiphot file,
    seeking directly to linenum x fiphotlinechars bytes (i.e. it assumes
    fixed-width lines).
"""
fiphotf = open(fiphot, 'rb')
filelinenum = fiphotlinechars*linenum
fiphotf.seek(filelinenum)
fiphotline = fiphotf.read(fiphotlinechars)
fiphotf.close()
return fiphotline
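# A short sketch of the fixed-width assumption get_fiphot_line relies on: line
# N (zero-based) of a dumped text fiphot starts at byte N * fiphotlinechars, so
# fetching a (hypothetical) line 1000 of a 249-character-per-line file is a
# single seek:
#
#   line = get_fiphot_line('/path/to/1-470798a_5.text-fiphot',
#                          1000, fiphotlinechars=249)
#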
def get_fiphot_line_linecache(fiphot, linenum, fiphotlinechars=249):
"""
This uses linecache's getline function to get the line out of the file
fiphot.
"""
return getline(fiphot, linenum)
def collect_aperturephot_lightcurve(hatid,
photindex,
outdir,
skipcollected=True,
fiphotlinefunc=get_fiphot_line,
fiphotlinechars=249):
"""
    This collects the aperture photometry lightcurve of a single object into a
    .rlc file.
    hatid -> the hatid of the object to collect the light-curve for
    photindex -> the sqlite3 photometry index DB (made by
                 make_photometry_indexdb above) recording which .fiphot,
                 .sourcelist, and .fits files contain the lines corresponding
                 to this HATID. this way, we can look these lines up
                 super-fast using the linecache module.
    outdir -> the directory where to place the collected lightcurve
skipcollected -> if True, looks for an existing LC for this hatid in
outdir. if found, returns the path to that LC instead of
actually processing. if this is False, redoes the
processing for this LC anyway.
fiphotlinefunc -> this is the function to use for getting a specific line
out of the specified fiphot file.
    Each output line is the RJD of the frame prepended to the corresponding
    dumped text-fiphot line, so the columns are:
00 rjd Reduced Julian Date (RJD = JD - 2400000.0)
01 hat HAT ID of the object
02 rstfc Unique frame key ({STID}-{FRAMENUMBER}_{CCDNUM})
03 xcc original X coordinate on CCD
04 ycc original y coordinate on CCD
05 bgv Background value
06 bge Background measurement error
07 fsv Measured S value
08 fdv Measured D value
09 fkv Measured K value
10 im1 Instrumental magnitude in aperture 1
11 ie1 Instrumental magnitude error for aperture 1
12 iq1 Instrumental magnitude quality flag for aperture 1 (0/G OK, X bad)
13 im2 Instrumental magnitude in aperture 2
14 ie2 Instrumental magnitude error for aperture 2
15 iq2 Instrumental magnitude quality flag for aperture 2 (0/G OK, X bad)
16 im3 Instrumental magnitude in aperture 3
17 ie3 Instrumental magnitude error for aperture 3
18 iq3 Instrumental magnitude quality flag for aperture 3 (0/G OK, X bad)
19 rm1 Reduced Mags from magfit in aperture 1
20 rm2 Reduced Mags from magfit in aperture 2
21 rm3 Reduced Mags from magfit in aperture 3
"""
# connect to the photindex sqlite3 database
indexdb = sqlite3.connect(photindex)
cur = indexdb.cursor()
# first, look up the metainfo
cur.execute(META_SELECT_CMD)
metarow = cur.fetchone()
photdir, framedir = metarow
# look up the hatid and its info in the photindex db
cur.execute(PHOT_SELECT_CMD, (str(hatid),))
rows = cur.fetchall()
if rows and len(rows) > 0:
# prepare the output file
outfile = os.path.join(os.path.abspath(outdir), '%s.rlc' % hatid)
# if the file already exists and skipcollected is True, then return
# that file instead of processing any further
if os.path.exists(outfile) and skipcollected:
print('WRN! %sZ: object %s LC already exists, not overwriting: %s' %
(datetime.utcnow().isoformat(), hatid, outfile))
return outfile
# otherwise, open the file and prepare to write to it
outf = open(outfile, 'wb')
# go through the phots and sourcelists, picking out the timeseries
# information for this hatid
for row in rows:
# unpack the row to get our values
framerjd, phot, photline = row
try:
# next, get the requested line from phot file
phot_elem = fiphotlinefunc(
os.path.join(photdir, phot),
photline,
fiphotlinechars=fiphotlinechars
).split()
# parse these lines and prepare the output
# rstfc_elems = FRAMEREGEX.findall(os.path.basename(phot))
# rstfc = '%s-%s_%s' % (rstfc_elems[0])
out_line = '%s %s\n' % (framerjd, ' '.join(phot_elem))
outf.write(out_line)
# if this frame isn't available, ignore it
except Exception as e:
print('WRN! %sZ: phot %s isn\'t available (error: %s)'
', skipping...' %
(datetime.utcnow().isoformat(), phot, e))
continue
# close the output LC once we're done with it
outf.close()
print('%sZ: object %s -> %s' %
(datetime.utcnow().isoformat(), hatid, outfile))
returnf = outfile
# if the hatid isn't found in the photometry index, then we can't do
# anything
else:
print('ERR! %sZ: object %s is not in the '
'photometry index, ignoring...' %
(datetime.utcnow().isoformat(), hatid))
returnf = None
# at the end, close the DB and return
indexdb.close()
return returnf
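# A minimal usage sketch for collect_aperturephot_lightcurve above; the HAT-ID,
# index DB, and output directory are hypothetical.
#
#   rlc = collect_aperturephot_lightcurve('HAT-123-0001234',
#                                         '/path/to/TM-aperturephot-index.sqlite',
#                                         '/path/to/collected-lcs',
#                                         skipcollected=True,
#                                         fiphotlinefunc=get_fiphot_line,
#                                         fiphotlinechars=249)
#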
def aperturephotlc_collection_worker(task):
"""
    This wraps collect_aperturephot_lightcurve above for
    parallel_collect_aperturephot_lightcurves below.
task[0] -> hatid
task[1] -> photindex DB name
task[2] -> outdir
task[3] -> {skipcollected, fiphotlinefunc, fiphotlinechars}
"""
try:
return task[0], collect_aperturephot_lightcurve(task[0],
task[1],
task[2],
**task[3])
except Exception as e:
print('ERR! %sZ: failed to get LC for %s, error: %s' %
(datetime.utcnow().isoformat(), task[0], e ))
return task[0], None
def parallel_collect_aperturephot_lightcurves(framedir,
outdir,
frameglob='*_5.fits',
photindexdb=None,
photdir=None,
photext='text-fiphot',
maxframes=None,
overwritephotindex=False,
skipcollectedlcs=True,
fiphotlinefunc=get_fiphot_line,
fiphotlinechars=249,
nworkers=16,
maxworkertasks=1000):
"""
This collects all .fiphot files into lightcurves.
"""
# first, check if the output directory exists
if not os.path.exists(outdir):
os.mkdir(outdir)
    # next, check if we have to make a photometry index DB, and make it if so
if not photindexdb:
photdbf = os.path.join(framedir,'TM-aperturephot-index.sqlite')
photindexdb = make_photometry_indexdb(framedir,
photdbf,
frameglob=frameglob,
photdir=photdir,
photext=photext,
maxframes=maxframes,
overwrite=overwritephotindex)
# only proceed if the photometry index DB exists
if os.path.exists(photindexdb):
# get the list of distinct HATIDs from the photindexdb
db = sqlite3.connect(photindexdb)
cur = db.cursor()
cur.execute(DISTINCT_HATIDS_CMD)
rows = cur.fetchall()
# make sure these are unique
hatids = [x[0].strip() for x in rows]
hatids = list(set(hatids))
db.close()
# generate the task list
tasks = [(hatid,
photindexdb,
outdir,
{'skipcollected':skipcollectedlcs,
'fiphotlinefunc':fiphotlinefunc,
'fiphotlinechars':fiphotlinechars}) for hatid in hatids]
# now start up the parallel collection
print('%sZ: %s HATIDs to get LCs for, starting...' %
(datetime.utcnow().isoformat(), len(hatids), ))
pool = mp.Pool(nworkers,maxtasksperchild=maxworkertasks)
# fire up the pool of workers
results = pool.map(aperturephotlc_collection_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
return {x:y for (x,y) in results}
# if the photometry index DB doesn't exist, nothing we can do
else:
print('ERR! %sZ: specified photometry index DB does not exist!' %
(datetime.utcnow().isoformat(), ))
###################
## EPD FUNCTIONS ##
###################
def epd_diffmags(coeff, fsv, fdv, fkv, xcc, ycc, bgv, bge, mag):
"""
This calculates the difference in mags after EPD coefficients are
calculated.
final EPD mags = median(magseries) + epd_diffmags()
"""
return -(coeff[0]*fsv**2. +
coeff[1]*fsv +
coeff[2]*fdv**2. +
coeff[3]*fdv +
coeff[4]*fkv**2. +
coeff[5]*fkv +
coeff[6] +
coeff[7]*fsv*fdv +
coeff[8]*fsv*fkv +
coeff[9]*fdv*fkv +
coeff[10]*np.sin(2*np.pi*xcc) +
coeff[11]*np.cos(2*np.pi*xcc) +
coeff[12]*np.sin(2*np.pi*ycc) +
coeff[13]*np.cos(2*np.pi*ycc) +
coeff[14]*np.sin(4*np.pi*xcc) +
coeff[15]*np.cos(4*np.pi*xcc) +
coeff[16]*np.sin(4*np.pi*ycc) +
coeff[17]*np.cos(4*np.pi*ycc) +
coeff[18]*bgv +
coeff[19]*bge -
mag)
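# As the docstring above notes, epd_diffmags returns *differential* values, so
# the detrended magnitudes are recovered by adding back the median of the raw
# magnitude series (this is exactly what epd_lightcurve below does), roughly:
#
#   epdmags = (np.median(mag[np.isfinite(mag)]) +
#              epd_diffmags(coeff, fsv, fdv, fkv, xcc, ycc, bgv, bge, mag))
#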
def epd_magseries(mag, fsv, fdv, fkv, xcc, ycc, bgv, bge,
smooth=21, sigmaclip=3.0):
"""
Detrends a magnitude series given in mag using accompanying values of S in
fsv, D in fdv, K in fkv, x coords in xcc, y coords in ycc, background in
bgv, and background error in bge. smooth is used to set a smoothing
parameter for the fit function. Does EPD voodoo.
"""
# find all the finite values of the magnitude
finiteind = np.isfinite(mag)
# calculate median and stdev
mag_median = np.median(mag[finiteind])
mag_stdev = np.nanstd(mag)
# if we're supposed to sigma clip, do so
if sigmaclip:
excludeind = abs(mag - mag_median) < sigmaclip*mag_stdev
finalind = finiteind & excludeind
else:
finalind = finiteind
final_mag = mag[finalind]
final_len = len(final_mag)
if DEBUG:
print('final epd fit mag len = %s' % final_len)
# smooth the signal
smoothedmag = medfilt(final_mag, smooth)
# make the linear equation matrix
epdmatrix = np.c_[fsv[finalind]**2.0,
fsv[finalind],
fdv[finalind]**2.0,
fdv[finalind],
fkv[finalind]**2.0,
fkv[finalind],
np.ones(final_len),
fsv[finalind]*fdv[finalind],
fsv[finalind]*fkv[finalind],
fdv[finalind]*fkv[finalind],
np.sin(2*np.pi*xcc[finalind]),
np.cos(2*np.pi*xcc[finalind]),
np.sin(2*np.pi*ycc[finalind]),
np.cos(2*np.pi*ycc[finalind]),
np.sin(4*np.pi*xcc[finalind]),
np.cos(4*np.pi*xcc[finalind]),
np.sin(4*np.pi*ycc[finalind]),
np.cos(4*np.pi*ycc[finalind]),
bgv[finalind],
bge[finalind]]
# solve the equation epdmatrix * x = smoothedmag
# return the EPD differential mags if the solution succeeds
try:
coeffs, residuals, rank, singulars = lstsq(epdmatrix, smoothedmag)
if DEBUG:
print('coeffs = %s, residuals = %s' % (coeffs, residuals))
return epd_diffmags(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, mag)
# if the solution fails, return nothing
except Exception as e:
print('%sZ: EPD solution did not converge! Error was: %s' %
(datetime.utcnow().isoformat(), e))
return None
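# A minimal usage sketch for epd_magseries above, assuming rlc is a structured
# array read the same way epd_lightcurve below reads a .rlc file:
#
#   epddiffmag = epd_magseries(rlc['rm1'], rlc['fsv'], rlc['fdv'], rlc['fkv'],
#                              rlc['xcc'], rlc['ycc'], rlc['bgv'], rlc['bge'],
#                              smooth=21, sigmaclip=3.0)
#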
def epd_lightcurve(rlcfile,
mags=[19,20,21],
sdk=[7,8,9],
xy=[3,4],
backgnd=[5,6],
smooth=21,
sigmaclip=3.0,
rlcext='rlc',
outfile=None,
minndet=200):
"""
Runs the EPD process on rlcfile, using columns specified to get the required
    parameters. If outfile is None, the .epdlc will be placed in the same
directory as rlcfile.
"""
# read the lightcurve in
rlc = np.genfromtxt(rlcfile,
usecols=tuple(xy + backgnd + sdk + mags),
dtype='f8,f8,f8,f8,f8,f8,f8,f8,f8,f8',
names=['xcc','ycc','bgv','bge','fsv','fdv','fkv',
'rm1','rm2','rm3'])
if len(rlc['xcc']) >= minndet:
# calculate the EPD differential mags
epddiffmag1 = epd_magseries(rlc['rm1'],rlc['fsv'],rlc['fdv'],rlc['fkv'],
rlc['xcc'],rlc['ycc'],rlc['bgv'],rlc['bge'],
smooth=smooth, sigmaclip=sigmaclip)
epddiffmag2 = epd_magseries(rlc['rm2'],rlc['fsv'],rlc['fdv'],rlc['fkv'],
rlc['xcc'],rlc['ycc'],rlc['bgv'],rlc['bge'],
smooth=smooth, sigmaclip=sigmaclip)
epddiffmag3 = epd_magseries(rlc['rm3'],rlc['fsv'],rlc['fdv'],rlc['fkv'],
rlc['xcc'],rlc['ycc'],rlc['bgv'],rlc['bge'],
smooth=smooth, sigmaclip=sigmaclip)
# add the EPD diff mags back to the median mag to get the EPD mags
if epddiffmag1 is not None:
mag_median = np.median(rlc['rm1'][np.isfinite(rlc['rm1'])])
epdmag1 = epddiffmag1 + mag_median
else:
epdmag1 = np.array([np.nan for x in rlc['rm1']])
print('%sZ: no EP1 mags available for %s!' %
(datetime.utcnow().isoformat(), rlcfile))
if epddiffmag2 is not None:
mag_median = np.median(rlc['rm2'][np.isfinite(rlc['rm2'])])
epdmag2 = epddiffmag2 + mag_median
else:
epdmag2 = np.array([np.nan for x in rlc['rm2']])
print('%sZ: no EP2 mags available for %s!' %
(datetime.utcnow().isoformat(), rlcfile))
if epddiffmag3 is not None:
mag_median = np.median(rlc['rm3'][np.isfinite(rlc['rm3'])])
epdmag3 = epddiffmag3 + mag_median
else:
epdmag3 = np.array([np.nan for x in rlc['rm3']])
print('%sZ: no EP3 mags available for %s!' %
(datetime.utcnow().isoformat(), rlcfile))
# now write the EPD LCs out to the outfile
if not outfile:
outfile = '%s.epdlc' % re.sub('.%s' % rlcext, '', rlcfile)
inf = open(rlcfile,'rb')
inflines = inf.readlines()
inf.close()
outf = open(outfile,'wb')
for line, epd1, epd2, epd3 in zip(inflines, epdmag1, epdmag2, epdmag3):
outline = '%s %.6f %.6f %.6f\n' % (line.rstrip('\n'), epd1, epd2, epd3)
outf.write(outline)
outf.close()
return outfile
else:
print('not running EPD for %s, ndet = %s < min ndet = %s' %
(rlcfile, len(rlc['xcc']), minndet))
return None
def parallel_epd_worker(task):
"""
Function to wrap the epd_lightcurve function for use with mp.Pool.
task[0] = rlcfile
task[1] = {'mags', 'sdk', 'xy', 'backgnd',
'smooth', 'sigmaclip', 'rlcext',
'minndet'}
"""
try:
return task[0], epd_lightcurve(task[0], **task[1])
except Exception as e:
print('EPD failed for %s, error was: %s' % (task[0], e))
return task[0], None
def parallel_run_epd(rlcdir,
mags=[19,20,21],
sdk=[7,8,9],
xy=[3,4],
backgnd=[5,6],
smooth=21,
sigmaclip=3.0,
rlcext='rlc',
rlcglobprefix='*',
outfile=None,
nworkers=16,
maxworkertasks=1000,
minndet=200):
"""
This runs EPD in parallel on the lightcurves in rlcdir.
"""
# find all the rlc files in the rlcdir
rlclist = glob.glob(os.path.join(os.path.abspath(rlcdir), '%s.%s' %
(rlcglobprefix, rlcext)))
tasks = [(x, {'mags':mags, 'sdk':sdk, 'xy':xy, 'backgnd':backgnd,
'smooth':smooth, 'sigmaclip':sigmaclip, 'rlcext':rlcext,
'minndet':minndet})
for x in rlclist]
# now start up the parallel EPD processes
print('%sZ: %s HATIDs for EPD, starting...' %
(datetime.utcnow().isoformat(), len(rlclist), ))
pool = mp.Pool(nworkers,maxtasksperchild=maxworkertasks)
# fire up the pool of workers
results = pool.map(parallel_epd_worker, tasks)
# wait for the processes to complete work
pool.close()
pool.join()
return {x:y for (x,y) in results}
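# A minimal usage sketch for parallel_run_epd above; the lightcurve directory
# is hypothetical and the column indices are the defaults for the .rlc format
# written by collect_aperturephot_lightcurve.
#
#   epdresults = parallel_run_epd('/path/to/collected-lcs',
#                                 rlcext='rlc',
#                                 smooth=21,
#                                 sigmaclip=3.0,
#                                 nworkers=16)
#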
###################
## TFA FUNCTIONS ##
###################
def choose_tfa_template(statsfile,
fovcatalog,
epdlcdir,
ignoretfamin=False,
fovcat_idcol=0,
fovcat_xicol=3,
fovcat_etacol=4,
fovcathasgaiaids=False,
fovcat_magcol=9,
max_nstars=1000,
min_nstars=20,
target_nstars=None,
brightest_mag=8.5,
faintest_mag=12.0,
max_rms=0.1,
max_sigma_above_rmscurve=4.0,
outprefix=None,
tfastage1=True,
epdlcext='.epdlc'):
"""
This chooses suitable stars for TFA template purposes. This "template set"
is a subsample of the stars, and is supposed to represent all the types of
systematics across the dataset. Kovacs et al (2005) give details.
statsfile = the file with LC statistics made when running EPD
fovcatalog = the fovcatalog file, this must have xi and eta coordinates,
ras, decs, and magnitudes
Returns a dict with lists of stars chosen, their stats, and filenames of
where each star list was written.
"""
# read in the stats file
stats = read_stats_file(statsfile, fovcathasgaiaids=fovcathasgaiaids)
# read in the fovcatalog
if not fovcathasgaiaids:
# assume HAT-IDs, HAT-123-4567890, 17 character strings
fovcat = np.genfromtxt(fovcatalog,
usecols=(fovcat_idcol,
fovcat_xicol,
fovcat_etacol,
fovcat_magcol),
dtype='U17,f8,f8,f8',
names=['objid','xi','eta','mag'])
staridstr = 'HAT-'
else:
# assume GAIA-IDs. From gaia2read, with "GAIA" id option, this is just
# 19 character integers. The (xi,eta) and mag precision also change.
fovcat = np.genfromtxt(fovcatalog,
usecols=(fovcat_idcol,
fovcat_xicol,
fovcat_etacol,
fovcat_magcol),
dtype='U19,f8,f8,f8',
names=['objid','xi','eta','mag'])
staridstr = '' # no pre-identifier for Gaia IDs.
# figure out the number of stars to use in the initial TFA template
# number of stars = TFA_TEMPLATE_FRACTION * median ndet
# 1. ndet >= median_ndet
# 2. max rms <= 0.1
# 3. brightest_mag < median_mag < faintest_mag
# 4. fit rms-mag, then discard anything above max_sigma_above_rmscurve
# find the objects in the fovcat and stats file that match these
# conditions, then pick up to 1000 random stars
outdict = {'statsfile':os.path.abspath(statsfile),
'fovcat':os.path.abspath(fovcatalog),
'lcdir':os.path.abspath(epdlcdir),
'staridstr':staridstr}
# do this per aperture
for aperture in [1,2,3]:
outdict[aperture] = {}
# first, pick the stars that meet our stats requirements
epdstr = 'ep%s' % aperture
objid_col = 'lcobj'
median_mag_col = 'med_sc_%s' % epdstr
mad_mag_col = 'mad_sc_%s' % epdstr
ndet_col = 'ndet_sc_%s' % epdstr
objectid = stats[objid_col]
mags_median = stats[median_mag_col]
mags_mad = stats[mad_mag_col]
obj_ndet = stats[ndet_col]
goodind = np.isfinite(mags_median) & np.isfinite(mags_mad)
objectid = objectid[goodind]
mags_median = mags_median[goodind]
mags_mad = mags_mad[goodind]
obj_ndet = obj_ndet[goodind]
print('\naperture %s: total good objects = %s' % (aperture,
len(objectid)))
median_ndet = np.nanmedian(obj_ndet)
        if not target_nstars:
            TFA_TEMPLATE_FRACTION = 0.1
            target_nstars = TFA_TEMPLATE_FRACTION * median_ndet
print('aperture %s: median ndet = %s' % (aperture, median_ndet))
print('aperture %s: target TFA template size = %s' %
(aperture, int(target_nstars)))
outdict[aperture]['target_tfa_nstars'] = (
target_nstars
)
stars_ndet_condition = obj_ndet >= median_ndet
print('aperture %s: objects with ndet condition = %s' %
(aperture, len(objectid[stars_ndet_condition])))
stars_rms_condition = mags_mad < max_rms
print('aperture %s: objects with rms condition = %s' %
(aperture, len(objectid[stars_rms_condition])))
rmsfit_condition = stars_ndet_condition
print('aperture %s: objects with rmsfit condition = %s' %
(aperture, len(objectid[rmsfit_condition])))
# selection 1: fit a parabola to the median mag - mag MAD relation and
# reject all stars with RMS > max_sigma_above_rmscurve
polyfit_coeffs = np.polyfit(mags_median[rmsfit_condition],
mags_mad[rmsfit_condition],
2)
print('aperture %s: rms fit params = %s' % (aperture,polyfit_coeffs))
# generate the model RMS curve with fit parameters
model_rms = (polyfit_coeffs[0]*mags_median*mags_median +
polyfit_coeffs[1]*mags_median +
polyfit_coeffs[2])
# find objects that lie below the requested rms threshold from this
# curve
threshold_condition = (mags_mad/model_rms) < max_sigma_above_rmscurve
print('aperture %s: objects with threshold condition = %s' %
(aperture, len(objectid[threshold_condition])))
final_statistics_ind = (threshold_condition &
stars_rms_condition &
stars_ndet_condition)
print('aperture %s: stars with good stats = %s' % (
aperture,
len(objectid[final_statistics_ind])
))
good_stats_objects = objectid[final_statistics_ind]
# selection 2: get the stars with good magnitudes
mag_condition = ((fovcat['mag'] < faintest_mag) &
(fovcat['mag'] > brightest_mag))
good_mag_objects = fovcat['objid'][mag_condition]
print('aperture %s: stars with good mags = %s' %
(aperture,len(good_mag_objects)))
# finally, intersect these two arrays to find a set of good TFA objects
good_tfa_objects = np.intersect1d(good_stats_objects,
good_mag_objects)
print('aperture %s: stars suitable for TFA = %s' %
(aperture,len(good_tfa_objects)))
# put this initial list into the outdict
outdict[aperture]['tfa_suitable_objects'] = good_tfa_objects
# selection 3: pick the target number of stars for TFA. Note
# target_nstars can be larger than max_nstars, in which case max_nstars
# template stars are chosen.
if target_nstars > max_nstars:
if len(good_tfa_objects) > max_nstars:
tfa_stars = nprand.choice(good_tfa_objects, replace=False,
size=max_nstars)
elif len(good_tfa_objects) > min_nstars:
tfa_stars = good_tfa_objects
else:
print("aperture %s: not enough stars suitable for TFA!" %
aperture)
if not ignoretfamin:
tfa_stars = None
else:
tfa_stars = good_tfa_objects
else:
if len(good_tfa_objects) > target_nstars:
                tfa_stars = nprand.choice(good_tfa_objects, replace=False,
                                          size=int(target_nstars))
elif len(good_tfa_objects) > min_nstars:
tfa_stars = good_tfa_objects
else:
print("aperture %s: not enough stars suitable for TFA!" %
aperture)
if not ignoretfamin:
tfa_stars = None
else:
tfa_stars = good_tfa_objects
# now get these stars IDs, LC fnames, xis, etas, and other things
# needed for the first stage of TFA (this will choose exactly
# target_nstars to use as the template for the final stage of TFA)
if tfa_stars is not None:
print('aperture %s: %s objects chosen as TFA templates for stage 1' %
(aperture,len(tfa_stars)))
outdict[aperture]['tfa_chosen_objects'] = tfa_stars
tfa_stars_catmag = []
tfa_stars_statrms = []
tfa_stars_statndet = []
tfa_stars_xi = []
tfa_stars_eta = []
tfa_stars_lcfile = []
print('aperture %s: getting object info...' %
(aperture,))
# get the stats for these objects
for i, tfaobj in enumerate(tfa_stars):
# search for the LC file for this object and make sure it exists
lcfile_searchpath = os.path.join(
epdlcdir, '{:s}{:s}'.format(tfaobj,epdlcext)
)
if os.path.exists(lcfile_searchpath):
tfa_stars_lcfile.append(
os.path.abspath(lcfile_searchpath)
)
tfa_stars_catmag.append(
fovcat['mag'][fovcat['objid'] == tfaobj]
)
tfa_stars_statrms.append(
mags_mad[objectid == tfaobj]
)
tfa_stars_statndet.append(
obj_ndet[objectid == tfaobj]
)
tfa_stars_xi.append(
fovcat['xi'][fovcat['objid'] == tfaobj]
)
tfa_stars_eta.append(
fovcat['eta'][fovcat['objid'] == tfaobj]
)
# if it doesn't, then add nans to the file
else:
print('ERR! couldn\'t find a LC for %s' % tfaobj)
tfa_stars_lcfile.append(None)
tfa_stars_catmag.append(np.nan)
tfa_stars_statrms.append(np.nan)
tfa_stars_statndet.append(np.nan)
tfa_stars_xi.append(np.nan)
tfa_stars_eta.append(np.nan)
outdict[aperture]['tfa_chosen_lcfile'] = tfa_stars_lcfile
outdict[aperture]['tfa_chosen_mag'] = np.ravel(tfa_stars_catmag)
outdict[aperture]['tfa_chosen_rms'] = np.ravel(tfa_stars_statrms)
outdict[aperture]['tfa_chosen_ndet'] = np.ravel(tfa_stars_statndet)
outdict[aperture]['tfa_chosen_xi'] = np.ravel(tfa_stars_xi)
outdict[aperture]['tfa_chosen_eta'] = np.ravel(tfa_stars_eta)
# if no TFA stars could be chosen, return Nones for this aperture
else:
            outdict[aperture]['tfa_chosen_lcfile'] = None
            outdict[aperture]['tfa_chosen_objects'] = None
            outdict[aperture]['tfa_chosen_mag'] = None
            outdict[aperture]['tfa_chosen_rms'] = None
            outdict[aperture]['tfa_chosen_ndet'] = None
            outdict[aperture]['tfa_chosen_xi'] = None
            outdict[aperture]['tfa_chosen_eta'] = None
# make the input file for TFA stage 1 for this aperture
if not outprefix:
outfile = os.path.abspath(
os.path.join(os.getcwd(),
'tfa-stage1-input-aperture-%s.txt' % aperture)
)
else:
outfile = os.path.abspath(
os.path.join(outprefix,
'tfa-stage1-input-aperture-%s.txt' % aperture)
)
outf = open(outfile,'wb')
outline = '%s %s %.6f %.6f %i %.6f %.6f\n'
for objid, lcf, mag, rms, ndet, xi, eta in zip(
outdict[aperture]['tfa_chosen_objects'],
outdict[aperture]['tfa_chosen_lcfile'],
outdict[aperture]['tfa_chosen_mag'],
outdict[aperture]['tfa_chosen_rms'],
outdict[aperture]['tfa_chosen_ndet'],
outdict[aperture]['tfa_chosen_xi'],
outdict[aperture]['tfa_chosen_eta']
):
outf.write(
(
outline % (objid, lcf, mag, rms, ndet, xi, eta)
).encode('utf-8')
)
outf.close()
print('aperture %s: wrote object info to %s' %
(aperture, outfile))
outdict[aperture]['info_file'] = os.path.abspath(outfile)
# END OF PER APERTURE STUFF
    # run TFA stage 1 if we're supposed to do so
if tfastage1:
print('\nrunning TFA stage 1...')
# run TFA stage 1 to pick the good objects
tfa_stage1 = run_tfa_stage1(outdict)
for aperture in tfa_stage1:
outdict[aperture]['stage1_templatelist'] = tfa_stage1[aperture]
# make the TFA template list file for this aperture
if outdict[aperture]['stage1_templatelist']:
templatelistfname = os.path.join(
outdict['lcdir'],
'aperture-%s-tfa-template.list' % aperture
)
outf = open(templatelistfname,'wb')
for tfaobjid in outdict[aperture]['stage1_templatelist']:
templatelc = os.path.join(
outdict['lcdir'],
tfaobjid + '.epdlc'
)
if os.path.exists(templatelc):
outf.write(
('%s\n' % os.path.abspath(templatelc)
).encode('utf-8')
)
outf.close()
outdict[aperture]['stage1_tfa_templatefile'] = (
templatelistfname
)
print('aperture %s: wrote TFA template list to %s' %
(aperture, templatelistfname))
return outdict
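# A minimal usage sketch for choose_tfa_template above; the stats file, field
# catalog, and EPD lightcurve directory are hypothetical.
#
#   tfainfo = choose_tfa_template('/path/to/epdlc-statistics.txt',
#                                 '/path/to/field.fovcat',
#                                 '/path/to/collected-lcs',
#                                 max_nstars=1000,
#                                 min_nstars=20,
#                                 brightest_mag=8.5,
#                                 faintest_mag=12.0,
#                                 tfastage1=True)
#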
def run_tfa_stage1(tfainfo):
"""
This just runs the TFA in fake mode to generate a list of template
stars. Uses the tfainfo dict created in choose_tfa_template above.
Communicates with the tfa program over pipe and then writes its output to a
tfa input file for the next stage.
tfa -r <REFFILE> --col-ref-id <REFFILE_IDCOLNUM>
--col-ref-x <REFFILE_XCOLNUM>
--col-ref-y <REFFILE_YCOLNUM>
-n <NTEMPLATES_TO_USE>
-T - (for stdout)
-i /dev/null (no input file?)
"""
staridstr = tfainfo['staridstr']
tfa_stage1_results = {}
for aperture in [1,2,3]:
tfacmdstr = ("tfa -r {inputfile} --col-ref-id 1 "
"--col-ref-x 6 --col-ref-y 7 "
"-n {ntemplates} -T - -i /dev/null")
# the +30 is to force selection of slightly more than the number
# of strictly required templates
tfacmd = tfacmdstr.format(
inputfile=tfainfo[aperture]['info_file'],
ntemplates=int(tfainfo[aperture]['target_tfa_nstars'])+30
)
print('aperture %s: starting TFA stage 1...' % aperture)
if DEBUG:
print(tfacmd)
tfaproc = subprocess.Popen(shlex.split(tfacmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
tfa_stdout, tfa_stderr = tfaproc.communicate()
# get results if succeeded, log outcome, and return path of outfile.
# (note: this suppresses errors...)
if tfaproc.returncode == 0 or tfa_stdout:
tfaobjects = tfa_stdout.decode('utf-8').split('\n')
tfaobjects = [x for x in tfaobjects
if x.startswith(staridstr) and x != '']
print('aperture %s: TFA stage 1 completed, %s templates selected' %
(aperture, len(tfaobjects)))
tfa_stage1_results[aperture] = tfaobjects
else:
print('aperture %s: TFA stage 1 failed, error was: %s' %
(aperture, tfa_stderr))
tfa_stage1_results[aperture] = None
return tfa_stage1_results
def run_tfa_singlelc(epdlc,
templatefiles,
outfile=None,
epdlc_jdcol=0,
epdlc_magcol=(22,23,24),
template_sigclip=5.0,
epdlc_sigclip=5.0):
"""
This runs TFA for all apertures defined in epdlc_magcol for the input
epdlc file, given an existing TFA template list in templatefile. If outfile
is None, the output TFA LC will be in the same directory as epdlc but with
an extension of .tfalc.
"""
tfacmdstr = ("tfa -i {epdlc} -t {templatefile} "
"--col-jd {epdlc_jdcol} "
"--col-mag {epdlc_magcol} "
"--col-mag-out {tfalc_magcol} "
"--epsilon-time 1e-9 "
"--join-by-time "
"--templ-filter --templ-outl-limit {template_sigclip} "
"--lc-filter --lc-outl-limit {epdlc_sigclip} "
"--log-svn-version "
"-o {out_tfalc}")
if not outfile:
outfile = epdlc.replace('.epdlc','.tfalc')
tfalc_output = []
# figure out the number of templates for each aperture; only the stars with
# ndets > 2 x number of templates will have TFA light-curves generated
# get the ndets for this epdlc
with open(epdlc,'rb') as epdfile:
epdlines = epdfile.readlines()
epdlen = len(epdlines)
tfarunnable = False
# check if the number of detections in this LC is more than 2 x ntemplates
# for each aperture
for tfatempf in templatefiles:
with open(tfatempf,'rb') as tfatemplist:
templistlines = tfatemplist.readlines()
tfatemplen = len(templistlines)
if epdlen >= 2*tfatemplen:
tfarunnable = True
if tfarunnable:
# run tfa for each aperture
for templatef, magcol, magind in zip(templatefiles,
epdlc_magcol,
range(len(epdlc_magcol))):
in_jdcol = epdlc_jdcol + 1
in_magcol = magcol + 1
out_magcol = in_magcol + 3
aperture_outfile = outfile + ('.TF%s' % (magind+1))
tfacmd = tfacmdstr.format(epdlc=epdlc,
templatefile=templatef,
epdlc_jdcol=in_jdcol,
epdlc_magcol=in_magcol,
tfalc_magcol=out_magcol,
template_sigclip=template_sigclip,
epdlc_sigclip=epdlc_sigclip,
out_tfalc=aperture_outfile)
if DEBUG:
print(tfacmd)
# execute the tfa shell command
tfaproc = subprocess.Popen(shlex.split(tfacmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# get results
tfa_stdout, tfa_stderr = tfaproc.communicate()
# get results if succeeded, log outcome, and return path of outfile
if tfaproc.returncode == 0:
tfalc_output.append(aperture_outfile)
else:
print('%sZ: aperture %s TFA failed for %s! Error was: %s' %
(datetime.utcnow().isoformat(), magind+1, epdlc, tfa_stderr))
tfalc_output.append(None)
return tfalc_output
else:
print('ERR! %sZ: no TFA possible for %s! ndet < 2 x n(TFA templates)' %
(datetime.utcnow().isoformat(), epdlc))
return None
def parallel_tfa_worker(task):
"""
This wraps run_tfa_singlelc above.
"""
try:
result = run_tfa_singlelc(task[0], task[1], **task[2])
except Exception as e:
print('%sZ: TFA failed for %s! Error was: %s' %
(datetime.utcnow().isoformat(), task[0], e))
result = None
return result
def parallel_run_tfa(lcdir,
templatefiles,
epdlc_glob='*.epdlc',
epdlc_jdcol=0,
epdlc_magcol=(22,23,24),
template_sigclip=5.0,
epdlc_sigclip=5.0,
nworkers=16,
workerntasks=1000):
"""
This runs TFA on the EPD lightcurves.
"""
epdlcfiles = glob.glob(os.path.join(lcdir, epdlc_glob))
tasks = [(x, templatefiles, {'epdlc_jdcol':epdlc_jdcol,
'epdlc_magcol':epdlc_magcol,
'template_sigclip':template_sigclip,
'epdlc_sigclip':epdlc_sigclip})
for x in epdlcfiles]
print('%sZ: %s objects to process, starting parallel TFA...' %
(datetime.utcnow().isoformat(), len(epdlcfiles)))
pool = mp.Pool(nworkers, maxtasksperchild=workerntasks)
results = pool.map(parallel_tfa_worker, tasks)
pool.close()
pool.join()
print('%sZ: done. %s LCs processed.' %
(datetime.utcnow().isoformat(), len(epdlcfiles)))
return {x:y for x,y in zip(epdlcfiles, results)}
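# A minimal usage sketch for parallel_run_tfa above; the template list files
# are the per-aperture outputs of choose_tfa_template / run_tfa_stage1 and the
# paths are hypothetical.
#
#   tfaresults = parallel_run_tfa(
#       '/path/to/collected-lcs',
#       ['/path/to/aperture-1-tfa-template.list',
#        '/path/to/aperture-2-tfa-template.list',
#        '/path/to/aperture-3-tfa-template.list'],
#       epdlc_glob='*.epdlc',
#       epdlc_jdcol=0,
#       epdlc_magcol=(22,23,24),
#       nworkers=16
#   )
#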
#############################
## LC STATISTICS FUNCTIONS ##
#############################
# FIXME: break these out into their own module
# 1. RMS vs. MAG plot for EPD and TFA lightcurves
# 2. MAD vs. MAG plot for EPD and TFA lightcurves
# 3. ratios of RMS and MAD vs. MAG for CCD 6,7,8 to that of CCD 5
# 4. binned LC versions of these plots, using 10, 30, and 60 minute binning
# FIXME: get adding median magnitude measurement errors to this as well. This
# will allow for getting the predicted error relation and scintillation noise.
def get_magnitude_measurement_errs(photfile,
frame,
errcols=[],
airmassheaderkey='X'):
"""
This gets the median mag errs for the object and the airmass.
Used to calculate the expected noise curve in an RMS plot.
predicted total noise = sqrt((median mag err)**2 + (scintillation noise)**2)
"""
def get_lc_statistics(lcfile,
rmcols=[19,20,21],
epcols=[22,23,24],
tfcols=[25,26,27],
rfcols=None,
sigclip=4.0,
tfalcrequired=False,
epdlcrequired=True,
fitslcnottxt=False,
istessandmaskedges=False):
"""
This calculates the following statistics for the magnitude columns in the
given lcfile.
mean, median, MAD, stdev.
Args:
lcfile: if using text file lightcurves, the lcfile is always the .epdlc
file (which contains the rlc, and is used to derive the filenames of
the tfalcs)
fitslcnottxt (bool): a workaround for the above.
rfcols are for the flux in aperture 1, 2, 3. used for ISM only
"""
tf1lc_check = os.path.exists(lcfile.replace('.epdlc','.tfalc.TF1'))
tf2lc_check = os.path.exists(lcfile.replace('.epdlc','.tfalc.TF2'))
tf3lc_check = os.path.exists(lcfile.replace('.epdlc','.tfalc.TF3'))
# otherwise, proceed with stat collection
if fitslcnottxt:
hdulist = pyfits.open(lcfile)
# by default, raw fluxes are not populated. however, they are useful to
# get a true estimate of the expected shot noise.
rf1, rf2, rf3 = [], [], []
if hdulist[0].header['DTR_EPD']:
rm1 = hdulist[1].data['IRM1']
rm2 = hdulist[1].data['IRM2']
rm3 = hdulist[1].data['IRM3']
if epdlcrequired:
ep1 = hdulist[1].data['EP1']
ep2 = hdulist[1].data['EP2']
ep3 = hdulist[1].data['EP3']
rf1 = hdulist[1].data['IFL1']
rf2 = hdulist[1].data['IFL2']
rf3 = hdulist[1].data['IFL3']
elif not hdulist[0].header['DTR_EPD'] and not epdlcrequired:
# a hack. some code has been written to rely on "EPD"
# statistics. however, if you're skipping EPD, you want to
# instead rely on IRM statistics. populating the EPD statistics
# columns with IRM statistics is hacky, but fine.
rm1 = hdulist[1].data['IRM1']
rm2 = hdulist[1].data['IRM2']
rm3 = hdulist[1].data['IRM3']
ep1 = hdulist[1].data['IRM1']
ep2 = hdulist[1].data['IRM2']
ep3 = hdulist[1].data['IRM3']
rf1 = hdulist[1].data['IFL1']
rf2 = hdulist[1].data['IFL2']
rf3 = hdulist[1].data['IFL3']
else:
print('expected DTR_EPD to be true in get_lc_statistics')
raise AssertionError
if hdulist[0].header['DTR_TFA']:
tf1 = hdulist[1].data['TFA1']
tf2 = hdulist[1].data['TFA2']
tf3 = hdulist[1].data['TFA3']
elif not hdulist[0].header['DTR_TFA'] and tfalcrequired:
print(
'{:s}Z: no TFA for {:s} and TFA is required, skipping...'.
format(datetime.utcnow().isoformat(), lcfile)
)
return None
else:
tf1,tf2,tf3 = [], [], []
else:
# check if we need TFALCs to proceed
if tfalcrequired and ((not tf1lc_check) or
(not tf2lc_check) or
(not tf3lc_check)):
print('%sZ: no TFA mags available for %s and '
'TFALC is required, skipping...' %
(datetime.utcnow().isoformat(), lcfile))
return None
try:
# get the reduced magnitude columns
(rm1, rm2, rm3,
ep1, ep2, ep3) = np.genfromtxt(lcfile,
usecols=tuple(rmcols + epcols),
unpack=True)
tf1, tf2, tf3 = np.genfromtxt(
lcfile.replace('.epdlc','.tfalc'),
usecols=tfcols,
unpack=True)
if rfcols and len(rfcols) == 3:
rf1, rf2, rf3 = np.genfromtxt(lcfile,usecols=tuple(rfcols),
unpack=True)
else:
rf1, rf2, rf3 = [], [], []
# if we don't have TF columns, cut down to RM and EP only
except Exception as e:
print('%sZ: no TFA mags available for %s!' %
(datetime.utcnow().isoformat(), lcfile))
try:
(rm1, rm2, rm3,
ep1, ep2, ep3) = np.genfromtxt(lcfile,
usecols=tuple(rmcols + epcols),
unpack=True)
tf1, tf2, tf3 = [], [], []
if rfcols and len(rfcols) == 3:
rf1, rf2, rf3 = np.genfromtxt(lcfile,usecols=tuple(rfcols),
unpack=True)
else:
rf1, rf2, rf3 = [], [], []
except Exception as e:
print('%sZ: no EPD mags available for %s!' %
(datetime.utcnow().isoformat(), lcfile))
rm1, rm2, rm3 = np.genfromtxt(lcfile,
usecols=tuple(rmcols),
unpack=True)
ep1, ep2, ep3, tf1, tf2, tf3 = [], [], [], [], [], []
if rfcols and len(rfcols) == 3:
rf1, rf2, rf3 = np.genfromtxt(lcfile,usecols=tuple(rfcols),
unpack=True)
else:
rf1, rf2, rf3 = [], [], []
#
# optionally, if it's TESS data, mask the orbit edges since they are known
# to be rampy
#
if istessandmaskedges:
time = hdulist[1].data['TMID_BJD']
from tessutils import mask_orbit_start_and_end
orbitgap = 1
expected_norbits = 2
orbitpadding = 6/24
raise_error = False
_, rm1 = mask_orbit_start_and_end(time, rm1, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
_, rm2 = mask_orbit_start_and_end(time, rm2, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
_, rm3 = mask_orbit_start_and_end(time, rm3, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
_, ep1 = mask_orbit_start_and_end(time, ep1, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
_, ep2 = mask_orbit_start_and_end(time, ep2, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
_, ep3 = mask_orbit_start_and_end(time, ep3, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
_, rf1 = mask_orbit_start_and_end(time, rf1, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
_, rf2 = mask_orbit_start_and_end(time, rf2, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
_, rf3 = mask_orbit_start_and_end(time, rf3, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
if len(tf1) > 0:
_, tf1 = mask_orbit_start_and_end(time, tf1, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
if len(tf2) > 0:
_, tf2 = mask_orbit_start_and_end(time, tf2, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
if len(tf3) > 0:
_, tf3 = mask_orbit_start_and_end(time, tf3, orbitgap=orbitgap,
expected_norbits=expected_norbits,
orbitpadding=orbitpadding,
raise_error=raise_error)
##################################
# get statistics for each column #
##################################
# fluxes
# RF1
if len(rf1) > 4:
finiteind = np.isfinite(rf1)
rf1 = rf1[finiteind]
median_rf1 = np.median(rf1)
mad_rf1 = np.median(np.fabs(rf1 - median_rf1))
mean_rf1 = np.mean(rf1)
stdev_rf1 = np.std(rf1)
ndet_rf1 = len(rf1)
if sigclip:
sigclip_rf1, lo, hi = stats_sigmaclip(rf1,
low=sigclip,
high=sigclip)
median_sigclip_rf1 = np.median(sigclip_rf1)
mad_sigclip_rf1 = np.median(np.fabs(sigclip_rf1 -
median_sigclip_rf1))
mean_sigclip_rf1 = np.mean(sigclip_rf1)
stdev_sigclip_rf1 = np.std(sigclip_rf1)
ndet_sigclip_rf1 = len(sigclip_rf1)
else:
median_sigclip_rf1 = np.nan
mad_sigclip_rf1 = np.nan
mean_sigclip_rf1 = np.nan
stdev_sigclip_rf1 = np.nan
ndet_sigclip_rf1 = np.nan
else:
median_rf1, mad_rf1, mean_rf1, stdev_rf1 = np.nan, np.nan, np.nan, np.nan
ndet_rf1 = np.nan
median_sigclip_rf1, mad_sigclip_rf1 = np.nan, np.nan
mean_sigclip_rf1, stdev_sigclip_rf1 = np.nan, np.nan
ndet_sigclip_rf1 = np.nan
# RF2
if len(rf2) > 4:
finiteind = np.isfinite(rf2)
rf2 = rf2[finiteind]
median_rf2 = np.median(rf2)
mad_rf2 = np.median(np.fabs(rf2 - median_rf2))
mean_rf2 = np.mean(rf2)
stdev_rf2 = np.std(rf2)
ndet_rf2 = len(rf2)
if sigclip:
sigclip_rf2, lo, hi = stats_sigmaclip(rf2,
low=sigclip,
high=sigclip)
median_sigclip_rf2 = np.median(sigclip_rf2)
mad_sigclip_rf2 = np.median(np.fabs(sigclip_rf2 -
median_sigclip_rf2))
mean_sigclip_rf2 = np.mean(sigclip_rf2)
stdev_sigclip_rf2 = np.std(sigclip_rf2)
ndet_sigclip_rf2 = len(sigclip_rf2)
else:
median_sigclip_rf2 = np.nan
mad_sigclip_rf2 = np.nan
mean_sigclip_rf2 = np.nan
stdev_sigclip_rf2 = np.nan
ndet_sigclip_rf2 = np.nan
else:
median_rf2, mad_rf2, mean_rf2, stdev_rf2 = np.nan, np.nan, np.nan, np.nan
ndet_rf2 = np.nan
median_sigclip_rf2, mad_sigclip_rf2 = np.nan, np.nan
mean_sigclip_rf2, stdev_sigclip_rf2 = np.nan, np.nan
ndet_sigclip_rf2 = np.nan
# RF3
if len(rf3) > 4:
finiteind = np.isfinite(rf3)
rf3 = rf3[finiteind]
median_rf3 = np.median(rf3)
mad_rf3 = np.median(np.fabs(rf3 - median_rf3))
mean_rf3 = np.mean(rf3)
stdev_rf3 = np.std(rf3)
ndet_rf3 = len(rf3)
if sigclip:
sigclip_rf3, lo, hi = stats_sigmaclip(rf3,
low=sigclip,
high=sigclip)
median_sigclip_rf3 = np.median(sigclip_rf3)
mad_sigclip_rf3 = np.median(np.fabs(sigclip_rf3 -
median_sigclip_rf3))
mean_sigclip_rf3 = np.mean(sigclip_rf3)
stdev_sigclip_rf3 = np.std(sigclip_rf3)
ndet_sigclip_rf3 = len(sigclip_rf3)
else:
median_sigclip_rf3 = np.nan
mad_sigclip_rf3 = np.nan
mean_sigclip_rf3 = np.nan
stdev_sigclip_rf3 = np.nan
ndet_sigclip_rf3 = np.nan
else:
median_rf3, mad_rf3, mean_rf3, stdev_rf3 = (np.nan, np.nan,
np.nan, np.nan)
ndet_rf3 = np.nan
median_sigclip_rf3, mad_sigclip_rf3 = np.nan, np.nan
mean_sigclip_rf3, stdev_sigclip_rf3 = np.nan, np.nan
ndet_sigclip_rf3 = np.nan
# mags
# RM1
if len(rm1) > 4:
finiteind = np.isfinite(rm1)
rm1 = rm1[finiteind]
median_rm1 = np.median(rm1)
mad_rm1 = np.median(np.fabs(rm1 - median_rm1))
mean_rm1 = np.mean(rm1)
stdev_rm1 = np.std(rm1)
ndet_rm1 = len(rm1)
if sigclip:
sigclip_rm1, lo, hi = stats_sigmaclip(rm1,
low=sigclip,
high=sigclip)
median_sigclip_rm1 = np.median(sigclip_rm1)
mad_sigclip_rm1 = np.median(np.fabs(sigclip_rm1 -
median_sigclip_rm1))
mean_sigclip_rm1 = np.mean(sigclip_rm1)
stdev_sigclip_rm1 = np.std(sigclip_rm1)
ndet_sigclip_rm1 = len(sigclip_rm1)
else:
median_sigclip_rm1 = np.nan
mad_sigclip_rm1 = np.nan
mean_sigclip_rm1 = np.nan
stdev_sigclip_rm1 = np.nan
ndet_sigclip_rm1 = np.nan
else:
median_rm1, mad_rm1, mean_rm1, stdev_rm1 = np.nan, np.nan, np.nan, np.nan
ndet_rm1 = np.nan
median_sigclip_rm1, mad_sigclip_rm1 = np.nan, np.nan
mean_sigclip_rm1, stdev_sigclip_rm1 = np.nan, np.nan
ndet_sigclip_rm1 = np.nan
# RM2
if len(rm2) > 4:
finiteind = np.isfinite(rm2)
rm2 = rm2[finiteind]
median_rm2 = np.median(rm2)
mad_rm2 = np.median(np.fabs(rm2 - median_rm2))
mean_rm2 = np.mean(rm2)
stdev_rm2 = np.std(rm2)
ndet_rm2 = len(rm2)
if sigclip:
sigclip_rm2, lo, hi = stats_sigmaclip(rm2,
low=sigclip,
high=sigclip)
median_sigclip_rm2 = np.median(sigclip_rm2)
mad_sigclip_rm2 = np.median(np.fabs(sigclip_rm2 -
median_sigclip_rm2))
mean_sigclip_rm2 = np.mean(sigclip_rm2)
stdev_sigclip_rm2 = np.std(sigclip_rm2)
ndet_sigclip_rm2 = len(sigclip_rm2)
else:
median_sigclip_rm2 = np.nan
mad_sigclip_rm2 = np.nan
mean_sigclip_rm2 = np.nan
stdev_sigclip_rm2 = np.nan
ndet_sigclip_rm2 = np.nan
else:
median_rm2, mad_rm2, mean_rm2, stdev_rm2 = np.nan, np.nan, np.nan, np.nan
ndet_rm2 = np.nan
median_sigclip_rm2, mad_sigclip_rm2 = np.nan, np.nan
mean_sigclip_rm2, stdev_sigclip_rm2 = np.nan, np.nan
ndet_sigclip_rm2 = np.nan
# RM3
if len(rm3) > 4:
finiteind = np.isfinite(rm3)
rm3 = rm3[finiteind]
median_rm3 = np.median(rm3)
mad_rm3 = np.median(np.fabs(rm3 - median_rm3))
mean_rm3 = np.mean(rm3)
stdev_rm3 = np.std(rm3)
ndet_rm3 = len(rm3)
if sigclip:
sigclip_rm3, lo, hi = stats_sigmaclip(rm3,
low=sigclip,
high=sigclip)
median_sigclip_rm3 = np.median(sigclip_rm3)
mad_sigclip_rm3 = np.median(np.fabs(sigclip_rm3 -
median_sigclip_rm3))
mean_sigclip_rm3 = np.mean(sigclip_rm3)
stdev_sigclip_rm3 = np.std(sigclip_rm3)
ndet_sigclip_rm3 = len(sigclip_rm3)
else:
median_sigclip_rm3 = np.nan
mad_sigclip_rm3 = np.nan
mean_sigclip_rm3 = np.nan
stdev_sigclip_rm3 = np.nan
ndet_sigclip_rm3 = np.nan
else:
median_rm3, mad_rm3, mean_rm3, stdev_rm3 = (np.nan, np.nan,
np.nan, np.nan)
ndet_rm3 = np.nan
median_sigclip_rm3, mad_sigclip_rm3 = np.nan, np.nan
mean_sigclip_rm3, stdev_sigclip_rm3 = np.nan, np.nan
ndet_sigclip_rm3 = np.nan
# EP1
if len(ep1) > 4:
finiteind = np.isfinite(ep1)
ep1 = ep1[finiteind]
median_ep1 = np.median(ep1)
mad_ep1 = np.median(np.fabs(ep1 - median_ep1))
mean_ep1 = np.mean(ep1)
stdev_ep1 = np.std(ep1)
ndet_ep1 = len(ep1)
if sigclip:
sigclip_ep1, lo, hi = stats_sigmaclip(ep1,
low=sigclip,
high=sigclip)
median_sigclip_ep1 = np.median(sigclip_ep1)
mad_sigclip_ep1 = np.median(np.fabs(sigclip_ep1 -
median_sigclip_ep1))
mean_sigclip_ep1 = np.mean(sigclip_ep1)
stdev_sigclip_ep1 = np.std(sigclip_ep1)
ndet_sigclip_ep1 = len(sigclip_ep1)
else:
median_sigclip_ep1 = np.nan
mad_sigclip_ep1 = np.nan
mean_sigclip_ep1 = np.nan
stdev_sigclip_ep1 = np.nan
ndet_sigclip_ep1 = np.nan
else:
median_ep1, mad_ep1, mean_ep1, stdev_ep1 = np.nan, np.nan, np.nan, np.nan
ndet_ep1 = np.nan
median_sigclip_ep1, mad_sigclip_ep1 = np.nan, np.nan
mean_sigclip_ep1, stdev_sigclip_ep1 = np.nan, np.nan
ndet_sigclip_ep1 = np.nan
# EP2
if len(ep2) > 4:
finiteind = np.isfinite(ep2)
ep2 = ep2[finiteind]
median_ep2 = np.median(ep2)
mad_ep2 = np.median(np.fabs(ep2 - median_ep2))
mean_ep2 = np.mean(ep2)
stdev_ep2 = np.std(ep2)
ndet_ep2 = len(ep2)
if sigclip:
sigclip_ep2, lo, hi = stats_sigmaclip(ep2,
low=sigclip,
high=sigclip)
median_sigclip_ep2 = np.median(sigclip_ep2)
mad_sigclip_ep2 = np.median(np.fabs(sigclip_ep2 -
median_sigclip_ep2))
mean_sigclip_ep2 = np.mean(sigclip_ep2)
stdev_sigclip_ep2 = np.std(sigclip_ep2)
ndet_sigclip_ep2 = len(sigclip_ep2)
else:
median_sigclip_ep2 = np.nan
mad_sigclip_ep2 = np.nan
mean_sigclip_ep2 = np.nan
stdev_sigclip_ep2 = np.nan
ndet_sigclip_ep2 = np.nan
else:
median_ep2, mad_ep2, mean_ep2, stdev_ep2 = np.nan, np.nan, np.nan, np.nan
ndet_ep2 = np.nan
median_sigclip_ep2, mad_sigclip_ep2 = np.nan, np.nan
mean_sigclip_ep2, stdev_sigclip_ep2 = np.nan, np.nan
ndet_sigclip_ep2 = np.nan
# EP3
if len(ep3) > 4:
finiteind = np.isfinite(ep3)
ep3 = ep3[finiteind]
median_ep3 = np.median(ep3)
mad_ep3 = np.median(np.fabs(ep3 - median_ep3))
mean_ep3 = np.mean(ep3)
stdev_ep3 = np.std(ep3)
ndet_ep3 = len(ep3)
if sigclip:
sigclip_ep3, lo, hi = stats_sigmaclip(ep3,
low=sigclip,
high=sigclip)
median_sigclip_ep3 = np.median(sigclip_ep3)
mad_sigclip_ep3 = np.median(np.fabs(sigclip_ep3 -
median_sigclip_ep3))
mean_sigclip_ep3 = np.mean(sigclip_ep3)
stdev_sigclip_ep3 = np.std(sigclip_ep3)
ndet_sigclip_ep3 = len(sigclip_ep3)
else:
median_sigclip_ep3 = np.nan
mad_sigclip_ep3 = np.nan
mean_sigclip_ep3 = np.nan
stdev_sigclip_ep3 = np.nan
ndet_sigclip_ep3 = np.nan
else:
median_ep3, mad_ep3, mean_ep3, stdev_ep3 = np.nan, np.nan, np.nan, np.nan
ndet_ep3 = np.nan
median_sigclip_ep3, mad_sigclip_ep3 = np.nan, np.nan
mean_sigclip_ep3, stdev_sigclip_ep3 = np.nan, np.nan
ndet_sigclip_ep3 = np.nan
# TF1
if len(tf1) > 4:
finiteind = np.isfinite(tf1)
tf1 = tf1[finiteind]
median_tf1 = np.median(tf1)
mad_tf1 = np.median(np.fabs(tf1 - median_tf1))
mean_tf1 = np.mean(tf1)
stdev_tf1 = np.std(tf1)
ndet_tf1 = len(tf1)
if sigclip:
sigclip_tf1, lo, hi = stats_sigmaclip(tf1,
low=sigclip,
high=sigclip)
median_sigclip_tf1 = np.median(sigclip_tf1)
mad_sigclip_tf1 = np.median(np.fabs(sigclip_tf1 -
median_sigclip_tf1))
mean_sigclip_tf1 = np.mean(sigclip_tf1)
stdev_sigclip_tf1 = np.std(sigclip_tf1)
ndet_sigclip_tf1 = len(sigclip_tf1)
else:
median_sigclip_tf1 = np.nan
mad_sigclip_tf1 = np.nan
mean_sigclip_tf1 = np.nan
stdev_sigclip_tf1 = np.nan
ndet_sigclip_tf1 = np.nan
else:
median_tf1, mad_tf1, mean_tf1, stdev_tf1 = np.nan, np.nan, np.nan, np.nan
ndet_tf1 = np.nan
median_sigclip_tf1, mad_sigclip_tf1 = np.nan, np.nan
mean_sigclip_tf1, stdev_sigclip_tf1 = np.nan, np.nan
ndet_sigclip_tf1 = np.nan
# TF2
if len(tf2) > 4:
finiteind = np.isfinite(tf2)
tf2 = tf2[finiteind]
median_tf2 = np.median(tf2)
mad_tf2 = np.median(np.fabs(tf2 - median_tf2))
mean_tf2 = np.mean(tf2)
stdev_tf2 = np.std(tf2)
ndet_tf2 = len(tf2)
if sigclip:
sigclip_tf2, lo, hi = stats_sigmaclip(tf2,
low=sigclip,
high=sigclip)
median_sigclip_tf2 = np.median(sigclip_tf2)
mad_sigclip_tf2 = np.median(np.fabs(sigclip_tf2 -
median_sigclip_tf2))
mean_sigclip_tf2 = np.mean(sigclip_tf2)
stdev_sigclip_tf2 = np.std(sigclip_tf2)
ndet_sigclip_tf2 = len(sigclip_tf2)
else:
median_sigclip_tf2 = np.nan
mad_sigclip_tf2 = np.nan
mean_sigclip_tf2 = np.nan
stdev_sigclip_tf2 = np.nan
ndet_sigclip_tf2 = np.nan
else:
median_tf2, mad_tf2, mean_tf2, stdev_tf2 = np.nan, np.nan, np.nan, np.nan
ndet_tf2 = np.nan
median_sigclip_tf2, mad_sigclip_tf2 = np.nan, np.nan
mean_sigclip_tf2, stdev_sigclip_tf2 = np.nan, np.nan
ndet_sigclip_tf2 = np.nan
# TF3
if len(tf3) > 4:
finiteind = np.isfinite(tf3)
tf3 = tf3[finiteind]
median_tf3 = np.median(tf3)
mad_tf3 = np.median(np.fabs(tf3 - median_tf3))
mean_tf3 = np.mean(tf3)
stdev_tf3 = np.std(tf3)
ndet_tf3 = len(tf3)
if sigclip:
sigclip_tf3, lo, hi = stats_sigmaclip(tf3,
low=sigclip,
high=sigclip)
median_sigclip_tf3 = np.median(sigclip_tf3)
mad_sigclip_tf3 = np.median(np.fabs(sigclip_tf3 -
median_sigclip_tf3))
mean_sigclip_tf3 = np.mean(sigclip_tf3)
stdev_sigclip_tf3 = np.std(sigclip_tf3)
ndet_sigclip_tf3 = len(sigclip_tf3)
else:
median_sigclip_tf3 = np.nan
mad_sigclip_tf3 = np.nan
mean_sigclip_tf3 = np.nan
stdev_sigclip_tf3 = np.nan
ndet_sigclip_tf3 = np.nan
else:
median_tf3, mad_tf3, mean_tf3, stdev_tf3 = np.nan, np.nan, np.nan, np.nan
ndet_tf3 = np.nan
median_sigclip_tf3, mad_sigclip_tf3 = np.nan, np.nan
mean_sigclip_tf3, stdev_sigclip_tf3 = np.nan, np.nan
ndet_sigclip_tf3 = np.nan
## COLLECT STATS
print('%sZ: done with statistics for %s' %
(datetime.utcnow().isoformat(), lcfile))
lcobj = os.path.splitext(os.path.basename(lcfile))[0]
if '_llc' in lcobj:
# TESS filenaming format: {gaiaid}_llc.fits -- so if we want to
# retrieve the Gaia-ID, must omit "_llc" here.
lcobj = lcobj.replace('_llc','')
if 'hlsp' in lcobj:
# typical format:
# hlsp_cdips_tess_ffi_gaiatwo0002917938284837069312-0006_tess_v01_llc.fits
from parse import search
res = search('hlsp_cdips_tess_ffi_gaiatwo{}-{}_tess{}', lcobj)
lcobj = res[0].lstrip('0')
return {'lcfile':lcfile,
'lcobj':lcobj,
# reduced mags aperture 1
'median_rf1':median_rf1,
'mad_rf1':mad_rf1,
'mean_rf1':mean_rf1,
'stdev_rf1':stdev_rf1,
'ndet_rf1':ndet_rf1,
'median_sigclip_rf1':median_sigclip_rf1,
'mad_sigclip_rf1':mad_sigclip_rf1,
'mean_sigclip_rf1':mean_sigclip_rf1,
'stdev_sigclip_rf1':stdev_sigclip_rf1,
'ndet_sigclip_rf1':ndet_sigclip_rf1,
# reduced mags aperture 2
'median_rf2':median_rf2,
'mad_rf2':mad_rf2,
'mean_rf2':mean_rf2,
'stdev_rf2':stdev_rf2,
'ndet_rf2':ndet_rf2,
'median_sigclip_rf2':median_sigclip_rf2,
'mad_sigclip_rf2':mad_sigclip_rf2,
'mean_sigclip_rf2':mean_sigclip_rf2,
'stdev_sigclip_rf2':stdev_sigclip_rf2,
'ndet_sigclip_rf2':ndet_sigclip_rf2,
# reduced mags aperture 3
'median_rf3':median_rf3,
'mad_rf3':mad_rf3,
'mean_rf3':mean_rf3,
'stdev_rf3':stdev_rf3,
'ndet_rf3':ndet_rf3,
'median_sigclip_rf3':median_sigclip_rf3,
'mad_sigclip_rf3':mad_sigclip_rf3,
'mean_sigclip_rf3':mean_sigclip_rf3,
'stdev_sigclip_rf3':stdev_sigclip_rf3,
'ndet_sigclip_rf3':ndet_sigclip_rf3,
# reduced mags aperture 1
'median_rm1':median_rm1,
'mad_rm1':mad_rm1,
'mean_rm1':mean_rm1,
'stdev_rm1':stdev_rm1,
'ndet_rm1':ndet_rm1,
'median_sigclip_rm1':median_sigclip_rm1,
'mad_sigclip_rm1':mad_sigclip_rm1,
'mean_sigclip_rm1':mean_sigclip_rm1,
'stdev_sigclip_rm1':stdev_sigclip_rm1,
'ndet_sigclip_rm1':ndet_sigclip_rm1,
# reduced mags aperture 2
'median_rm2':median_rm2,
'mad_rm2':mad_rm2,
'mean_rm2':mean_rm2,
'stdev_rm2':stdev_rm2,
'ndet_rm2':ndet_rm2,
'median_sigclip_rm2':median_sigclip_rm2,
'mad_sigclip_rm2':mad_sigclip_rm2,
'mean_sigclip_rm2':mean_sigclip_rm2,
'stdev_sigclip_rm2':stdev_sigclip_rm2,
'ndet_sigclip_rm2':ndet_sigclip_rm2,
# reduced mags aperture 3
'median_rm3':median_rm3,
'mad_rm3':mad_rm3,
'mean_rm3':mean_rm3,
'stdev_rm3':stdev_rm3,
'ndet_rm3':ndet_rm3,
'median_sigclip_rm3':median_sigclip_rm3,
'mad_sigclip_rm3':mad_sigclip_rm3,
'mean_sigclip_rm3':mean_sigclip_rm3,
'stdev_sigclip_rm3':stdev_sigclip_rm3,
'ndet_sigclip_rm3':ndet_sigclip_rm3,
# EPD mags aperture 1
'median_ep1':median_ep1,
'mad_ep1':mad_ep1,
'mean_ep1':mean_ep1,
'stdev_ep1':stdev_ep1,
'ndet_ep1':ndet_ep1,
'median_sigclip_ep1':median_sigclip_ep1,
'mad_sigclip_ep1':mad_sigclip_ep1,
'mean_sigclip_ep1':mean_sigclip_ep1,
'stdev_sigclip_ep1':stdev_sigclip_ep1,
'ndet_sigclip_ep1':ndet_sigclip_ep1,
# EPD mags aperture 2
'median_ep2':median_ep2,
'mad_ep2':mad_ep2,
'mean_ep2':mean_ep2,
'stdev_ep2':stdev_ep2,
'ndet_ep2':ndet_ep2,
'median_sigclip_ep2':median_sigclip_ep2,
'mad_sigclip_ep2':mad_sigclip_ep2,
'mean_sigclip_ep2':mean_sigclip_ep2,
'stdev_sigclip_ep2':stdev_sigclip_ep2,
'ndet_sigclip_ep2':ndet_sigclip_ep2,
# EPD mags aperture 3
'median_ep3':median_ep3,
'mad_ep3':mad_ep3,
'mean_ep3':mean_ep3,
'stdev_ep3':stdev_ep3,
'ndet_ep3':ndet_ep3,
'median_sigclip_ep3':median_sigclip_ep3,
'mad_sigclip_ep3':mad_sigclip_ep3,
'mean_sigclip_ep3':mean_sigclip_ep3,
'stdev_sigclip_ep3':stdev_sigclip_ep3,
'ndet_sigclip_ep3':ndet_sigclip_ep3,
# TFA mags aperture 1
'median_tf1':median_tf1,
'mad_tf1':mad_tf1,
'mean_tf1':mean_tf1,
'stdev_tf1':stdev_tf1,
'ndet_tf1':ndet_tf1,
'median_sigclip_tf1':median_sigclip_tf1,
'mad_sigclip_tf1':mad_sigclip_tf1,
'mean_sigclip_tf1':mean_sigclip_tf1,
'stdev_sigclip_tf1':stdev_sigclip_tf1,
'ndet_sigclip_tf1':ndet_sigclip_tf1,
# TFA mags aperture 2
'median_tf2':median_tf2,
'mad_tf2':mad_tf2,
'mean_tf2':mean_tf2,
'stdev_tf2':stdev_tf2,
'ndet_tf2':ndet_tf2,
'median_sigclip_tf2':median_sigclip_tf2,
'mad_sigclip_tf2':mad_sigclip_tf2,
'mean_sigclip_tf2':mean_sigclip_tf2,
'stdev_sigclip_tf2':stdev_sigclip_tf2,
'ndet_sigclip_tf2':ndet_sigclip_tf2,
# TFA mags aperture 3
'median_tf3':median_tf3,
'mad_tf3':mad_tf3,
'mean_tf3':mean_tf3,
'stdev_tf3':stdev_tf3,
'ndet_tf3':ndet_tf3,
'median_sigclip_tf3':median_sigclip_tf3,
'mad_sigclip_tf3':mad_sigclip_tf3,
'mean_sigclip_tf3':mean_sigclip_tf3,
'stdev_sigclip_tf3':stdev_sigclip_tf3,
'ndet_sigclip_tf3':ndet_sigclip_tf3}
def lc_statistics_worker(task):
"""
This is a worker that runs the function above in a parallel worker pool.
"""
try:
return get_lc_statistics(task[0], **task[1])
except Exception as e:
print('LC STATS SOMETHING WENT WRONG! task was {}, exception was {}'.
format( repr(task), repr(e))
)
return None
def parallel_lc_statistics(lcdir,
lcglob,
fovcatalog,
fovcathasgaiaids=False,
tfalcrequired=False,
fitslcnottxt=False,
fovcatcols=(0,9), # objectid, magcol to use
fovcatmaglabel='r',
outfile=None,
nworkers=16,
workerntasks=500,
rmcols=[19,20,21],
epcols=[22,23,24],
tfcols=[25,26,27],
rfcols=None,
correctioncoeffs=None,
sigclip=4.0,
epdlcrequired=True,
istessandmaskedges=False):
"""
This calculates statistics on all lc files in lcdir.
Args:
lcdir (str): directory containing lightcurves
lcglob (str): glob to epd lcs, inside lcdir. E.g., '*.epdlc'. These
contain the rlc, and are used to derive the filenames of the tfalcs.
    fovcatalog (str): path to the REFORMED fov catalog, which supplies the
    catalog magnitude corresponding to the canonical magnitude for each star.
    fovcathasgaiaids (bool): if the reformed FOV catalog has Gaia ids, set
    this to True. The default is to assume HAT-IDs, which have different
    string lengths and are read differently.
    fitslcnottxt (bool): if True, I/O will attempt to read
    INSTRUMENTAL/EPD/TFA magnitudes from a FITS-format lightcurve, not a
    text-file lightcurve. By default, False.
epdlcrequired (bool): a variety of aperturephot.py tools assume that if
you are creating statistics, you have run EPD. This isn't necessarily
true (you may wish to get statistics on the instrumental raw
magnitudes). If you set this to False, the statistics file will,
hackily, populate the "EPD statistics" with IRM values.
Output:
Puts the results in text file outfile.
outfile contains the following columns:
object, ndet,
median RM[1-3], MAD RM[1-3], mean RM[1-3], stdev RM[1-3],
median EP[1-3], MAD EP[1-3], mean EP[1-3], stdev EP[1-3],
median TF[1-3], MAD TF[1-3], mean TF[1-3], stdev TF[1-3]
if a value is missing, it will be np.nan.
Notes:
For ISM, consider using correctioncoeffs as well. These are c1, c2
resulting from a fit to the catalogmag-flux relation using the
expression:
catrmag = -2.5 * log10(flux/c1) + c2
    where the fit is done in the bright limit (8.0 < r < 12.0). This
    corrects for too-faint catalog mags caused by crowding and blending.
correctioncoeffs is like:
[[ap1_c1,ap1_c2],[ap2_c1,ap2_c2],[ap3_c1,ap3_c2]]
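    Example:
        An illustrative call (the paths, glob, and worker count are assumptions):
        >>> parallel_lc_statistics('/path/to/LC/', '*.epdlc',
        ...                        '/path/to/fovcat-reformed.catalog',
        ...                        fovcathasgaiaids=True, nworkers=8,
        ...                        sigclip=4.0)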
"""
lcfiles = glob.glob(os.path.join(lcdir, lcglob))
tasks = [[x, {'rmcols':rmcols,
'epcols':epcols,
'tfcols':tfcols,
'rfcols':rfcols,
'sigclip':sigclip,
'tfalcrequired':tfalcrequired,
'fitslcnottxt':fitslcnottxt,
'epdlcrequired':epdlcrequired,
'istessandmaskedges':istessandmaskedges}] for x in lcfiles]
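    # maxtasksperchild recycles each worker process after `workerntasks` tasks,
    # which keeps memory from accumulating across many lightcurve reads.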
pool = mp.Pool(nworkers,maxtasksperchild=workerntasks)
results = pool.map(lc_statistics_worker, tasks)
pool.close()
pool.join()
print('%sZ: done. %s lightcurves processed.' %
(datetime.utcnow().isoformat(), len(lcfiles)))
if not outfile:
outfile = os.path.join(lcdir, 'lightcurve-statistics.txt')
outf = open(outfile,'wb')
outlineformat = (
'%s %.3f '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.6f %.6f %.6f %.6f %s %.6f %.6f %.6f %.6f %s '
'%.3f %.3f %.3f\n'
)
outheader = '# total objects: %s, sigmaclip used: %s\n' % (
len(lcfiles), sigclip)
outf.write(outheader.encode('utf-8'))
outcolumnkey = (
'# columns are:\n'
'# 0,1: object, catalog mag %s\n'
'# 2,3,4,5,6: median RM1, MAD RM1, mean RM1, stdev RM1, ndet RM1\n'
'# 7,8,9,10,11: sigma-clipped median RM1, MAD RM1, mean RM1, '
'stdev RM1, ndet RM1\n'
'# 12,13,14,15,16: median RM2, MAD RM2, mean RM2, stdev RM2, ndet RM2\n'
'# 17,18,19,20,21: sigma-clipped median RM2, MAD RM2, mean RM2, '
'stdev RM2, ndet RM2\n'
'# 22,23,24,25,26: median RM3, MAD RM3, mean RM3, stdev RM3, ndet RM3\n'
'# 27,28,29,30,31: sigma-clipped median RM3, MAD RM3, mean RM3, '
'stdev RM3, ndet RM3\n'
'# 32,33,34,35,36: median EP1, MAD EP1, mean EP1, stdev EP1, ndet EP1\n'
'# 37,38,39,40,41: sigma-clipped median EP1, MAD EP1, mean EP1, '
'stdev EP1, ndet EP1\n'
'# 42,43,44,45,46: median EP2, MAD EP2, mean EP2, stdev EP2, ndet EP2\n'
'# 47,48,49,50,51: sigma-clipped median EP2, MAD EP2, mean EP2, '
'stdev EP2, ndet EP2\n'
'# 52,53,54,55,56: median EP3, MAD EP3, mean EP3, stdev EP3, ndet EP3\n'
'# 57,58,59,60,61: sigma-clipped median EP3, MAD EP3, mean EP3, '
'stdev EP3, ndet EP3\n'
'# 62,63,64,65,66: median TF1, MAD TF1, mean TF1, stdev TF1, ndet TF1\n'
'# 67,68,69,70,71: sigma-clipped median TF1, MAD TF1, mean TF1, '
'stdev TF1, ndet TF1\n'
'# 72,73,74,75,76: median TF2, MAD TF2, mean TF2, stdev TF2, ndet TF2\n'
'# 77,78,79,80,81: sigma-clipped median TF2, MAD TF2, mean TF2, '
'stdev TF2, ndet TF2\n'
'# 82,83,84,85,86: median TF3, MAD TF3, mean TF3, stdev TF3, ndet TF3\n'
'# 87,88,89,90,91: sigma-clipped median TF3, MAD TF3, mean TF3, '
'stdev TF3, ndet TF3\n'
'# 92,93,94,95,96: median RF1, MAD RF1, mean RF1, stdev RF1, ndet RF1\n'
'# 97,98,99,100,101: sigma-clipped median RF1, MAD RF1, mean RF1, '
'stdev RF1, ndet RF1\n'
'# 102,103,104,105,106: median RF2, MAD RF2, mean RF2, stdev RF2, ndet '
'RF2\n'
'# 107,108,109,110,111: sigma-clipped median RF2, MAD RF2, mean RF2, '
'stdev RF2, ndet RF2\n'
'# 112,113,114,115,116: median RF3, MAD RF3, mean RF3, stdev RF3, '
'ndet RF3\n'
'# 117,118,119,120,121: sigma-clipped median RF3, MAD RF3, mean RF3, '
'stdev RF3, ndet RF3\n'
        '# 122, 123, 124: corrected cat mag AP1, corrected cat mag AP2, '
        'corrected cat mag AP3\n'
) % fovcatmaglabel
outf.write(outcolumnkey.encode('utf-8'))
# open the fovcatalog and read in the column magnitudes and hatids
if not fovcathasgaiaids:
# assume HAT-IDs, HAT-123-4567890, 17 character strings
fovcat = np.genfromtxt(fovcatalog,
usecols=fovcatcols,
dtype='U17,f8',
names=['objid','mag'])
else:
# assume GAIA-IDs. From gaia2read, with "GAIA" id option, this is just
# 19 character integers.
fovcat = np.genfromtxt(fovcatalog,
usecols=fovcatcols,
dtype='U19,f8',
names=['objid','mag'])
# Using a dictionary leads to ~ 300x speedup
fovdict = dict(fovcat)
for stat in results:
if stat is not None:
# find the catalog mag for this object
if stat['lcobj'] in fovdict:
catmag = fovdict[stat['lcobj']]
else:
print('no catalog mag for %s, using median TF3 mag' %
stat['lcobj'])
catmag = stat['median_tf3']
            if pd.isnull(catmag):
import argparse
import os.path as osp
from glob import glob
import cv2
import pandas as pd
from tqdm import tqdm
from gwd.converters import kaggle2coco
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--image-pattern", default="/data/SPIKE_images/*jpg")
parser.add_argument("--annotation-root", default="/data/SPIKE_annotations")
parser.add_argument("--kaggle_output_path", default="/data/spike.csv")
parser.add_argument("--coco_output_path", default="/data/coco_spike.json")
return parser.parse_args()
def main():
args = parse_args()
img_paths = glob(args.image_pattern)
annotations = []
for img_path in tqdm(img_paths):
ann_path = osp.join(args.annotation_root, (osp.basename(img_path.replace("jpg", "bboxes.tsv"))))
        ann = pd.read_csv(ann_path, sep="\t", names=["x_min", "y_min", "x_max", "y_max"])
"""
Optimal power flow in power distribution grids using
second-order cone optimization
by:
<NAME>
30/08/2021
Version 01
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import cvxpy as cvx
"----- Read the database -----"
feeder = pd.read_csv("FEEDER34.csv")
num_lines = len(feeder)
line = {}
load = {}
source = {}
num_nodes = 0
num_sources = 0
kk = 0
for k in range(num_lines):
n1 = feeder['From'][k]
n2 = feeder['To'][k]
z = feeder['Rpu'][k] + 1j*feeder['Xpu'][k]
line[n1,n2] = kk,1/z,feeder['SmaxLine'][k]
kk = kk + 1
num_nodes = np.max([num_nodes,n1+1,n2+1])
load[n2] = feeder['Ppu'][k]+1j*feeder['Qpu'][k]
if feeder['SGmax'][k] > 0.0:
source[n2] = (num_sources,feeder['PGmax'][k],feeder['SGmax'][k])
num_sources = num_sources + 1
"----- Optimization model -----"
s_slack = cvx.Variable(complex=True)
u = cvx.Variable(num_nodes)
w = cvx.Variable(num_lines,complex=True)
s_from = cvx.Variable(num_lines,complex=True)
s_to = cvx.Variable(num_lines,complex=True)
s_gen = cvx.Variable(num_sources,complex=True)
res = [u[0]==1.0]
EqN = num_nodes*[0]
for (k,m) in line:
pos,ykm,smax = line[(k,m)]
EqN[k] = EqN[k]-s_from[pos]
EqN[m] = EqN[m]+s_to[pos]
EqN[m] = EqN[m]-load[m]
for m in source:
pos,pmax,smax = source[m]
EqN[m] = EqN[m]+s_gen[pos]
res += [cvx.abs(s_gen[pos]) <= smax]
res += [cvx.real(s_gen[pos]) <= pmax]
EqN[0] = EqN[0] + s_slack
res += [EqN[0] == 0]
for (k,m) in line:
pos,ykm,smax = line[(k,m)]
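    # Rotated second-order cone: ||(2*w, u_k - u_m)|| <= u_k + u_m is equivalent to
    # |w|^2 <= u_k * u_m, i.e. the convex relaxation of w = V_k * conj(V_m).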
res += [cvx.SOC(u[k]+u[m],cvx.vstack([2*w[pos],u[k]-u[m]]))]
res += [s_from[pos] == ykm.conjugate()*(u[k]-w[pos])]
res += [s_to[pos] == ykm.conjugate()*(cvx.conj(w[pos])-u[m])]
res += [u[m] >= 0.95**2]
res += [u[m] <= 1.05**2]
res += [cvx.abs(s_from[pos]) <= smax]
res += [cvx.abs(s_to[pos]) <= smax]
res += [EqN[m] == 0]
res += [cvx.abs(s_slack)<=10]
obj = cvx.Minimize(cvx.sum(cvx.real(s_from))-cvx.sum(cvx.real(s_to)))
OPFSOC = cvx.Problem(obj,res)
OPFSOC.solve(solver=cvx.ECOS,verbose=False)
print(OPFSOC.status,obj.value)
"----- Print results -----"
v = np.sqrt(u.value)
a = np.zeros(num_nodes)
s = np.zeros(num_nodes)*0j
plim = np.zeros(num_nodes)
for (k,m) in line:
pos,ykm,smax = line[(k,m)]
a[m] = a[k]-np.angle(w.value[pos])
for m in source:
pos,pmax,smax = source[m]
s[m] = s_gen[pos].value
plim[m] = pmax
results = pd.DataFrame()
from trueskill import TrueSkill
import pandas as pd
def get_elo(d, env, player):
if player not in d:
d[player] = env.create_rating()
return d[player]
def calc_elo(df):
env = TrueSkill(draw_probability=0)
ratings = {}
for idx, row in df.iterrows():
teams = [[get_elo(ratings, env, player) for player in team] for team in row['teams']]
elos = env.rate(teams, row['ranks'])
for team, team_elos in zip(row['teams'], elos):
for player, player_elo in zip(team, team_elos):
ratings[player] = player_elo
df.loc[idx, 'elos'] = [elos]
return ratings, df
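# Illustrative usage (the column layout is assumed from parse_results below):
#   res_df = parse_results()
#   ratings, res_df = calc_elo(res_df)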
def parse_results():
results = []
with open('results.txt') as f:
for line in f.readlines():
if line.startswith('DATE'): continue
date, game, teams, ranks = line.split('|')
teams, ranks = teams.split(';'), ranks.split(';')
teams = [team.split(',') for team in teams]
ranks = [int(rank) for rank in ranks]
results.append((date, game, teams, ranks))
    res_df = pd.DataFrame(results, columns=['date', 'game', 'teams', 'ranks'])
# -*- coding: utf-8 -*-
'''
Rickshaw
-------
Python Pandas + Rickshaw.js
'''
from __future__ import division
import time
import json
from pkg_resources import resource_string
import numpy as np
import pandas as pd
from jinja2 import Environment, PackageLoader
class Chart(object):
'''Visualize Pandas Timeseries with Rickshaw.js'''
def __init__(self, data=None, width=750, height=400, plt_type='line',
colors=None, x_time=True, palette=None, **kwargs):
'''Generate a Rickshaw time series visualization with Pandas
Series and DataFrames.
The bearcart Chart generates the Rickshaw visualization of a Pandas
timeseries Series or DataFrame. The only required parameters are
data, width, height, and type. Colors is an optional parameter;
bearcart will default to the Rickshaw spectrum14 color palette if
none are passed. Keyword arguments can be passed to disable the
following components:
- x_axis
- y_axis
- hover
- legend
Parameters
----------
data: Pandas Series or DataFrame, default None
The Series or Dataframe must have a Datetime index.
        width: int, default 750
            Width of the chart in pixels
        height: int, default 400
            Height of the chart in pixels
        plt_type: string, default 'line'
            Must be one of 'line', 'area', 'scatterplot' or 'bar'
colors: dict, default None
Dict with keys matching DataFrame or Series column names, and hex
strings for colors
x_time: boolean, default True
If passed as False, the x-axis will have non-time values
kwargs:
Keyword arguments that, if passed as False, will disable the
following components: x_axis, y_axis, hover, legend
Returns
-------
Bearcart object
Examples
--------
>>>vis = bearcart.Chart(data=df, width=800, height=300, type='area')
>>>vis = bearcart.Chart(data=series,type='scatterplot',
colors={'Data 1': '#25aeb0',
'Data 2': '#114e4f'})
#Disable x_axis and legend
>>>vis = bearcart.Chart(data=df, x_axis=False, legend=False)
'''
self.defaults = {'x_axis': True, 'y_axis': True, 'hover': True,
'legend': True}
self.env = Environment(loader=PackageLoader('external.bearcart',
'templates'))
self.palette_scheme = palette or 'spectrum14'
#Colors need to be js strings
if colors:
self.colors = {key: "'{0}'".format(value)
for key, value in colors.iteritems()}
else:
self.colors = None
self.x_axis_time = x_time
self.renderer = plt_type
self.width = width
self.height = height
self.template_vars = {}
#Update defaults for passed kwargs
for key, value in kwargs.iteritems():
self.defaults[key] = value
# Get templates for graph elements
for att, val in self.defaults.iteritems():
render_vars = {}
if val:
if not self.x_axis_time:
if att == 'x_axis' and val is not True:
att = 'x_axis_num'
render_vars = self.make_ticks(val)
elif att == 'hover':
render_vars = {'x_hover': 'xFormatter: function(x)'
'{return xTicks[x]}'}
temp = self.env.get_template(att + '.js')
self.template_vars.update({att: temp.render(render_vars)})
#Transform data into Rickshaw-happy JSON format
if data is not None:
self.transform_data(data)
def make_ticks(self, axis):
self.template_vars['transform'] = (
"rotateText();$('#legend').bind('click',rotateText);")
cases = ','.join(["%s:'%s'" % (i, v) for i, v in enumerate(axis)])
return {'xTicks': 'var xTicks = {%s};' % cases,
'ticks': 'tickFormat:function(x){return xTicks[x]},'}
def transform_data(self, data):
'''Transform Pandas Timeseries into JSON format
Parameters
----------
data: DataFrame or Series
Pandas DataFrame or Series must have datetime index
Returns
-------
JSON to object.json_data
Example
-------
>>>vis.transform_data(df)
>>>vis.json_data
'''
def convert(v):
if isinstance(v, np.float64):
v = float(v)
elif isinstance(v, np.int64):
v = int(v)
return v
objectify = lambda dat: [{"x": convert(x), "y": convert(y)}
for x, y in dat.iteritems()]
self.raw_data = data
if isinstance(data, pd.Series):
data.name = data.name or 'data'
self.json_data = [{'name': data.name, 'data': objectify(data)}]
elif isinstance(data, pd.DataFrame):
self.json_data = [{'name': x[0], 'data': objectify(x[1])}
for x in data.iteritems()]
#Transform to Epoch seconds for Rickshaw
if self.x_axis_time:
for datacol in self.json_data:
datacol = datacol['data']
for objs in datacol:
                    if pd.isnull(objs['x']):
import matplotlib.pyplot as plt
#from baseline_10day_avg import *
import pandas as pd
def run(res):
    df = pd.DataFrame.from_records(res)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.12.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
BASE_SEED = 0
DATA_SETUP = dict(
num_features=100,
noise=0.1,
delta=0.5,
divergent=True,
)
import os
DATA_FILE_FORMAT = os.environ.get("DATA_FILE_FORMAT") or "results/simulations_{}_paper_snapshot.csv"
NUM_SHADOWS = int(os.environ.get("NUM_SHADOWS") or 30)
NUM_EVAL_MODELS = int(os.environ.get("NUM_EVAL_MODELS") or 200)
RESTORE_SAVED_DATA = False
# %%
# %load_ext autoreload
# %autoreload 2
import math
import hashlib
import itertools
import collections
import numpy as np
import pandas as pd
import seaborn as sns
import diffprivlib.models as dp
from IPython.display import display
from tqdm import autonotebook as tqdm
from matplotlib import pyplot as plt
from sklearn.datasets import make_spd_matrix
from sklearn.model_selection import train_test_split
from scipy import stats
from joblib import Parallel, delayed
from mia import run_shadow_model_attack, run_threshold_estimator, apply_estimator_func
from utils import infer_from, normalize_vuln_mean, normalize_vuln_std
from model_zoo import model_zoo, lr_setup, renaming_dict, DpClassifierFactory
from plotting import add_significance_markers
import plot_params
# %%
import logging
logger = logging.Logger(name="default")
# %% [markdown]
# ## Simulating disparate vulnerability
# %% [markdown]
# ### Synthetic data
# %%
def create_multinomial_setup(
num_features=2,
noise=0.1,
delta=0.5,
divergent=True,
seed=1
):
gen = np.random.RandomState(seed)
weights = np.ones(num_features) / num_features
if divergent:
means_a = np.zeros([num_features])
means_b = np.ones([num_features])
else:
means_a = gen.uniform(-1, 1, num_features)
means_b = gen.uniform(-1, 1, num_features)
cov = make_spd_matrix(num_features, random_state=seed) * noise
return means_a, means_b, cov, noise, delta
def gen_sim_data(setup, gen, size_a, size_b):
means0, means1, cov, noise, delta = setup
y = [0] * (size_a // 2) + [1] * math.ceil(size_a / 2) + \
[0] * (size_b // 2) + [1] * math.ceil(size_b / 2)
a0 = gen.multivariate_normal(means0 - delta, cov, size=size_a // 2)
a1 = gen.multivariate_normal(means1, cov, size=math.ceil(size_a / 2))
b0 = gen.multivariate_normal(means0, cov, size=size_b // 2)
b1 = gen.multivariate_normal(means1 - delta, cov, size=math.ceil(size_b / 2))
X = np.vstack([a0, a1, b0, b1])
hi = 2**32
index = gen.randint(hi, size=size_a + size_b)
    return pd.DataFrame(X, index=index)
import math
import pandas as pd
import numpy as np
import sys
class MultiLayerPerceptron:
def __init__(self, batch_size, learning_rate, num_epochs):
self.batch_size = batch_size
self.learning_rate = learning_rate
self.num_epochs = num_epochs
# Layer 1: weight, bias
# w: 512 * (28*28)
# x: (28*28) * 1
# b: 512 * 1
# r: 512 * 1
# Layer 2: weight, bias
# w: 256 * 512
# x: 512 * 1
# b: 256 * 1
# r: 256 * 1
# Layer 3: weight, bias
# w: 10 * 256
# x: 256 * 1
# b: 10 * 1
# r: 10 * 1
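        # Weights are drawn from N(0, 1) and scaled by ~1/sqrt(fan_in)
        # (Xavier/LeCun-style) so activations keep a reasonable variance.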
self.params = {
'W1': np.random.randn(512, 784) * np.sqrt(1.0 / 784),
            'b1': np.random.randn(512, 1) * np.sqrt(1.0 / 784),
'W2': np.random.randn(256, 512) * np.sqrt(1.0 / 512),
'b2': np.random.randn(256, 1) * np.sqrt(1.0 / 512),
'W3': np.random.randn(10, 256) * np.sqrt(1.0 / 256),
'b3': np.random.randn(10, 1) * np.sqrt(1.0 / 256)
}
def start(self):
if len(sys.argv) > 3:
trainX = pd.read_csv(sys.argv[1], header=None)
trainY = pd.read_csv(sys.argv[2], header=None)
testX = pd.read_csv(sys.argv[3], header=None)
else:
trainX = pd.read_csv('train_image.csv', header=None)
            trainY = pd.read_csv('train_label.csv', header=None)
# Collection of pandas scripts that may be useful
import pandas as pd
import os
from PIL import Image
import imagehash
# Image hash functions:
# https://content-blockchain.org/research/testing-different-image-hash-functions/
def phash(img_path):
# Identifies dups even when caption is different
phash = imagehash.phash(Image.open(img_path))
return phash
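# Illustrative duplicate check (paths are assumptions): two memes are treated as visual
# duplicates when their perceptual hashes match, e.g. phash("img/01235.png") == phash("img/01236.png").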
# Data Cleaning
# The HM Dataset is very noisy: In the first version of the dataset there were many duplicates with conflicting labels
# In the second version, the conflicting labels have all been resolved, yet the duplicates remain
def clean_data(data_path="./data", img_path="./data", save_path="./data", force=False):
"""
Cleans the HM train & dev data.
Outputs traindev & pretrain data.
data_path: Path to folder with train.jsonl, dev_unseen.jsonl, dev_seen.jsonl
"""
# Check if the statement was already run and the necessary data exists:
if os.path.exists(os.path.join(save_path, "pretrain.jsonl")):
print('Clean datasets already exist')
if not force:
return
print('Rebuilding clean datasets...')
else:
print("Preparing...")
# Load all files
train = pd.read_json(os.path.join(data_path, "train.jsonl"), lines=True, orient="records")
dev_seen = pd.read_json(os.path.join(data_path, "dev.jsonl"), lines=True, orient="records")
dev_unseen = pd.read_json(os.path.join(data_path, "dev_unseen.jsonl"), lines=True, orient="records")
test = pd.read_json(os.path.join(data_path, "test.jsonl"), lines=True, orient="records")
test_seen = pd.read_json(os.path.join(data_path, "test_seen.jsonl"), lines=True, orient="records")
test_unseen = pd.read_json(os.path.join(data_path, "test_unseen.jsonl"), lines=True, orient="records")
# Lengths
print('')
print('Sizes before cleaning:')
print(f'train: {len(train)} memes')
print(f'dev_seen: {len(dev_seen)} memes')
print(f'dev_unseen: {len(dev_unseen)} memes')
print(f'test: {len(test)} memes')
print(f'test_seen: {len(test_seen)} memes')
print(f'test_unseen: {len(test_unseen)} memes')
# We validate with dev_seen throughout all experiments, so we only take the new data from dev_unseen add it to
# train and then discard dev_unseen
dev_unseen = dev_unseen[~dev_unseen['id'].isin(dev_seen.id.values)].copy()
# Clean training data
df_dict = {'train': train, 'dev_seen': dev_seen, 'dev_unseen': dev_unseen}
train_dist = pd.concat([df.assign(identity=key) for key, df in df_dict.items()])
train_dist['full_path'] = train_dist['img'].apply(lambda x: os.path.join(img_path, str(x)))
# Identify text dups
text_dups = train_dist.text.value_counts().reset_index(name="counter")
text_dups = text_dups.loc[text_dups['counter'] > 1]
rmv_ids = []
for t in text_dups['index'].values:
# Identify image dups
text_dup_df = train_dist.loc[train_dist.text == t].copy()
text_dup_df['hash'] = text_dup_df['full_path'].apply(lambda x: phash(x))
hash_dups = text_dup_df.hash.value_counts().reset_index(name="counter")
hash_dups = hash_dups.loc[hash_dups['counter'] > 1]
for h in hash_dups['index'].values:
# Identify correct label by majority rule
dup_df = text_dup_df.loc[text_dup_df.hash == h]
true_label = round(dup_df.label.values.mean())
# Add duplicate IDs to rmv_ids except for last one
rmv_ids.extend(dup_df.loc[dup_df.label != true_label].id.values)
rmv_ids.extend(dup_df.loc[dup_df.label == true_label].id.values)
rmv_ids.pop()
# Output all files we need
print('')
print('Sizes after cleaning:')
# a) Clean train file (All duplicates are in train)
train = train[~train['id'].isin(rmv_ids)].copy()
train.to_json(path_or_buf=os.path.join(save_path, "train.jsonl"), orient='records', lines=True)
print(f'train: {len(train)} memes')
# b) Pretrain file for ITM & LM pre-training
    pretrain = pd.concat([train, dev_seen, dev_unseen])
"""
Tests for zipline/utils/pandas_utils.py
"""
from unittest import skipIf
import pandas as pd
from zipline.testing import parameter_space, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.pandas_utils import (
categorical_df_concat,
nearest_unequal_elements,
new_pandas,
skip_pipeline_new_pandas,
)
class TestNearestUnequalElements(ZiplineTestCase):
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements(self, tz):
dts = pd.to_datetime(
['2014-01-01', '2014-01-05', '2014-01-06', '2014-01-09'],
).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-30', None, '2014-01-01'),
('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, '2014-01-05'),
('2014-01-02', '2014-01-01', '2014-01-05'),
('2014-01-03', '2014-01-01', '2014-01-05'),
('2014-01-04', '2014-01-01', '2014-01-05'),
('2014-01-05', '2014-01-01', '2014-01-06'),
('2014-01-06', '2014-01-05', '2014-01-09'),
('2014-01-07', '2014-01-06', '2014-01-09'),
('2014-01-08', '2014-01-06', '2014-01-09'),
('2014-01-09', '2014-01-06', None),
('2014-01-10', '2014-01-09', None),
('2014-01-11', '2014-01-09', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements_short_dts(self, tz):
# Length 1.
dts = pd.to_datetime(['2014-01-01']).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, None),
('2014-01-02', '2014-01-01', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
# Length 0
dts = pd.to_datetime([]).tz_localize(tz)
for dt, before, after in (('2013-12-31', None, None),
('2014-01-01', None, None),
('2014-01-02', None, None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
def test_nearest_unequal_bad_input(self):
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2014']),
pd.Timestamp('2014'),
)
self.assertEqual(str(e.exception), 'dts must be unique')
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2013']),
pd.Timestamp('2014'),
)
self.assertEqual(
str(e.exception),
'dts must be sorted in increasing order',
)
class TestCatDFConcat(ZiplineTestCase):
@skipIf(new_pandas, skip_pipeline_new_pandas)
def test_categorical_df_concat(self):
inp = [
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'c'], dtype='category'),
'B': pd.Series([100, 102, 103], dtype='int64'),
'C': pd.Series(['x', 'x', 'x'], dtype='category'),
}
),
pd.DataFrame(
{
'A': pd.Series(['c', 'b', 'd'], dtype='category'),
'B': pd.Series([103, 102, 104], dtype='int64'),
'C': pd.Series(['y', 'y', 'y'], dtype='category'),
}
),
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'd'], dtype='category'),
                    'B': pd.Series([101, 102, 104], dtype='int64'),
'''
<NAME>
Stanford University dept of Geophysics
<EMAIL>
Codes to process geospatial data in earth engine and python
'''
import os
import ee
import time
import tqdm
import fiona
import datetime
import numpy as np
import pandas as pd
import xarray as xr
import rasterio as rio
import geopandas as gp
from osgeo import gdal
from osgeo import osr
from datetime import timedelta
from rasterio import features, mask
from shapely.ops import unary_union
from climata.usgs import DailyValueIO
from pandas.tseries.offsets import MonthEnd
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon
'''
#############################################################################################################
Helpers for working with dataframes, times, dicts etc
#############################################################################################################
'''
def col_to_dt(df):
'''
converts the first col of a dataframe read from CSV to datetime
'''
t = df.copy()
t['dt'] = pd.to_datetime(df[df.columns[0]])
t = t.set_index(pd.to_datetime(t[t.columns[0]]))
t.drop([t.columns[0], "dt"],axis = 1, inplace = True)
return t
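# Illustrative usage (filename assumed): df = col_to_dt(pd.read_csv("timeseries.csv"))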
def dl_2_df(dict_list, dt_idx):
'''
converts a list of dictionaries to a single dataframe
'''
alldat = [item for sublist in [x.values() for x in dict_list] for item in sublist]
# Make the df
alldata = pd.DataFrame(alldat).T
alldata.index = dt_idx
col_headers = [item for sublist in [x.keys() for x in dict_list] for item in sublist]
alldata.columns = col_headers
return alldata
'''
#############################################################################################################
Vector / Shapefile / EE Geometry Functions
#############################################################################################################
'''
# TODO: Make a single function that converts shp (multipoly / single poly / points / ) --> EE geom
def gdf_to_ee_poly(gdf, simplify = True):
if simplify:
gdf = gdf.geometry.simplify(0.01)
lls = gdf.geometry.iloc[0]
x,y = lls.exterior.coords.xy
coords = [list(zip(x,y))]
area = ee.Geometry.Polygon(coords)
return area
def gdf_to_ee_multipoly(gdf):
lls = gdf.geometry.iloc[0]
mps = [x for x in lls]
multipoly = []
for i in mps:
x,y = i.exterior.coords.xy
coords = [list(zip(x,y))]
multipoly.append(coords)
return ee.Geometry.MultiPolygon(multipoly)
def get_area(gdf, fast = True):
t = gdf.buffer(0.001).unary_union
d = gp.GeoDataFrame(geometry=gp.GeoSeries(t))
if fast:
d2 = gp.GeoDataFrame(geometry=gp.GeoSeries(d.simplify(0.001)))
area = gdf_to_ee_multipoly(d2)
else:
area = gdf_to_ee_multipoly(d)
return area
def gen_polys(geometry, dx=0.5, dy=0.5):
'''
Input: ee.Geometry
Return: ee.ImaceCollection of polygons
Use: Subpolys used to submit full res (30m landsat; 10m sentinel) resolution for large areas
'''
bounds = ee.Geometry(geometry).bounds()
coords = ee.List(bounds.coordinates().get(0))
ll = ee.List(coords.get(0))
ur = ee.List(coords.get(2))
xmin = ll.get(0)
xmax = ur.get(0)
ymin = ll.get(1)
ymax = ur.get(1)
xx = ee.List.sequence(xmin, xmax, dx)
yy = ee.List.sequence(ymin, ymax, dy)
polys = []
for x in tqdm(xx.getInfo()):
for y in yy.getInfo():
x1 = ee.Number(x).subtract(ee.Number(dx).multiply(0.5))
x2 = ee.Number(x).add(ee.Number(dx).multiply(0.5))
y1 = ee.Number(y).subtract(ee.Number(dy).multiply(0.5))
y2 = ee.Number(y).add(ee.Number(dy).multiply(0.5))
geomcoords = ee.List([x1, y1, x2, y2]);
rect = ee.Algorithms.GeometryConstructors.Rectangle(geomcoords);
polys.append(ee.Feature(rect))
return ee.FeatureCollection(ee.List(polys)).filterBounds(geometry)
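# Illustrative usage (tile size assumed): gen_polys(area, dx=0.25, dy=0.25) returns a
# FeatureCollection of rectangular tiles intersecting `area`, which can be iterated for per-tile exports.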
'''
#############################################################################################################
Matplotlib Plotting for Vectors / Shapefiles
#############################################################################################################
'''
def draw_poly(gdf, mpl_map, facecolor = "red", alpha = 0.3, edgecolor = 'black', lw = 1, fill = True):
'''
Turns a geopandas gdf into matplotlib polygon patches for friendly plotting with basemap.
'''
for index, row in gdf.iterrows():
lats = []
lons = []
for pt in list(row['geometry'].exterior.coords):
lats.append(pt[1])
lons.append(pt[0])
        x, y = mpl_map(lons, lats)
xy = zip(x,y)
poly = Polygon(list(xy), fc=facecolor, alpha=alpha, ec = edgecolor ,lw = lw, fill = fill)
plt.gca().add_patch(poly)
return
def draw_polys(gdf, mpl_map, facecolor = "red", alpha = 0.3, edgecolor = 'black', lw = 1, fill = True, zorder = 3):
'''
Turns a geopandas gdf of multipolygons into matplotlib polygon patches for friendly plotting with basemap.
'''
for index, row in gdf.iterrows():
lats = []
lons = []
for pt in list(row['geometry'].exterior.coords):
lats.append(pt[1])
lons.append(pt[0])
        x, y = mpl_map(lons, lats)
xy = zip(x,y)
poly = Polygon(list(xy), fc=facecolor, alpha=alpha, ec = edgecolor ,lw = lw, fill = fill, zorder = zorder)
plt.gca().add_patch(poly)
return
def draw_points(gdf, mpl_map, sizecol = None, color = 'red', alpha = 0.7, edgecolor = None, fill = True, zorder = 4):
'''
Turns a geopandas gdf of points into matplotlib lat/lon objects for friendly plotting with basemap.
'''
lats = []
lons = []
for index, row in gdf.iterrows():
for pt in list(row['geometry'].coords):
lats.append(pt[1])
lons.append(pt[0])
if sizecol is None:
sizecol = 50
else:
sizecol = sizecol.values
mpl_map.scatter(lons, lats, latlon=True, s = sizecol, alpha=alpha, c = color, edgecolor = edgecolor, zorder = zorder)
return
'''
#############################################################################################################
EE Wrappers
#############################################################################################################
'''
def calc_monthly_sum(dataset, startdate, enddate, area):
'''
Calculates monthly sums (pd.Dataframe) for EE data given startdate, enddate, and area
Datasets are stored in `data` dict below.
Note the "scaling_factor" parameter,
which is provided by EE for each dataset, and further scaled by temporal resolution to achieve monthly resolution
This is explicitly written in the `data` dict
EE will throw a cryptic error if the daterange you input is not valid for the product of interest.
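    Example (illustrative only -- the asset id, band, scale factor, resolution,
    and shapefile path are placeholder assumptions following the
    (ImageCollection, band, scaling_factor, resolution) layout unpacked below):
        >>> area = get_area(gp.read_file("watershed.shp"))
        >>> dataset = (ee.ImageCollection("IDAHO_EPSCOR/TERRACLIMATE"), "pr", 1.0, 4638)
        >>> df = calc_monthly_sum(dataset, "2004-10-01", "2005-09-30", area)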
'''
ImageCollection = dataset[0]
var = dataset[1]
scaling_factor = dataset[2]
resolution = dataset[3]
dt_idx = pd.date_range(startdate,enddate, freq='MS')
sums = []
seq = ee.List.sequence(0, len(dt_idx)-1)
num_steps = seq.getInfo()
print("processing:")
print("{}".format(ImageCollection.first().getInfo()['id']))
for i in tqdm(num_steps):
start = ee.Date(startdate).advance(i, 'month')
end = start.advance(1, 'month');
im = ee.Image(ImageCollection.select(var).filterDate(start, end).sum().set('system:time_start', start.millis()))
scale = im.projection().nominalScale()
scaled_im = im.multiply(scaling_factor).multiply(ee.Image.pixelArea()).multiply(1e-12) # mm --> km^3
sumdict = scaled_im.reduceRegion(
reducer = ee.Reducer.sum(),
geometry = area,
scale = resolution,
bestEffort= True)
total = sumdict.getInfo()[var]
sums.append(total)
sumdf = pd.DataFrame(np.array(sums), dt_idx + MonthEnd(0))
sumdf.columns = [var]
df = sumdf.astype(float)
return df
def calc_monthly_mean(dataset, startdate, enddate, area):
'''
Same as above, but calculates mean (useful for anoamly detection, state variables like SM and SWE)
'''
ImageCollection = dataset[0]
var = dataset[1]
scaling_factor = dataset[2]
dt_idx = pd.date_range(startdate,enddate, freq='MS')
sums = []
seq = ee.List.sequence(0, len(dt_idx)-1)
num_steps = seq.getInfo()
print("processing:")
print("{}".format(ImageCollection.first().getInfo()['id']))
for i in tqdm(num_steps):
start = ee.Date(startdate).advance(i, 'month')
end = start.advance(1, 'month');
im = ee.ImageCollection(ImageCollection).select(var).filterDate(start, end).mean().set('system:time_start', start.millis())
scale = im.projection().nominalScale()
scaled_im = im.multiply(scaling_factor).multiply(ee.Image.pixelArea()).multiply(1e-12) # mm --> km^3
sumdict = scaled_im.reduceRegion(
reducer = ee.Reducer.sum(),
geometry = area,
scale = scale,
bestEffort = True)
total = sumdict.getInfo()[var]
sums.append(total)
sumdf = pd.DataFrame(np.array(sums), dt_idx + MonthEnd(0))
sumdf.columns = [var]
df = sumdf.astype(float)
return df
def get_grace(dataset, startdate, enddate, area):
'''
Get Grace data from EE. Similar to above
'''
ImageCollection = dataset[0]
var = dataset[1]
scaling_factor = dataset[2]
dt_idx = pd.date_range(startdate,enddate, freq='M')
sums = []
seq = ee.List.sequence(0, len(dt_idx)-1)
print("processing:")
print("{}".format(ImageCollection.first().getInfo()['id']))
num_steps = seq.getInfo()
for i in tqdm(num_steps):
start = ee.Date(startdate).advance(i, 'month')
end = start.advance(1, 'month');
try:
im = ee.ImageCollection(ImageCollection).select(var).filterDate(start, end).sum().set('system:time_start', end.millis())
t2 = im.multiply(ee.Image.pixelArea()).multiply(scaling_factor).multiply(1e-6) # Multiply by pixel area in km^2
scale = t2.projection().nominalScale()
sumdict = t2.reduceRegion(
reducer = ee.Reducer.sum(),
geometry = area,
scale = scale)
result = sumdict.getInfo()[var] * 1e-5 # cm to km
sums.append(result)
except:
sums.append(np.nan) # If there is no grace data that month, append a np.nan
sumdf = pd.DataFrame(np.array(sums), dt_idx)
sumdf.columns = [var]
df = sumdf.astype(float)
return df
def get_ims(dataset, startdate,enddate, area, return_dates = False, table = False, monthly_mean = False, monthly_sum = False):
'''
Returns gridded images for EE datasets
'''
if monthly_mean:
if monthly_sum:
raise ValueError("cannot perform mean and sum reduction at the same time")
ImageCollection = dataset[0]
var = dataset[1]
scaling_factor = dataset[2]
native_res = dataset[3]
dt_idx = pd.date_range(startdate,enddate, freq='MS')
ims = []
seq = ee.List.sequence(0, len(dt_idx)-1)
num_steps = seq.getInfo()
# TODO: Make this one loop ?
print("processing:")
print("{}".format(ImageCollection.first().getInfo()['id']))
for i in tqdm(num_steps):
start = ee.Date(startdate).advance(i, 'month')
end = start.advance(1, 'month');
if monthly_mean:
im1 = ee.ImageCollection(ImageCollection).select(var).filterDate(start, end).mean().set('system:time_start', end.millis())
im = ee.ImageCollection(im1)
elif monthly_sum:
im1 = ee.ImageCollection(ImageCollection).select(var).filterDate(start, end).sum().set('system:time_start', end.millis())
im = ee.ImageCollection(im1)
else:
im = ee.ImageCollection(ImageCollection).select(var).filterDate(start, end).set('system:time_start', end.millis())
# This try / catch is probably not great, but needs to be done for e.g. grace which is missing random months
try:
result = im.getRegion(area,native_res,"epsg:4326").getInfo()
ims.append(result)
except:
continue
results = []
dates = []
print("postprocesing")
for im in tqdm(ims):
header, data = im[0], im[1:]
df = pd.DataFrame(np.column_stack(data).T, columns = header)
df.latitude = pd.to_numeric(df.latitude)
df.longitude = pd.to_numeric(df.longitude)
df[var] = pd.to_numeric(df[var])
if table:
results.append(df)
continue
images = []
for idx,i in enumerate(df.id.unique()):
t1 = df[df.id==i]
arr = array_from_df(t1,var)
arr[arr == 0] = np.nan
images.append(arr*scaling_factor)# This is the only good place to apply the scaling factor.
if return_dates:
date = df.time.iloc[idx]
dates.append(datetime.datetime.fromtimestamp(date/1000.0))
results.append(images)
print("====COMPLETE=====")
# Unpack the list of results
if return_dates:
return [ [item for sublist in results for item in sublist], dates]
else:
return [item for sublist in results for item in sublist]
def array_from_df(df, variable):
'''
Convets a pandas df with lat, lon, variable to a numpy array
'''
# get data from df as arrays
lons = np.array(df.longitude)
lats = np.array(df.latitude)
data = np.array(df[variable]) # Set var here
# get the unique coordinates
uniqueLats = np.unique(lats)
uniqueLons = np.unique(lons)
# get number of columns and rows from coordinates
ncols = len(uniqueLons)
nrows = len(uniqueLats)
# determine pixelsizes
ys = uniqueLats[1] - uniqueLats[0]
xs = uniqueLons[1] - uniqueLons[0]
# create an array with dimensions of image
arr = np.zeros([nrows, ncols], np.float32)
# fill the array with values
counter =0
for y in range(0,len(arr),1):
for x in range(0,len(arr[0]),1):
if lats[counter] == uniqueLats[y] and lons[counter] == uniqueLons[x] and counter < len(lats)-1:
counter+=1
arr[len(uniqueLats)-1-y,x] = data[counter] # we start from lower left corner
return arr
# This is the staging area. Haven's used these in a while, or not tested altogether.
def img_to_arr(eeImage, var_name, area, scale = 30):
temp = eeImage.select(var_name).clip(area)
latlon = eeImage.pixelLonLat().addBands(temp)
latlon = latlon.reduceRegion(
reducer = ee.Reducer.toList(),
geometry = area,
scale = scale
)
data = np.array((ee.Array(latlon.get(var_name)).getInfo()))
lats = np.array((ee.Array(latlon.get('latitude')).getInfo()))
lons = np.array((ee.Array(latlon.get('longitude')).getInfo()))
lc,freq = np.unique(data,return_counts = True)
return data, lats,lons
def imc_to_arr(eeImage):
temp = eeImage.filterBounds(area).first().pixelLonLat()
latlon = temp.reduceRegion(
reducer = ee.Reducer.toList(),
geometry = area,
scale = 1000
)
data = np.array((ee.Array(latlon.get('cropland')).getInfo()))
lats = np.array((ee.Array(latlon.get('latitude')).getInfo()))
lons = np.array((ee.Array(latlon.get('longitude')).getInfo()))
lc,freq = np.unique(data,return_counts = True)
return data, lats,lons
def arr_to_img(data,lats,lons):
uniquelats = np.unique(lats)
uniquelons = np.unique(lons)
ncols = len(uniquelons)
nrows = len(uniquelats)
ys = uniquelats[1] - uniquelats[0]
xs = uniquelons[1] - uniquelons[0]
arr = np.zeros([nrows, ncols], np.float32)
counter = 0
for y in range(0, len(arr),1):
for x in range(0, len(arr[0]),1):
if lats[counter] == uniquelats[y] and lons[counter] == uniquelons[x] and counter < len(lats)-1:
counter+=1
arr[len(uniquelats)-1-y,x] = data[counter]
return arr
def freq_hist(eeImage, area, scale, var_name):
freq_dict = ee.Dictionary(
eeImage.reduceRegion(ee.Reducer.frequencyHistogram(), area, scale).get(var_name)
);
return freq_dict
'''
#############################################################################################################
NetCDF / Gtiff Functions
#############################################################################################################
'''
def get_lrm_swe(shppath, data_dir ="../data/LRM_SWE_monthly" ):
'''
Given a path to a shapefile, compute the monthly SWE
Input: (str) - path to shapefile
Output: (pd.DataFrame) - monthly SWE
'''
# Find SWE files
files = [os.path.join(data_dir,x) for x in os.listdir(data_dir) if x.endswith(".tif")]
files.sort()
# Read CVWS shapefile
with fiona.open(shppath, "r") as shapefile:
cvws_geom = [feature["geometry"] for feature in shapefile]
# Read the files, mask nans, clip to CVWS, extract dates
imdict = {}
for i in tqdm(files[:]):
date = datetime.datetime.strptime(i[-12:-4],'%Y%m%d')+ timedelta(days=-1) # Get the date
datestr = date.strftime('%Y%m%d') # Format date
src = rio.open(i) # Read file
src2 = rio.mask.mask(src, cvws_geom, crop=True) # Clip to shp
arr = src2[0] # read as array
arr = arr.reshape(arr.shape[1], arr.shape[2]) # Reshape bc rasterio has a different dim ordering
arr[arr < 0 ] = np.nan # Mask nodata vals
imdict[datestr] = arr
# Fill in the dates with no SWE with nans
dt_idx = pd.date_range(list(imdict.keys())[0], list(imdict.keys())[-1], freq = "M")
all_dates = {}
for i in dt_idx:
date = i.strftime("%Y%m%d")
if date in imdict.keys():
im = imdict[date]
else:
im = np.zeros_like(list(imdict.values())[0])
im[im==0] = np.nan
all_dates[date] = im
# Stack all dates to 3D array
cvws_swe = np.dstack(list(all_dates.values()))
# Compute monthly sums
swesums = []
for i in range(cvws_swe.shape[2]):
swesums.append(np.nansum(cvws_swe[:,:,i] *500**2 * 1e-9)) # mult by 2500m pixel area, convert m^3 to km^3
swedf = pd.DataFrame(swesums,dt_idx)
swedf.columns = ['swe_lrm']
return swedf
def get_snodas_swe(shppath, data_dir ="/Users/aakash/Desktop/SatDat/SNODAS/SNODAS_CA_processed/" ):
'''
Given a path to a shapefile, compute the monthly SWE
Input: (str) - path to shapefile
Output: (pd.DataFrame) - monthly SWE
'''
# Find SWE files
files = [os.path.join(data_dir,x) for x in os.listdir(data_dir) if x.endswith(".tif")]
files.sort()
# Read CVWS shapefile
with fiona.open(shppath, "r") as shapefile:
cvws_geom = [feature["geometry"] for feature in shapefile]
# Read the files, mask nans, clip to CVWS, extract dates
imdict = {}
for i in tqdm(files[:]):
date = datetime.datetime.strptime(i[-16:-8],'%Y%m%d')# Get the date
if date.day == 1:
datestr = date.strftime('%Y%m%d') # Format date
src = rio.open(i) # Read file
src2 = rio.mask.mask(src, cvws_geom, crop=True) # Clip to shp
arr = src2[0].astype(float) # read as array
arr = arr.reshape(arr.shape[1], arr.shape[2]) # Reshape bc rasterio has a different dim ordering
arr[arr < 0 ] = np.nan # Mask nodata vals
imdict[datestr] = arr/1000 # divide by scale factor to get SWE in m
# Stack all dates to 3D array
cvws_swe = np.dstack(list(imdict.values()))
# Compute monthly sums
swesums = []
for i in range(cvws_swe.shape[2]):
swesums.append(np.nansum(cvws_swe[:,:,i] * 1000**2 * 1e-9)) # multiply by 1000m pixel size, convert m^3 to km^3
dt_idx = [datetime.datetime.strptime(x, '%Y%m%d')+ timedelta(days=-1) for x in imdict.keys()]
swedf = pd.DataFrame(swesums,dt_idx)
swedf.columns = ['swe_snodas']
return swedf
def get_ssebop(shppath):
'''
Given a path to a shapefile, compute the monthly SSEBop ET
Input: (str) - path to shapefile
Output: (pd.DataFrame) - monthly SSEBop ET
'''
files = [os.path.join("../data",x) for x in os.listdir("../data") if x.endswith("nc") if "SSEBOP" in x]
ds = xr.open_dataset(files[0])
gdf = gp.read_file(shppath)
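    # `rasterize` is assumed to be a helper defined elsewhere in this package that
    # burns the shapefile geometries into a 0/1 mask aligned with the ET grid.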
ds['catch'] = rasterize(gdf.geometry, ds['et'][0].coords)
ssebop_masked = ds['et']*ds['catch']
dt = pd.date_range(ds.time[0].values, ds.time[-1].values, freq = "MS")
et= []
for i in ssebop_masked:
et.append(i.sum()* 1e-6) # m^2 to km^2
etdf = pd.DataFrame({'et': np.array(et)}, index = dt)
etdf.columns = ["aet_ssebop"]
    etdf.set_index(etdf.index + MonthEnd(0))
import logging
import os
import shutil
import tempfile
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
import pytest
from PyDSS.common import LimitsFilter
from PyDSS.dataset_buffer import DatasetBuffer
from PyDSS.export_list_reader import ExportListProperty
from PyDSS.metrics import MultiValueTypeMetricBase
from PyDSS.simulation_input_models import (
SimulationSettingsModel, create_simulation_settings, load_simulation_settings
)
from PyDSS.value_storage import ValueByNumber, ValueByList
from PyDSS.utils.utils import load_data
from tests.common import FakeElement
STORE_FILENAME = os.path.join(tempfile.gettempdir(), "store.h5")
FLOATS = (1.0, 2.0, 3.0, 4.0, 5.0)
COMPLEX_NUMS = (
complex(1, 2), complex(3, 4), complex(5, 6), complex(7, 8),
complex(9, 10),
)
LIST_COMPLEX_NUMS = (
[complex(1, 2), complex(3, 4)],
[complex(5, 6), complex(7, 8)],
[complex(9, 10), complex(11, 12)],
[complex(13, 14), complex(15, 16)],
[complex(17, 18), complex(19, 20)],
)
logger = logging.getLogger(__name__)
OBJS = [
FakeElement("Fake.a", "a"),
FakeElement("Fake.b", "b"),
]
@pytest.fixture
def simulation_settings():
project_path = Path(tempfile.gettempdir()) / "pydss_projects"
if project_path.exists():
shutil.rmtree(project_path)
project_name = "test_project"
project_path.mkdir()
filename = create_simulation_settings(project_path, project_name, ["s1"])
yield load_simulation_settings(filename)
if os.path.exists(STORE_FILENAME):
os.remove(STORE_FILENAME)
if project_path.exists():
shutil.rmtree(project_path)
class FakeMetric(MultiValueTypeMetricBase):
def __init__(self, prop, dss_objs, options, values):
super().__init__(prop, dss_objs, options)
self._elem_index = 0
self._val_index = 0
self._values = values
def _get_value(self, obj, timestamp):
prop = next(iter(self._properties.values()))
if isinstance(self._values[self._val_index], list):
val = ValueByList(obj.FullName, prop.name, self._values[self._val_index], ["", ""])
else:
val = ValueByNumber(obj.FullName, prop.name, self._values[self._val_index])
logger.debug("elem_index=%s val_index=%s, val=%s", self._elem_index, self._val_index, val.value)
self._elem_index += 1
if self._elem_index == len(self._dss_objs):
self._elem_index = 0
# Change values once we've iterated through all elements.
self._val_index += 1
if self._val_index == len(self._values):
self._val_index = 0
return val
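# Note: at a given time step every fake element receives the same values[i] (the element
# index resets before the value index advances), so the per-column assertions in the
# tests below can compare stored columns directly against `values`.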
def test_metrics_store_all(simulation_settings):
data = {
"property": "Property",
"store_values_type": "all",
}
values = FLOATS
prop = ExportListProperty("Fake", data)
metric = FakeMetric(prop, OBJS, simulation_settings, values)
with h5py.File(STORE_FILENAME, mode="w", driver="core") as hdf_store:
metric.initialize_data_store(hdf_store, "", len(values))
for i in range(len(values)):
metric.append_values(i)
metric.close()
dataset = hdf_store["Fake/ElementProperties/Property"]
assert dataset.attrs["length"] == len(values)
assert dataset.attrs["type"] == "per_time_point"
df = DatasetBuffer.to_dataframe(dataset)
assert isinstance(df, pd.DataFrame)
assert len(df) == len(values)
for column in df.columns:
for val1, val2 in zip(df[column].values, values):
assert val1 == val2
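# Each stored value is a float64 (8 bytes), which is what the byte-count check below expects.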
assert metric.max_num_bytes() == len(values) * len(OBJS) * 8
def test_metrics_store_all_complex_abs(simulation_settings):
data = {
"property": "Property",
"store_values_type": "all",
"data_conversion": "abs",
}
values = COMPLEX_NUMS
prop = ExportListProperty("Fake", data)
metric = FakeMetric(prop, OBJS, simulation_settings, values)
with h5py.File(STORE_FILENAME, mode="w", driver="core") as hdf_store:
metric.initialize_data_store(hdf_store, "", len(values))
for i in range(len(values)):
metric.append_values(i)
metric.close()
dataset = hdf_store["Fake/ElementProperties/Property"]
assert dataset.attrs["length"] == len(values)
assert dataset.attrs["type"] == "per_time_point"
df = DatasetBuffer.to_dataframe(dataset)
assert len(df) == len(values)
for column in df.columns:
for val1, val2 in zip(df[column].values, values):
assert isinstance(val1, float)
assert val1 == abs(val2)
def test_metrics_store_all_complex_sum(simulation_settings):
data = {
"property": "Property",
"store_values_type": "all",
"data_conversion": "sum",
}
values = LIST_COMPLEX_NUMS
prop = ExportListProperty("Fake", data)
metric = FakeMetric(prop, OBJS, simulation_settings, values)
with h5py.File(STORE_FILENAME, mode="w", driver="core") as hdf_store:
metric.initialize_data_store(hdf_store, "", len(values))
for i in range(len(values)):
metric.append_values(i)
metric.close()
dataset = hdf_store["Fake/ElementProperties/Property"]
assert dataset.attrs["length"] == len(values)
assert dataset.attrs["type"] == "per_time_point"
df = DatasetBuffer.to_dataframe(dataset)
assert len(df) == len(values)
for column in df.columns:
for val1, val2 in zip(df[column].values, values):
assert isinstance(val1, complex)
assert val1 == sum(val2)
def test_metrics_store_all_complex_abs_sum(simulation_settings):
data = {
"property": "Property",
"store_values_type": "all",
"data_conversion": "abs_sum",
}
values = LIST_COMPLEX_NUMS
prop = ExportListProperty("Fake", data)
metric = FakeMetric(prop, OBJS, simulation_settings, values)
with h5py.File(STORE_FILENAME, mode="w", driver="core") as hdf_store:
metric.initialize_data_store(hdf_store, "", len(values))
for i in range(len(values)):
metric.append_values(i)
metric.close()
dataset = hdf_store["Fake/ElementProperties/Property"]
assert dataset.attrs["length"] == len(values)
assert dataset.attrs["type"] == "per_time_point"
df = DatasetBuffer.to_dataframe(dataset)
assert len(df) == len(values)
for column in df.columns:
for val1, val2 in zip(df[column].values, values):
assert isinstance(val1, float)
assert val1 == abs(sum(val2))
def test_metrics_store_all_filtered(simulation_settings):
data = {
"property": "Property",
"store_values_type": "all",
"limits": [1.0, 3.0],
"limits_filter": LimitsFilter.OUTSIDE,
}
values = FLOATS
prop = ExportListProperty("Fake", data)
metric = FakeMetric(prop, OBJS, simulation_settings, values)
with h5py.File(STORE_FILENAME, mode="w", driver="core") as hdf_store:
metric.initialize_data_store(hdf_store, "", len(values))
for i in range(len(values)):
metric.append_values(i)
metric.close()
dataset = hdf_store["Fake/ElementProperties/Property"]
assert dataset.attrs["length"] == 2 * len(OBJS)
assert dataset.attrs["type"] == "filtered"
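# With limits [1.0, 3.0] and LimitsFilter.OUTSIDE, only 4.0 and 5.0 are kept, once per
# fake element, hence the stored values [4, 4, 5, 5] and the (time_step, element) index
# pairs checked against the companion "PropertyTimeStep" dataset below.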
assert [x for x in dataset[:4]] == [4, 4, 5, 5]
time_step_dataset = hdf_store["Fake/ElementProperties/PropertyTimeStep"]
assert time_step_dataset.attrs["type"] == "time_step"
assert time_step_dataset.attrs["length"] == 4
assert [x for x in time_step_dataset[0]] == [3, 0]
assert [x for x in time_step_dataset[1]] == [3, 1]
assert [x for x in time_step_dataset[2]] == [4, 0]
assert [x for x in time_step_dataset[3]] == [4, 1]
def test_metrics_store_moving_average_and_max(simulation_settings):
window_size = 10
values = [float(i) for i in range(50)] + [float(i) for i in range(25)]
data1 = {
"property": "Property",
"store_values_type": "max",
}
data2 = {
"property": "Property",
"store_values_type": "moving_average",
"window_size": window_size,
}
prop1 = ExportListProperty("Fake", data1)
prop2 = ExportListProperty("Fake", data2)
metric = FakeMetric(prop1, OBJS, simulation_settings, values)
metric.add_property(prop2)
base_df = pd.DataFrame(values)
base_series = base_df.iloc[:, 0]
base_rm = base_series.rolling(window_size).mean()
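# base_rm is the pandas rolling-mean reference that the metric's moving_average output is checked against below.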
with h5py.File(STORE_FILENAME, mode="w", driver="core") as hdf_store:
metric.initialize_data_store(hdf_store, "", len(values))
for i in range(len(values)):
metric.append_values(i)
metric.close()
dataset1 = hdf_store["Fake/ElementProperties/PropertyMax"]
assert dataset1.attrs["length"] == 1
assert dataset1.attrs["type"] == "value"
assert dataset1[0][0] == 49
assert dataset1[0][1] == 49
dataset2 = hdf_store["Fake/ElementProperties/PropertyAvg"]
assert dataset2.attrs["length"] == len(values)
assert dataset2.attrs["type"] == "per_time_point"
df = DatasetBuffer.to_dataframe(dataset2)
assert len(df) == len(values)
for column in df.columns:
for val1, val2 in zip(df[column].values, base_rm.values):
if np.isnan(val1):
assert np.isnan(val2)
else:
assert val1 == val2
def test_metrics_store_moving_average_with_limits(simulation_settings):
limits = [1.0, 50.0]
window_size = 10
data = {
"property": "Property",
"store_values_type": "moving_average",
"window_size": window_size,
"limits": limits,
"limits_filter": LimitsFilter.OUTSIDE,
}
values = [float(x) for x in range(1, 101)]
expected_values = [x for x in values if x < limits[0] or x > limits[1]]
base_df = | pd.DataFrame(values) | pandas.DataFrame |
#work on approaches to create a spark dataframe
#create the spark session object
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import *
spark = SparkSession.builder.appName("create-df").master("local[3]").getOrCreate()
sc = spark.sparkContext
print('created spark session object')
#create df from a parallel collection (list), using toDF() method
print('create df from a parallel collection (list)')
df=spark.range(500).toDF("number")
df.show()
df.printSchema()
df.first()
#rename the columns and create another dataframe
df_renamed=df.toDF("id")
df_renamed.printSchema()
#create row objects
r1=[('alice',1)]
r2=[{'name':'alice','age':1}]
schema = StructType([
StructField("name",StringType(),True),
StructField("age",IntegerType(),True)
])
#using createDataFrame() method
df1=spark.createDataFrame(r1)
df1.show()
df1.printSchema()
df2=spark.createDataFrame(r1,['name','age'])
df2.show()
df2.printSchema()
df3=spark.createDataFrame(r2)
df3.show()
df3.printSchema()
#create dataframe from an rdd
rdd1 = sc.parallelize(r1)
df4 = spark.createDataFrame(rdd1)
df4.show()
df4.printSchema()
df5 = spark.createDataFrame(rdd1,['name','age'])
df5.show()
df5.printSchema()
df6=spark.createDataFrame(rdd1,schema)
df6.show()
df6.printSchema()
# create a new dataframe by aliasing - an alias is mainly useful for disambiguating columns in self-joins
df7=df6.alias("df6_alias")
df7.show()
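# Illustrative use of an alias (an addition, not in the original script): it lets the two
# sides of a self-join be referenced unambiguously.
df6.alias("a").join(df6.alias("b"), col("a.name") == col("b.name")).show()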
#alternative approach: build the dataframe from Row objects (kept commented out)
"""
from pyspark.sql import Row
person = Row('name', 'age')
person_rdd = rdd1.map(lambda x: person(*x))
df8 = spark.createDataFrame(person_rdd)
df8.show()
df8.printSchema()
"""
#create df from pandas dataframe
import pandas as pd
d={'col1':[1,2],'col2':[3,4]}
pd_df1= | pd.DataFrame(data=d) | pandas.DataFrame |
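# Hedged note (assumption): the natural next step for this row would be converting the
# pandas frame to a Spark DataFrame, e.g. spark.createDataFrame(pd_df1).show()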
import time
import importlib
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_table
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from inspect import signature
class Dataset:
def __init__(self, df):
self.df = df
def detect_outlier_zscore(data, threshold):
outliers = pd.DataFrame([], columns=['ID', 'sqdist', 'cluster'])
mean = np.mean(data.sqdist)
std = np.std(data.sqdist)
for y in data.itertuples():
z_score = (y.sqdist - mean)/std
if np.abs(z_score) > threshold:
outliers = outliers.append(
{'ID': y.ID, 'sqdist': y.sqdist, 'cluster': y.cluster}, ignore_index=True)
return outliers
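# Illustrative usage (assumption, not part of the app): one inflated squared distance is
# flagged at a modest threshold.
# demo = pd.DataFrame({'ID': range(5), 'sqdist': [1.0, 1.1, 0.9, 1.0, 50.0], 'cluster': 0})
# detect_outlier_zscore(demo, threshold=1.5)  # returns only the row with sqdist == 50.0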
def detect_outlier_quantile(data, percent):
quantile = data.sqdist.quantile(percent)
outliers = pd.DataFrame([], columns=['ID', 'sqdist', 'cluster'])
for y in data.itertuples():
if y.sqdist > quantile:
outliers = outliers.append(
{'ID': y.ID, 'sqdist': y.sqdist, 'cluster': y.cluster}, ignore_index=True)
return outliers
def outlierss(df, number_cluster, outlier_method, threshold):
clusters = []
outliers = | pd.DataFrame([], columns=['ID', 'sqdist', 'cluster']) | pandas.DataFrame |
# Web-Scraper for Reddit Data
# Data used for paper and results were last scraped in September 2020.
# Adapted from https://github.com/hesamuel/goodbye_world/blob/master/code/01_Data_Collection.ipynb
# data analysis imports
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# NLP Imports
import nltk
nltk.download('wordnet')
nltk.download('stopwords')
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
import re
import requests
import time
from random import randint
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from PIL import Image
import wordninja
# creating user agent
headers = {"User-agent" : "randomuser"} # set user agent to reddit account username
url_1 = "https://www.reddit.com/r/depression.json"
res = requests.get(url_1, headers=headers)
print(res.status_code) # expect 200 if the subreddit JSON is reachable
# scraper function
def reddit_scrape(url_string, number_of_scrapes, output_list):
#scraped posts outputted as lists
after = None
for _ in range(number_of_scrapes):
if _ == 0:
print("SCRAPING {}\n--------------------------------------------------".format(url_string))
print("<<<SCRAPING COMMENCED>>>")
print("Downloading Batch {} of {}...".format(1, number_of_scrapes))
elif (_+1) % 5 ==0:
print("Downloading Batch {} of {}...".format((_ + 1), number_of_scrapes))
if after is None:
params = {}
else:
# pass reddit's "after" cursor so the next request fetches the following page of posts
params = {"after": after}
res = requests.get(url_string, params=params, headers=headers)
if res.status_code == 200:
the_json = res.json()
output_list.extend(the_json["data"]["children"])
after = the_json["data"]["after"]
else:
print(res.status_code)
break
time.sleep(randint(1,6))
print("<<<SCRAPING COMPLETED>>>")
print("Number of posts downloaded: {}".format(len(output_list)))
print("Number of unique posts: {}".format(len(set([p["data"]["name"] for p in output_list]))))
# remove any repeat posts
def create_unique_list(original_scrape_list, new_list_name):
data_name_list=[]
for i in range(len(original_scrape_list)):
if original_scrape_list[i]["data"]["name"] not in data_name_list:
new_list_name.append(original_scrape_list[i]["data"])
data_name_list.append(original_scrape_list[i]["data"]["name"])
#CHECKING IF THE NEW LIST IS OF SAME LENGTH AS UNIQUE POSTS
print("LIST NOW CONTAINS {} UNIQUE SCRAPED POSTS".format(len(new_list_name)))
# scraping suicide_watch data
suicide_data = []
reddit_scrape("https://www.reddit.com/r/SuicideWatch.json", 50, suicide_data)
suicide_data_unique = []
create_unique_list(suicide_data, suicide_data_unique)
# add suicide_watch to dataframe
suicide_watch = pd.DataFrame(suicide_data_unique)
suicide_watch["is_suicide"] = 1
suicide_watch.head()
# scraping depression data
depression_data = []
reddit_scrape("https://www.reddit.com/r/depression.json", 50, depression_data)
depression_data_unique = []
create_unique_list(depression_data, depression_data_unique)
# add depression to dataframe
depression = pd.DataFrame(depression_data_unique)
depression["is_suicide"] = 0
depression.head()
# saving data
suicide_watch.to_csv('suicide_watch.csv', index = False)
depression.to_csv('depression.csv', index = False)
# creating combined CSV
depression = pd.read_csv('depression.csv')
suicide_watch = | pd.read_csv('suicide_watch.csv') | pandas.read_csv |
import h5py
import numpy as np
from collections import Counter
import os
import pandas as pd
from multiprocessing import Pool
import time
def read_loom(loom_path):
assert os.path.exists(loom_path)
with h5py.File(loom_path, "r", libver='latest', swmr=True) as f:
gene_name = f["row_attrs/Gene"][...].astype(np.str)
assert np.max(list(Counter(gene_name).values())) == 1, "{}".format(list(filter(lambda x: x[1] > 1, Counter(gene_name).items())))
bc_gene_mat = f["matrix"][...].transpose()
bc_gene_mat[bc_gene_mat == -1] = np.nan
cell_id = f["col_attrs/CellID"][...].astype(np.str) if "CellID" in f["col_attrs"].keys() else np.arange(bc_gene_mat.shape[0]).astype(np.str)
return | pd.DataFrame(bc_gene_mat, columns=gene_name, index=cell_id) | pandas.DataFrame |
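# Usage sketch (path is hypothetical): read_loom("sample.loom") returns a cells x genes
# dataframe indexed by CellID, with -1 entries replaced by NaN.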
import pandas as pd
from pandas.io.json import json_normalize
class EsGroupBy:
def __init__(self,
es_connection,
index_pattern,
time_range_start,
time_range_end,
filters,
single_page_size=10000,
groupbys=None,
operations=None):
self.es = es_connection
self.SIZE = single_page_size
self.index_pattern = index_pattern
self.groupby(groupbys)
self.agg(operations)
self.filters = filters
self.time_range_start = time_range_start
self.time_range_end = time_range_end
self.dataframe = pd.DataFrame()
def groupby(self, groupby_list):
self.groupby_list = [groupby_list] if isinstance(
groupby_list, str) else groupby_list
return self
def agg(self, operations_list):
self.operations_list = [
{k: v} for k, v in operations_list.items()
] if isinstance(operations_list, dict) else operations_list
def __sources_element_builder(self, name):
return {
name: {
'terms': {
'field': name,
'order': 'asc'
}
}
}
def __sources_builder(self, groupby_list):
sources = []
for el in groupby_list:
sources.append(self.__sources_element_builder(el))
return sources
def __aggregations_element_builder(self, field_operation):
field, operation = next(iter(field_operation.items()))
return {
field + '_' + operation: {
operation: {
'field': field
}
}}
def __aggregations_builder(self, operations_list):
operations = {}
for el in operations_list:
operations.update(self.__aggregations_element_builder(el))
return operations
def __filter_element_builder(self, field_value):
field, value = next(iter(field_value.items()))
return {
'match_phrase': {
field: {
'query': value
}
}
}
def __time_range_filter_builder(self, start, end):
return {
'range': {
'@timestamp': {
'from': start,
'to': end,
'include_lower': True,
'include_upper': False
}
}
}
def __filters_builder(self, filters, time_range_start, time_range_end):
filters_value = []
for el in filters:
filters_value.append(self.__filter_element_builder(el))
filters_value.append(self.__time_range_filter_builder(
time_range_start,
time_range_end))
return filters_value
def dsl(self, after=None):
if after is None:
composite_value = {"size": self.SIZE,
"sources": self.__sources_builder(
self.groupby_list)}
else:
composite_value = {"size": self.SIZE,
"sources": self.__sources_builder(
self.groupby_list),
"after": after}
my_buckets_value = {"composite": composite_value,
"aggregations": self.__aggregations_builder(
self.operations_list)}
aggs_value = {"my_buckets": my_buckets_value}
must_value = self.__filters_builder(self.filters,
self.time_range_start,
self.time_range_end)
query_value = {
"bool": {
"must": must_value,
"filter": [
{
"match_all": {}
}
]
}
}
# "size" : 0 because here hits are not needed, just aggs
full_dsl = {"size": 0, "aggs": aggs_value, "query": query_value}
return full_dsl
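# Illustrative shape of the generated DSL (field names "host" and "bytes" are assumed
# examples), given one groupby field and one {"bytes": "sum"} aggregation:
# {"size": 0,
#  "aggs": {"my_buckets": {"composite": {"size": 10000,
#                                        "sources": [{"host": {"terms": {"field": "host", "order": "asc"}}}]},
#                          "aggregations": {"bytes_sum": {"sum": {"field": "bytes"}}}}},
#  "query": {"bool": {"must": [...], "filter": [{"match_all": {}}]}}}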
def execute(self):
num_iteration = 0
after_key = None
result_size = -1
while(result_size == -1 or result_size == self.SIZE):
dsl = self.dsl(after_key)
res_json = self.es.search(
index=self.index_pattern,
body=dsl
)
res_json_buckets = res_json['aggregations'][
'my_buckets'][
'buckets']
if (res_json_buckets != []):
after_key = res_json['aggregations'][
'my_buckets'][
'after_key']
df_res = pd.DataFrame(res_json_buckets)
df_list = [ | json_normalize(df_res['key']) | pandas.io.json.json_normalize |