| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
|---|---|---|
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
import datetime as dt
import importlib
import json
import logging
import time
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
from sqlalchemy import Column, Integer, String, DateTime, Float
from sqlalchemy.sql.sqltypes import TIMESTAMP, VARCHAR, FLOAT, INTEGER
import iotfunctions
from . import db as db_module
from .automation import (TimeSeriesGenerator, DateGenerator, MetricGenerator, CategoricalGenerator)
from .exceptions import StageException
from .pipeline import (CalcPipeline, DropNull, JobController, JobLogNull, Trace, AggregateItems)
from .stages import (DataReader, DataWriter, DataWriterFile)
from .util import (MemoryOptimizer, build_grouper, categorize_args, reset_df_index)
logger = logging.getLogger(__name__)
def retrieve_entity_type_metadata(raise_error=True, **kwargs):
"""
Get server metadata for entity type
"""
db = kwargs['_db']
# get kpi functions metadata
meta = db.http_request(object_type='engineInput', object_name=kwargs['logical_name'], request='GET',
raise_error=raise_error)
try:
meta = json.loads(meta)
except (TypeError, json.JSONDecodeError):
meta = None
if meta is None or 'exception' in meta:
raise RuntimeError(('API call to server did not retrieve valid entity '
' type properties for %s.' % kwargs['logical_name']))
if meta['kpiDeclarations'] is None:
meta['kpiDeclarations'] = []
logger.warning(('This entity type has no calculated kpis'))
# cache function catalog metadata in the db object
function_list = [x['functionName'] for x in meta['kpiDeclarations']]
db.load_catalog(install_missing=True, function_list=function_list)
# map server properties
params = {}
params['_entity_type_id'] = meta['entityTypeId']
params['_db_schema'] = meta['schemaName']
params['name'] = meta['metricsTableName']
params['_timestamp'] = meta['metricTimestampColumn']
params['_dimension_table_name'] = meta['dimensionsTable']
params['_data_items'] = meta['dataItems']
# constants
c_meta = db.http_request(object_type='constants', object_name=kwargs['logical_name'], request='GET')
try:
c_meta = json.loads(c_meta)
except (TypeError, json.JSONDecodeError):
logger.debug(('API call to server did not retrieve valid entity type'
' properties. No properties set.'))
else:
for p in c_meta:
key = p['name']
if isinstance(p['value'], dict):
params[key] = p['value'].get('value', p['value'])
else:
params[key] = p['value']
logger.debug('Retrieved server constant %s with value %s', key, params[key])
params = {**kwargs, **params}
return (params, meta)
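# Illustrative sketch (added commentary, not part of the original module).
# It shows the shape of the server response that retrieve_entity_type_metadata()
# maps onto entity type parameters; the sample values below are hypothetical.
def _example_map_server_metadata():
    meta = {'entityTypeId': 42, 'schemaName': 'BLUADMIN', 'metricsTableName': 'test_entity',
            'metricTimestampColumn': 'evt_timestamp', 'dimensionsTable': None,
            'dataItems': [], 'kpiDeclarations': []}
    params = {'_entity_type_id': meta['entityTypeId'], '_db_schema': meta['schemaName'],
              'name': meta['metricsTableName'], '_timestamp': meta['metricTimestampColumn'],
              '_dimension_table_name': meta['dimensionsTable'], '_data_items': meta['dataItems']}
    return params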
class EntityType(object):
"""
Data is organised around Entity Types. Entity Types have one or more
physical database objects for their data. When created, a new Entity Type
will attempt to connect itself to a table of the same name in the
database. If no table exists the Entity Type will create one.
Entity types describe the payload of an AS job. A job is built by a
JobController using functions metadata prepared by the Entity Type.
Metadata prepared is:
_functions:
List of function objects
_data_items:
List of data items and all of their metadata such as their
datatype.
_granularities_dict:
Dictionary keyed on granularity name. Contains a granularity object
that provides access to granularity metadata such as the time
level and other dimensions involved in the aggregation.
_schedules_dict:
Dictionary keyed on a schedule frequency containing other metadata
about the operations to be run at this frequency, e.g. how many days
should be backtracked when retrieving data.
Entity types may be initialized as client objects for local testing
or may be loaded from the server. After initialization all of the
above instance variables will be populated. The metadata looks the same
regardless of whether the entity type was loaded from the server
or initialized on the client. The logic to build the metadata is
different though.
Parameters
----------
name: str
Name of the entity type. Use lower case. Will be used as the physical
database table name so don't use database reserved words or special
characters.
db: Database object
Contains the connection info for the database
*args:
Additional positional arguments are used to add the list of SQL Alchemy
Column objects contained within this table. Similar to the style of a
CREATE TABLE sql statement. There is no need to specify column names
if you are using an existing database table as an entity type.
**kwargs
Additional keyword args.
_timestamp: str
Override the timestamp column name from the default of 'evt_timestamp'
"""
is_entity_type = True
is_local = False
auto_create_table = True
aggregate_complete_periods = True # align data for aggregation with time grain to avoid partial periods
log_table = 'KPI_LOGGING' # deprecated, to be removed
checkpoint_table = 'KPI_CHECKPOINT' # deprecated, to be removed
chunk_size = None # use job controller default chunk
default_backtrack = None
trace_df_changes = True
drop_existing = False
# These two columns will be available in the dataframe of a pipeline
_entity_id = 'deviceid' # identify the instance
_timestamp_col = '_timestamp' # copy of the event timestamp from the index
# This column will identify an instance in the index
_df_index_entity_id = 'id'
# when automatically creating a new dimension, use this suffix
_auto_dim_suffix = '_auto_dim'
# when looking for an automatically created numeric index it should be named:
auto_index_name = '_auto_index_'
# constants declared as part of an entity type definition
ui_constants = None
_functions = None
# generator
_scd_frequency = '2D' # deprecated. Use parameters on EntityDataGenerator
_activity_frequency = '3D' # deprecated. Use parameters on EntityDataGenerator
_start_entity_id = 73000 # deprecated. Use parameters on EntityDataGenerator
_auto_entity_count = 5 # deprecated. Use parameters on EntityDataGenerator
# pipeline work variables stages
_dimension_table = None
_scd_stages = None
_custom_calendar = None
# variables that will be set when loading from the server
_entity_type_id = None
logical_name = None
_timestamp = 'evt_timestamp'
_dimension_table_name = None
_db_connection_dbi = None
_db_schema = None
_data_items = None
tenant_id = None
_entity_filter_list = None
_start_ts_override = None
_end_ts_override = None
_stages = None
_schedules_dict = None
_granularities_dict = None
_input_set = None
_output_list = None
_invalid_stages = None
_disabled_stages = None
# processing defaults
_pre_aggregate_time_grain = None # aggregate incoming data before processing
_auto_read_from_ts_table = True # read new data from designated time series table for the entity
_pre_agg_rules = None # pandas agg dictionary containing list of aggregates to apply for each item
_pre_agg_outputs = None # dictionary containing list of output items names for each item
_data_reader = DataReader
_abort_on_fail = False
_auto_save_trace = 30
save_trace_to_file = False
drop_null_class = DropNull
enable_downcast = False
allow_projection_list_trim = True
_write_usage = False
# deprecated class variables (to be removed)
_checkpoint_by_entity = True # manage a separate checkpoint for each entity instance
_is_initial_transform = True
_is_preload_complete = False
def __init__(self, name, db, *args, **kwargs):
logger.debug('Initializing new entity type using iotfunctions %s', iotfunctions.__version__)
try:
self.logical_name = kwargs.get('logical_name', None)
if self.logical_name is None:
self.logical_name = name
except AttributeError:
self.logical_name = name
if db is None:
name = 'None'
elif db.db_type == 'db2':
name = name.upper()
else:
name = name.lower()
self.name = name
self.description = kwargs.get('description', None)
if self.description is None:
self.description = ''
else:
del (kwargs['description'])
self.activity_tables = {}
self.scd = {}
self.db = db
if self.db is not None:
self.tenant_id = self.db.tenant_id
self._system_columns = [self._entity_id, self._timestamp_col, 'logicalinterface_id', 'devicetype', 'format',
'updated_utc', self._timestamp]
self._stage_type_map = self.default_stage_type_map()
self._custom_exclude_col_from_auto_drop_nulls = []
self._drop_all_null_rows = True
if self._scd_stages is None:
self._scd_stages = []
if self._data_items is None:
self._data_items = []
if self._granularities_dict is None:
self._granularities_dict = {}
# additional params set from kwargs
self.set_params(**kwargs)
# Start a trace to record activity on the entity type
self._trace = Trace(object_name=None, parent=self, db=db)
if self._disabled_stages is None:
self._disabled_stages = []
if self._invalid_stages is None:
self._invalid_stages = []
if len(self._disabled_stages) > 0 or len(self._invalid_stages) > 0:
self.trace_append(created_by=self, msg='Skipping disabled and invalid stages', log_method=logger.info,
**{'skipped_disabled_stages': [s['functionName'] for s in self._disabled_stages],
'skipped_disabled_data_items': [s['output'] for s in self._disabled_stages],
'skipped_invalid_stages': [s['functionName'] for s in self._invalid_stages],
'skipped_invalid_data_items': [s['output'] for s in self._invalid_stages]})
# attach to time series table
if self._db_schema is None:
logger.warning(('No _db_schema specified in **kwargs. Using'
' default database schema.'))
self._mandatory_columns = [self._timestamp, self._entity_id]
# separate args into categories
categories = [('constant', 'is_ui_control', None), ('granularity', 'is_granularity', None),
('function', 'is_function', None), ('column', None, Column)]
categorized = categorize_args(categories, 'functions', *args)
cols = list(categorized.get('column', []))
functions = list(categorized.get('function', []))
constants = list(categorized.get('constant', []))
grains = list(categorized.get('granularity', []))
if self.drop_existing and db is not None and not self.is_local:
self.drop_tables()
# create a database table if needed using cols
if name is not None and db is not None and not self.is_local:
try:
self.table = self.db.get_table(self.name, self._db_schema)
except KeyError:
if self.auto_create_table:
ts = db_module.TimeSeriesTable(self.name, self.db, *cols, **kwargs)
self.table = ts.table
# self.db.create()
msg = 'Create table %s' % self.name
logger.info(msg)
else:
msg = ('Database table %s not found. Unable to create'
' entity type instance. Provide a valid table name'
' or use the auto_create_table = True keyword arg'
' to create a table. ' % (self.name))
raise ValueError(msg)
# populate the data items metadata from the supplied columns
if isinstance(self._data_items, list) and len(self._data_items) == 0:
self._data_items = self.build_item_metadata(self.table)
else:
logger.warning((
'Created a logical entity type. It is not connected to a real database table, so it cannot perform any database operations.'))
# add granularities
for g in grains:
logger.debug('Adding granularity to entity type: %s', g.name)
self._granularities_dict[g.name] = g
# add constants
self.ui_constants = constants
self.build_ui_constants()
# _functions
# functions may have been provided as a kwarg and may be included as args
# combine all
if self._functions is None:
self._functions = []
self._functions.extend(functions)
if name is not None and db is not None and not self.is_local:
db.entity_type_metadata[self.logical_name] = self
logger.debug(('Initialized entity type %s'), str(self))
def add_activity_table(self, name, activities, *args, **kwargs):
"""
Add an activity table for this entity type.
Parameters
----------
name: str
table name
activities: list of strs
activity type codes: these identify the nature of the activity, e.g. PM is Preventative Maintenance
*args: Column objects
other columns describing the activity, e.g. materials_cost
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to add activity tables ')
raise ValueError(msg)
kwargs['_activities'] = activities
kwargs['schema'] = self._db_schema
# name = name.lower()
if self.db.db_type == 'db2':
name = name.upper()
else:
name = name.lower()
table = db_module.ActivityTable(name, self.db, *args, **kwargs)
try:
sqltable = self.db.get_table(name, self._db_schema)
except KeyError:
table.create()
self.activity_tables[name] = table
def add_slowly_changing_dimension(self, property_name, datatype, **kwargs):
"""
Add a slowly changing dimension table containing a single property for this entity type.
Parameters
----------
property_name : str
name of property, e.g. firmware_version (lower case, no database reserved words)
datatype: sqlalchemy datatype
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to add slowly changing dimensions ')
raise ValueError(msg)
property_name = property_name.lower()
name = '%s_scd_%s' % (self.name, property_name)
kwargs['schema'] = self._db_schema
if self.db.db_type == 'db2':
name = name.upper()
else:
name = name.lower()
table = db_module.SlowlyChangingDimension(name=name, database=self.db, property_name=property_name,
datatype=datatype, **kwargs)
try:
self.db.get_table(name, self._db_schema)
except KeyError:
table.create()
self.scd[property_name] = table
def _add_scd_pipeline_stage(self, scd_lookup):
self._scd_stages.append(scd_lookup)
def build_agg_dict_from_meta_list(self, meta_list):
agg_dict = OrderedDict()
input_items = set()
output_items = []
for f in meta_list:
input_item = f['input'].get('source')
output_item = f['output'].get('name')
aggregate = f['functionName']
try:
agg_dict[input_item].append(aggregate)
except KeyError:
agg_dict[input_item] = [aggregate]
input_items.add(input_item)
output_items.append(output_item)
return (agg_dict, input_items, output_items)
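# Illustrative sketch (added commentary, not part of the original module): the
# same append-or-create logic as build_agg_dict_from_meta_list(), applied to a
# hypothetical meta_list.
def _example_build_agg_dict():
    from collections import OrderedDict
    meta_list = [{'input': {'source': 'temperature'}, 'output': {'name': 'temperature_mean'}, 'functionName': 'mean'},
                 {'input': {'source': 'temperature'}, 'output': {'name': 'temperature_max'}, 'functionName': 'max'},
                 {'input': {'source': 'pressure'}, 'output': {'name': 'pressure_max'}, 'functionName': 'max'}]
    agg_dict = OrderedDict()
    for f in meta_list:
        # group aggregate function names by their input item
        agg_dict.setdefault(f['input']['source'], []).append(f['functionName'])
    return agg_dict  # OrderedDict([('temperature', ['mean', 'max']), ('pressure', ['max'])])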
def build_arg_metadata(self, obj):
"""
Examine the metadata provided by build_ui() to understand more about
the arguments to a function.
Place the values of inputs and outputs into two dicts. Return these
two dicts in a tuple along with an output_meta dict that contains
argument values and types.
Build the _input_set and _output_list. These describe the set of
data items required as inputs to a function and the list of data
items produced by the function.
"""
name = obj.__class__.__name__
try:
(inputs, outputs) = obj.build_ui()
except (AttributeError, NotImplementedError) as e:
try:
fn_metadata = obj.metadata()
inputs = fn_metadata.get('input', None)
outputs = fn_metadata.get('output', None)
except (AttributeError, KeyError) as ex:
msg = ('Can\'t get metadata for function %s. Implement the'
' build_ui() method for this function. %s' % (name, str(e)))
raise NotImplementedError(msg)
input_args = {} # this is not used. Included only to maintain compatibility of return signature
output_args = {} # this is not used. Included only to maintain compatibility of return signature
output_meta = {} # this is not used. Included only to maintain compatibility of return signature
output_list = []
# There are two ways to gather inputs to a function.
# 1) from the arguments of the function
# 2) from an explicit list of items returned by the get_input_items
# method
try:
input_set = set(obj.get_input_items())
except AttributeError:
input_set = set()
else:
if len(input_set) > 0:
logger.debug(('Function %s has explicit required input items '
' delivered by the get_input_items() method: %s'), name, input_set)
if not isinstance(inputs, list):
raise TypeError(('Function registration metadata must be defined'
' using a list of objects derived from iotfunctions'
' BaseUIControl. Check metadata for %s'
' %s ' % (name, inputs)))
if not isinstance(outputs, list):
raise TypeError(('Function registration metadata must be defined'
' using a list of objects derived from iotfunctions'
' BaseUIControl. Check metadata for %s'
' %s ' % (name, outputs)))
args = []
args.extend(inputs)
args.extend(outputs)
for a in args:
try:
# get arg name and type from UI object
type_ = a.type_
arg = a.name
except AttributeError as e:
try:
# get arg name and type from legacy dict
type_ = a.get('type', None)
arg = a.get('name', None)
except AttributeError:
type_ = None
arg = None
if type_ is None or arg is None:
msg = ('Error while getting metadata from function. The inputs'
' and outputs of the function are not described correctly'
' using UIcontrols with a type_ %s and name %s' % (type_, arg))
raise TypeError(msg)
arg_value = getattr(obj, arg)
out_arg = None
out_arg_value = None
if type_ == 'DATA_ITEM':
# the argument is an input that contains a data item or
# list of data items
if isinstance(arg_value, list):
input_set |= set(arg_value)
else:
input_set.add(arg_value)
logger.debug('Using input items %s for %s', arg_value, arg)
elif type_ == 'OUTPUT_DATA_ITEM':
# the arg is an output item or list of them
out_arg = arg
out_arg_value = arg_value
# some inputs implicitly describe outputs
try:
out_arg = a.output_item
except AttributeError:
pass # no need to check legacy dict for this property as it was not supported in the legacy dict
else:
if out_arg is not None:
out_arg_value = getattr(obj, out_arg)
# process output args
if out_arg is not None:
if isinstance(out_arg_value, list):
output_list.extend(out_arg_value)
else:
output_list.append(out_arg_value)
logger.debug('Using output items %s for %s', out_arg_value, out_arg)
# output_meta is present in the AS metadata structure, but not
# currently produced for local functions
return (input_args, output_args, output_meta, input_set, output_list)
def build_ui_constants(self):
"""
Build attributes for each ui constant declared with the entity type
"""
if self.ui_constants is None:
logger.debug('No constants declared in entity definition')
self.ui_constants = []
params = {}
for c in self.ui_constants:
try:
params[c.name] = c.default
except AttributeError:
logger.warning(('Cannot set value of parameter %s as it does'
' not have a default value'), c.name)
self.set_params(**params)
def build_flat_stage_list(self):
"""
Build a flat list of all function objects defined for entity type
"""
stages = []
for stage in self._functions:
try:
is_system = stage.is_system_function
except AttributeError:
is_system = False
logger.warning(('Function %s has no is_system_function property.'
' This means it was not inherited from '
' an iotfunctions base class. AS authors are'
' strongly encouraged to always inherit '
' from iotfunctions base classes'), stage.__class__.__name__)
if not is_system:
stages.append(stage)
return stages
def build_granularities(self, grain_meta, freq_lookup):
"""
Convert AS granularity metadata to granularity objects.
"""
out = {}
for g in grain_meta:
grouper = []
freq = None
entity_id = None
if g['entityFirst']:
grouper.append(pd.Grouper(key=self._entity_id))
entity_id = self._entity_id
if g['frequency'] is not None:
freq = (self.get_grain_freq(g['frequency'], freq_lookup, None))
if freq is None:
raise ValueError(('Invalid frequency name %s. The frequency name'
' must exist in the frequency lookup %s' % (g['frequency'], freq_lookup)))
# add a number to the frequency to make it compatible with pd.Timedelta
if freq[0] not in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:
freq = '1' + freq
grouper.append(pd.Grouper(key=self._timestamp, freq=freq))
custom_calendar = None
custom_calendar_keys = []
dimensions = []
# differentiate between dimensions and custom calendar items
for d in g['dataItems']:
grouper.append(pd.Grouper(key=d))
if self._custom_calendar is not None:
if d in self._custom_calendar._output_list:
custom_calendar_keys.append(d)
dimensions.append(d)
granularity = Granularity(name=g['name'], grouper=grouper, dimensions=dimensions,
entity_name=self.logical_name, timestamp=self._timestamp, entity_id=entity_id,
custom_calendar_keys=custom_calendar_keys, freq=freq,
custom_calendar=custom_calendar)
out[g['name']] = granularity
return out
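# Illustrative sketch (added commentary, not part of the original module): how a
# grouper list like the one built above is applied to raw data. Column names
# follow the class defaults ('deviceid', 'evt_timestamp'); the data is made up.
def _example_granularity_groupby():
    import pandas as pd
    df = pd.DataFrame({'deviceid': ['A', 'A', 'B', 'B'],
                       'evt_timestamp': pd.date_range('2020-01-01', periods=4, freq='30min'),
                       'temperature': [20.0, 21.0, 19.5, 22.5]})
    grouper = [pd.Grouper(key='deviceid'), pd.Grouper(key='evt_timestamp', freq='1H')]
    # aggregate the metric to the granularity defined by the grouper list
    return df.groupby(grouper)['temperature'].mean()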
def build_item_metadata(self, table):
"""
Build a client generated version of AS server metadata from a
sqlalchemy table object.
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' cannot build item metadata from tables ')
raise ValueError(msg)
for col_name, col in list(table.c.items()):
item = {}
if col_name not in self.get_excluded_cols():
item['name'] = col_name
item['type'] = 'METRIC'
item['parentDataItem'] = None
item['kpiFunctionDto'] = None
item['columnName'] = col.name
item['columnType'] = self.db.get_as_datatype(col)
item['sourceTableName'] = self.name
item['tags'] = []
item['transient'] = False
self._data_items.append(item)
return self._data_items
def build_schedules(self, metadata):
"""
Build a dictionary of schedule metadata from the schedules contained
within function definitions.
The schedule dictionary is keyed on a pandas freq string. This
frequency denotes the schedule interval. The dictionary contains a
tuple (start_hour,start_minute,backtrack_days)
Returns
-------
dict of schedules keyed on frequency. The incoming function metadata is updated in place.
Example
-------
{ '5min': (16, 3, 7) }
5 minute schedule interval with a start time of 4:03pm and backtrack of 7 days.
"""
freqs = {}
for f in metadata:
if f['schedule'] is not None:
freq = f['schedule']['every']
start = time.strptime(f['schedule']['starting_at'], '%H:%M:%S')
start_hour = start[3]
start_min = start[4]
backtrack = f['backtrack']
if backtrack is not None:
backtrack_days = backtrack.get('days', 0) + (backtrack.get('hours', 0) / 24) + (
backtrack.get('minutes', 0) / 1440)
else:
backtrack_days = None
existing_schedule = freqs.get(freq, None)
if existing_schedule is None:
f[freq] = (start_hour, start_min, backtrack_days)
else:
corrected_schedule = list(existing_schedule)
if existing_schedule[0] > start_hour:
corrected_schedule[0] = start_hour
logger.warning(('There is a conflict in the schedule metadata.'
' Picked the earlier start hour of %s instead of %s'
' for schedule %s.' % (start_hour, existing_schedule[0], freq)))
if existing_schedule[1] > start_min:
corrected_schedule[1] = start_min
logger.warning(('There is a conflict in the schedule metadata.'
' Picked the earlier start minute of %s instead of %s'
' for schedule %s.' % (start_min, existing_schedule[1], freq)))
if backtrack_days is not None:
if existing_schedule[2] is None or existing_schedule[2] < backtrack_days:
corrected_schedule[2] = backtrack_days
logger.warning(('There is a conflict in the schedule metadata.'
' Picked the longer backtrack of %s instead of %s'
' for schedule %s.' % (backtrack_days, existing_schedule[2], freq)))
f[freq] = tuple(corrected_schedule)
freqs[freq] = f[freq]
f['schedule'] = freq
return freqs
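# Illustrative sketch (added commentary, not part of the original module):
# parsing one function's schedule metadata into the
# (start_hour, start_minute, backtrack_days) tuple described in the docstring.
# The schedule payload below is hypothetical.
def _example_parse_schedule():
    import time
    schedule = {'every': '5min', 'starting_at': '16:03:00'}
    backtrack = {'days': 7, 'hours': 0, 'minutes': 0}
    start = time.strptime(schedule['starting_at'], '%H:%M:%S')
    backtrack_days = backtrack['days'] + backtrack['hours'] / 24 + backtrack['minutes'] / 1440
    return {schedule['every']: (start.tm_hour, start.tm_min, backtrack_days)}  # {'5min': (16, 3, 7.0)}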
def classify_stages(self):
"""
Create a dictionary of stage objects. Dictionary is keyed by
stage type and a granularity obj. It contains a list of stage
objects. Stages are classified by timing of execution, ie: preload,
get_data, transform, aggregate
"""
logger.debug('Classifying stages by timing of execution, ie: preload, get_data, transform, aggregate')
stage_metadata = dict()
active_granularities = set()
# Add a data_reader stage. This will read entity data.
if self._auto_read_from_ts_table:
auto_reader = self._data_reader(name='read_entity_data', obj=self)
stage_type = self.get_stage_type(auto_reader)
granularity = None # input level
stage_metadata[(stage_type, granularity)] = [auto_reader]
auto_reader.schedule = None
auto_reader._entity_type = self
else:
logger.debug(('Skipped auto read of payload data as'
' payload does not have _auto_read_from_ts_table'
' set to True'))
# Build a stage for each function.
for s in self._functions:
# replace deprecated function
obj = self.get_replacement(s)
# add metadata to stage
try:
obj.name
except AttributeError:
obj.name = obj.__class__.__name__
try:
obj._schedule
except AttributeError:
obj._schedule = None
try:
obj.granularity
except AttributeError:
obj.granularity = None
# the stage needs to know what entity type it belongs to
obj._entity_type = self
# classify stage
stage_type = self.get_stage_type(obj)
granularity = obj.granularity
if granularity is not None and isinstance(granularity, str):
granularity = self._granularities_dict.get(granularity, False)
if not granularity:
msg = ('Cannot build stage metadata. The granularity metadata'
' is invalid. Granularity of function is %s. Valid '
' granularities are %s' % (granularity, list(self._granularities_dict.keys())))
raise StageException(msg, obj.name)
elif isinstance(granularity, Granularity):
pass
else:
granularity = None
try:
# add to stage_type / granularity
stage_metadata[(stage_type, granularity)].append(obj)
except KeyError:
# start a new stage_type / granularity
stage_metadata[(stage_type, granularity)] = [obj]
# Remember all active granularities
if granularity is not None:
active_granularities.add(granularity)
# add metadata derived from function registration and function args
# input set and output list are critical metadata for the dependency model
# there are three ways to set them
# 1) using the instance variables _input_set and _output_list
# 2) using the methods get_input_set and get_output_list
# 3) using the function's registration metadata
if obj._input_set is not None:
logger.debug('Input set was preset for function %s', obj.name)
input_set = obj._input_set
else:
try:
input_set = obj.get_input_set()
except AttributeError:
input_set = None
if obj._output_list is not None:
logger.debug('Output list set was preset for function %s', obj.name)
output_list = obj._output_list
else:
try:
output_list = obj.get_output_list()
except AttributeError:
output_list = None
if input_set is None or output_list is None:
# get the input set and output list from the function argument metadata
(in_, out, out_meta, reg_input_set, reg_output_list) = self.build_arg_metadata(obj)
if input_set is None:
input_set = reg_input_set
if output_list is None:
output_list = reg_output_list
# set the _input_set and _output_list
obj._input_set = input_set
obj._output_list = output_list
# The stage may have metadata parameters that need to be
# copied onto the entity type
try:
entity_metadata = obj._metadata_params
except AttributeError:
entity_metadata = {}
logger.debug(('Function %s has no _metadata_params'
' property. This property allows the stage'
' to add properties to the entity type.'
' Using default of %s'), obj.name, entity_metadata)
if entity_metadata is not None and entity_metadata:
self.set_params(**entity_metadata)
self.trace_append(created_by=obj, msg='Adding entity type properties from function',
log_method=logger.debug, **entity_metadata)
# The stage may be a special stage that should be added to
# a special stages list, e.g. stages that have
# the property is_scd_lookup = True should be added to the
# _scd_stages list
specials = {'is_scd_lookup': self._scd_stages}
for function_prop, list_obj in list(specials.items()):
try:
is_function_prop = getattr(obj, function_prop)
except AttributeError:
is_function_prop = False
if is_function_prop:
list_obj.append(obj)
# Add for each granularity without frequency two AggregateItem stages. The result columns of these stages
# are used in the DataWriter when the aggregation results are pushed to the database
for gran in active_granularities:
if gran.freq is None:
for func_name, output_name in {('max', DataWriter.ITEM_NAME_TIMESTAMP_MAX),
('min', DataWriter.ITEM_NAME_TIMESTAMP_MIN)}:
new_stage = AggregateItems(input_items=[self._timestamp], aggregation_function=func_name,
output_items=[output_name])
new_stage._entity_type = self
new_stage.name = new_stage.__class__.__name__
new_stage._schedule = None
new_stage.granularity = gran
new_stage._input_set = {self._timestamp}
new_stage._output_list = [output_name]
stage_type = self.get_stage_type(new_stage)
stage_metadata[(stage_type, gran)].append(new_stage)
return stage_metadata
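# Illustrative sketch (added commentary, not part of the original module): the
# dictionary returned above is keyed on (stage_type, granularity) tuples. Plain
# strings stand in here for stage objects and granularity objects.
def _example_stage_classification():
    stage_metadata = {}
    for stage_type, granularity, stage in [('transform', None, 'calc_a'),
                                           ('transform', None, 'calc_b'),
                                           ('simple_aggregate', 'day', 'sum_a')]:
        # append to an existing key or start a new list, as classify_stages() does
        stage_metadata.setdefault((stage_type, granularity), []).append(stage)
    return stage_metadata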
def build_stage_metadata(self, *args):
"""
Make a new JobController payload from a list of function objects
"""
metadata = []
for f in args:
# if function is deprecated it may have a replacement
f = self.get_replacement(f)
fn = {}
try:
name = f.name
except AttributeError:
name = f.__class__.__name__
fn['name'] = name
fn['object_instance'] = f
fn['description'] = f.__doc__
fn['functionName'] = f.__class__.__name__
fn['enabled'] = True
fn['execStatus'] = False
fn['schedule'] = None
fn['backtrack'] = None
fn['granularity'] = f.granularity
(fn['input'], fn['output'], fn['outputMeta'], fn['input_set'], fn['output_list']) = self.build_arg_metadata(
f)
fn['inputMeta'] = None
metadata.append(fn)
logger.debug(('Added local function instance as job stage: %s'), fn)
self._stages = self.build_stages(function_meta=metadata, granularities_dict=self._granularities_dict)
return metadata
def index_df(self, df):
"""
Create an index on the deviceid and the timestamp
"""
if self._df_index_entity_id is None:
self._df_index_entity_id = self._entity_id
if self._timestamp_col is None:
self._timestamp_col = self._timestamp
if df.index.names != [self._df_index_entity_id, self._timestamp]:
try:
df = df.set_index([self._df_index_entity_id, self._timestamp])
except KeyError:
df = reset_df_index(df, auto_index_name=self.auto_index_name)
try:
df = df.set_index([self._df_index_entity_id, self._timestamp])
except KeyError:
try:
df[self._df_index_entity_id] = df[self._entity_id]
df = df.set_index([self._df_index_entity_id, self._timestamp])
except KeyError:
raise KeyError(('Error attempting to index time series'
' dataframe. Unable to locate index'
' columns: %s or %s, %s') % (
self._df_index_entity_id, self._entity_id, self._timestamp))
logger.debug(('Indexed dataframe on %s, %s'), self._df_index_entity_id, self._timestamp)
else:
logger.debug(('Found existing index on %s, %s.'
'No need to recreate index'), self._df_index_entity_id, self._timestamp)
# create a dummy column for _entity_id
if self._entity_id != self._df_index_entity_id:
df[self._entity_id] = df.index.get_level_values(self._df_index_entity_id)
# create a dummy column for _timestamp
if self._timestamp != self._timestamp_col:
df[self._timestamp_col] = df.index.get_level_values(self._timestamp)
return df
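# Illustrative sketch (added commentary, not part of the original module): the
# indexing convention applied by index_df(), shown on a tiny made-up frame. The
# index levels and dummy columns follow the class defaults.
def _example_index_df():
    import pandas as pd
    df = pd.DataFrame({'id': ['A', 'B'],
                       'evt_timestamp': pd.to_datetime(['2020-01-01', '2020-01-02']),
                       'temperature': [20.0, 21.5]})
    df = df.set_index(['id', 'evt_timestamp'])
    # copy the index levels back out as the dummy columns described above
    df['deviceid'] = df.index.get_level_values('id')
    df['_timestamp'] = df.index.get_level_values('evt_timestamp')
    return df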
def cos_save(self):
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to save to cloud object storage ')
raise ValueError(msg)
name = ['entity_type', self.name]
name = '.'.join(name)
self.db.cos_save(self, name)
@classmethod
def default_stage_type_map(cls):
"""
Configure how properties of stages are used to set the stage type
that is used by the job controller to decide how to process a stage
"""
return [('preload', 'is_preload'), ('get_data', 'is_data_source'), ('transform', 'is_transformer'),
('aggregate', 'is_data_aggregator'), ('simple_aggregate', 'is_simple_aggregator'),
('complex_aggregate', 'is_complex_aggregator'), ]
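# Illustrative sketch (added commentary, not part of the original module): how
# a stage map like the one above resolves a stage type. The first map entry
# whose boolean property is set wins; DummyStage is a made-up stand-in.
def _example_resolve_stage_type():
    stage_map = [('preload', 'is_preload'), ('get_data', 'is_data_source'),
                 ('transform', 'is_transformer'), ('simple_aggregate', 'is_simple_aggregator')]

    class DummyStage:
        is_data_source = True
        is_simple_aggregator = True

    for stage_type, prop in stage_map:
        if getattr(DummyStage, prop, False):
            return stage_type  # returns 'get_data' because it appears first in the map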
def df_sort_timestamp(self, df):
"""
Sort a dataframe on the timestamp column. Returns a tuple containing
the sorted dataframe and a column_name for the timestamp column.
"""
ts_col_name = self._timestamp
# timestamp may be column or in index
try:
df.sort_values([ts_col_name], inplace=True)
except KeyError:
try:
# legacy check for a redundant _timestamp alternative column
df.sort_values([self._timestamp_col], inplace=True)
ts_col_name = self._timestamp_col
except KeyError:
try:
df.sort_index(level=[ts_col_name], inplace=True)
except:
raise
return (df, ts_col_name)
def drop_tables(self, recreate=False):
"""
Drop tables known to be associated with this entity type
"""
self.db.drop_table(self.name, schema=self._db_schema, recreate=recreate)
self.drop_child_tables(recreate=recreate)
def drop_child_tables(self, recreate=False):
"""
Drop all child tables
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to drop child tables ')
raise ValueError(msg)
if self._dimension_table_name is None:
tables = []
else:
tables = [self._dimension_table_name]
tables.extend(self.activity_tables.values())
tables.extend(self.scd.values())
[self.db.drop_table(x, self._db_schema, recreate=recreate) for x in tables]
msg = 'dropped tables %s' % tables
logger.info(msg)
def exec_local_pipeline(self, start_ts=None, end_ts=None, entities=None, **kw):
"""
Test the functions on an entity type
Test will be run on local metadata. It will not use the server
job log. Results will be written to file.
"""
params = {'data_writer': DataWriterFile, 'keep_alive_duration': None, 'save_trace_to_file': True,
'default_backtrack': 'checkpoint', 'trace_df_changes': True, '_abort_on_fail': True,
'job_log_class': JobLogNull, '_auto_save_trace': None, '_start_ts_override': start_ts,
'_end_ts_override': end_ts, '_entity_filter_list': entities, '_production_mode': False}
kw = {**params, **kw}
job = JobController(payload=self, **kw)
# propagate parameters to functions
for f in self._functions:
for key, value in list(kw.items()):
setattr(f, key, value)
job.execute()
def get_attributes_dict(self):
"""
Produce a dictionary containing all attributes
"""
c = {}
for att in dir(self):
value = getattr(self, att)
if not callable(value):
c[att] = value
return c
def get_calc_pipeline(self, stages=None):
"""
Make a new CalcPipeline object. Reset processing variables.
"""
warnings.warn('get_calc_pipeline() is deprecated. Use build_job()', DeprecationWarning)
self._scd_stages = []
self._custom_calendar = None
self._is_initial_transform = True
return CalcPipeline(stages=stages, entity_type=self)
def get_function_replacement_metadata(self, meta):
"""
replace incoming function metadata for aggregate functions with
metadata that will be used to build a DataAggregator
"""
replacement = {'Sum': 'sum', 'Minimum': 'min', 'Maximum': 'max', 'Mean': 'mean', 'Median': 'median',
'Count': 'count', 'DistinctCount': 'count_distinct', 'StandardDeviation': 'std',
'Variance': 'var', 'Product': 'product', 'First': 'first', 'Last': 'last'}
name = meta.get('functionName', None)
replacement_name = replacement.get(name, None)
if replacement_name is not None:
meta['functionName'] = replacement_name
return (meta.get('granularity', None), meta)
else:
return (None, None)
def get_local_column_lists_by_type(self, columns, known_categoricals_set=None):
"""
Examine a list of columns and produce a tuple containing names
of metrics, dates, categoricals and others
"""
if known_categoricals_set is None:
known_categoricals_set = set()
metrics = []
dates = []
categoricals = []
others = []
if columns is None:
columns = []
all_cols = set([x.name for x in columns])
# exclude known categoricals that are not present in table
known_categoricals_set = known_categoricals_set.intersection(all_cols)
for c in columns:
data_type = c.type
if isinstance(data_type, (FLOAT, Float, INTEGER, Integer)):
metrics.append(c.name)
elif db_module.DB2_DOUBLE is not None and isinstance(data_type, db_module.DB2_DOUBLE):
metrics.append(c.name)
elif isinstance(data_type, (VARCHAR, String)):
categoricals.append(c.name)
elif isinstance(data_type, (TIMESTAMP, DateTime)):
dates.append(c.name)
else:
others.append(c.name)
msg = 'Found column %s of unknown data type %s' % (c, data_type.__class__.__name__)
logger.warning(msg)
# reclassify categoricals that were not correctly classified based on data type
for c in known_categoricals_set:
if c not in categoricals:
categoricals.append(c)
metrics = [x for x in metrics if x != c]
dates = [x for x in dates if x != c]
others = [x for x in others if x != c]
return (metrics, dates, categoricals, others)
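# Illustrative sketch (added commentary, not part of the original module): a
# simplified version of the column classification above, using a throwaway
# SQLAlchemy table definition instead of a real entity table.
def _example_classify_columns():
    from sqlalchemy import Column, DateTime, Float, MetaData, String, Table
    table = Table('example', MetaData(),
                  Column('temperature', Float), Column('status', String(32)),
                  Column('evt_timestamp', DateTime))
    metrics = [c.name for c in table.columns if isinstance(c.type, Float)]
    categoricals = [c.name for c in table.columns if isinstance(c.type, String)]
    dates = [c.name for c in table.columns if isinstance(c.type, DateTime)]
    return (metrics, dates, categoricals)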
def get_custom_calendar(self):
return self._custom_calendar
def get_data(self, start_ts=None, end_ts=None, entities=None, columns=None):
"""
Retrieve entity data at input grain or preaggregated
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to retrieve database data ')
raise ValueError(msg)
tw = {} # info to add to trace
if entities is None:
tw['entity_filter'] = 'all'
else:
tw['entity_filter'] = '%s entities' % len(entities)
if self._pre_aggregate_time_grain is None:
df = self.db.read_table(table_name=self.name, schema=self._db_schema, timestamp_col=self._timestamp,
parse_dates=None, columns=columns, start_ts=start_ts, end_ts=end_ts,
entities=entities, dimension=self._dimension_table_name)
tw['pre-aggregated'] = None
else:
(metrics, dates, categoricals, others) = self.db.get_column_lists_by_type(self.name, self._db_schema)
if self._dimension_table_name is not None:
categoricals.extend(self.db.get_column_names(self._dimension_table_name, self._db_schema))
if columns is None:
columns = []
columns.extend(metrics)
columns.extend(dates)
columns.extend(categoricals)
columns.extend(others)
# make sure each column is in the aggregate dictionary
# apply a default aggregate for each column not specified in the aggregation metadata
if self._pre_agg_rules is None:
self._pre_agg_rules = {}
self._pre_agg_outputs = {}
for c in columns:
try:
self._pre_agg_rules[c]
except KeyError:
if c not in [self._timestamp, self._entity_id]:
if c in metrics:
self._pre_agg_rules[c] = 'mean'
self._pre_agg_outputs[c] = 'mean_%s' % c
else:
self._pre_agg_rules[c] = 'max'
self._pre_agg_outputs[c] = 'max_%s' % c
else:
pass
df = self.db.read_agg(table_name=self.name, schema=self._db_schema, groupby=[self._entity_id],
timestamp=self._timestamp, time_grain=self._pre_aggregate_time_grain,
agg_dict=self._pre_agg_rules, agg_outputs=self._pre_agg_outputs, start_ts=start_ts,
end_ts=end_ts, entities=entities, dimension=self._dimension_table_name)
tw['pre-aggregated'] = self._pre_aggregate_time_grain
tw['rows_retrieved'] = len(df.index)
tw['start_ts'] = start_ts
tw['end_ts'] = end_ts
self.trace_append(created_by=self, msg='Retrieved entity timeseries data for %s' % self.name, **tw)
# Optimizing the data frame size using downcasting
if self.enable_downcast:
memo = MemoryOptimizer()
df = memo.downcastNumeric(df)
try:
df = self.index_df(df)
except (AttributeError, KeyError):
pass
return df
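# Illustrative sketch (added commentary, not part of the original module): the
# pre-aggregation behaviour described above, reproduced client side with pandas
# instead of db.read_agg(). Metrics default to 'mean', other columns to 'max'.
def _example_pre_aggregate():
    import pandas as pd
    df = pd.DataFrame({'deviceid': ['A', 'A', 'B', 'B'],
                       'evt_timestamp': pd.date_range('2020-01-01', periods=4, freq='15min'),
                       'temperature': [20.0, 22.0, 19.0, 21.0],
                       'status': ['ok', 'warn', 'ok', 'ok']})
    agg_rules = {'temperature': 'mean', 'status': 'max'}
    grouped = df.groupby(['deviceid', pd.Grouper(key='evt_timestamp', freq='1H')])
    return grouped.agg(agg_rules)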
def get_data_items(self):
"""
Get the list of data items defined
:return: list of dicts containing data item metadata
"""
return self._data_items
def get_excluded_cols(self):
"""
Return a list of physical columns that should be excluded when returning
the list of data items
"""
return ['logicalinterface_id', 'format', 'updated_utc', 'devicetype', 'eventtype']
def get_grain_freq(self, grain_name, lookup, default):
"""
Lookup a pandas frequency string from an AS granularity name
"""
for l in lookup:
if grain_name == l['name']:
return l['alias']
return default
def get_output_items(self):
"""
Get a list of non calculated items: outputs from the time series table
"""
items = [x.get('columnName') for x in self._data_items if
x.get('type') == 'METRIC' or x.get('type') == 'DIMENSION']
return items
def get_log(self, rows=100):
"""
Get KPI execution log info. Returns a dataframe.
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to get log data ')
raise ValueError(msg)
query, log = self.db.query(self.log_table, self._db_schema)
query = query.filter(log.c.entity_type == self.name).order_by(log.c.timestamp_utc.desc()).limit(rows)
df = self.db.read_sql_query(query)
return df
def get_latest_log_entry(self):
"""
Get the most recent log entry. Returns dict.
"""
last = self.get_log(rows=1)
last = last.to_dict('records')[0]
return last
def get_param(self, param, default=None):
try:
out = getattr(self, param)
except AttributeError:
out = default
return out
def get_end_ts_override(self):
if self._end_ts_override is not None:
if isinstance(self._end_ts_override, dt.datetime):
return self._end_ts_override
date_time_obj = dt.datetime.strptime(self._end_ts_override[0], '%Y-%m-%d %H:%M:%S')
return date_time_obj
return None
def get_stage_type(self, stage):
"""
Examine the stage object to determine how it should be processed by
the JobController
Sets the stage type to the first valid entry in the stage map
the stage map is a list of tuples containing a stage type and
a boolean property name:
example:
[('get_data','is_data_source'),
('simple_aggregate','is_simple_aggregate')]
If a stage has both is_data_source = True and
is_simple_aggregate = True, the stage type will be returned as
'get_data'.
"""
for (stage_type, prop) in self._stage_type_map:
try:
prop_value = getattr(stage, prop)
except AttributeError:
pass
else:
if prop_value:
return stage_type
raise TypeError(('Could not identify stage type for stage'
' %s from the stage map. Adjust the stage map'
' for the entity type or define an appropriate'
' is_<something> property on the class of the '
' stage. Stage map is %s' % (stage.name, self._stage_type_map)))
def get_start_ts_override(self):
if self._start_ts_override is not None:
if isinstance(self._start_ts_override, dt.datetime):
date_time_obj = self._start_ts_override
else:
date_time_obj = dt.datetime.strptime(self._start_ts_override[0], '%Y-%m-%d %H:%M:%S')
return date_time_obj
return None
def get_replacement(self, obj):
"""
Get replacement for deprecated function
"""
try:
is_deprecated = obj.is_deprecated
except AttributeError:
is_deprecated = False
if is_deprecated:
try:
obj = obj.get_replacement()
except AttributeError:
msg = ('Skipped deprecated function. The function'
' %s has no designated replacement. Provide a'
' replacement by implementing the get_replacement()'
' method or rework entity type to remove the reference'
' to the deprecated function' % obj.__class__.__name__)
raise StageException(msg, obj.__class__.__name__)
else:
logger.debug('Entity Type has a reference to a deprecated'
' function. This function was automatically'
' replaced by %s', obj.__class__.__name__)
return obj
def generate_data(self, entities=None, days=0, seconds=300, freq='1min', scd_freq='1D', write=True,
drop_existing=False, data_item_mean=None, data_item_sd=None, data_item_domain=None, columns=None,
start_entity_id=None, auto_entity_count=None, datasource=None, datasourcemetrics=None):
"""
Generate random time series data for entities
Parameters
----------
entities: list
List of entity ids to generate data for
days: number
Number of days worth of data to generate (back from system date)
seconds: number
Number of seconds worth of data to generate (back from system date)
freq: str
Pandas frequency string - interval of time between subsequent rows of data
write: bool
write generated data back to table with same name as entity
drop_existing: bool
drop existing time series, dimension, activity and scd table
data_item_mean: dict
mean values for generated data items. dict is keyed on data item name
data_item_sd: dict
std values for generated data items. dict is keyed on data item name
data_item_domain: dict
domains of values for categorical data items. dict is keyed on data item name
datasource: dataframe
dataframe as data source
datasourcemetrics : list of strings
list of relevant column for datasource
"""
if entities is None:
if start_entity_id is None:
start_entity_id = self._start_entity_id
if auto_entity_count is None:
auto_entity_count = self._auto_entity_count
entities = [str(start_entity_id + x) for x in list(range(auto_entity_count))]
if data_item_mean is None:
data_item_mean = {}
if data_item_sd is None:
data_item_sd = {}
if data_item_domain is None:
data_item_domain = {}
if drop_existing and self.db is not None:
self.drop_tables(recreate=True)
known_categoricals = set(data_item_domain.keys())
exclude_cols = ['deviceid', 'devicetype', 'format', 'updated_utc', 'logicalinterface_id', self._timestamp]
if self.db is None or self.is_local:
write = False
msg = 'This is a local entity or entity with no database connection, test data will not be written'
logger.debug(msg)
(metrics, dates, categoricals, others) = self.get_local_column_lists_by_type(columns,
known_categoricals_set=known_categoricals)
else:
(metrics, dates, categoricals, others) = self.db.get_column_lists_by_type(self.table, self._db_schema,
exclude_cols=exclude_cols,
known_categoricals_set=known_categoricals)
msg = 'Generating data for %s with metrics %s and dimensions %s and dates %s' % (
self.name, metrics, categoricals, dates)
logger.debug(msg)
ts = TimeSeriesGenerator(metrics=metrics, ids=entities, days=days, seconds=seconds, freq=freq,
categoricals=categoricals, dates=dates, timestamp=self._timestamp,
domains=data_item_domain, datasource=datasource, datasourcemetrics=datasourcemetrics)
ts.data_item_mean = data_item_mean
ts.data_item_sd = data_item_sd
ts.data_item_domain = data_item_domain
df = ts.execute()
dimension_table_exists = False
try:
dimension_table_exists = self.db.if_exists(table_name=self._dimension_table_name, schema=self._db_schema)
except Exception:
pass
if self._dimension_table_name is not None and dimension_table_exists:
self.generate_dimension_data(entities, write=write, data_item_mean=data_item_mean,
data_item_sd=data_item_sd, data_item_domain=data_item_domain)
if write and self.db is not None:
for o in others:
if o not in df.columns:
df[o] = None
df['logicalinterface_id'] = ''
df['devicetype'] = self.logical_name
df['format'] = ''
df['updated_utc'] = dt.datetime.utcnow()
self.db.write_frame(table_name=self.name, df=df, schema=self._db_schema, timestamp_col=self._timestamp)
for (at_name, at_table) in list(self.activity_tables.items()):
adf = self.generate_activity_data(table_name=at_name, activities=at_table._activities, entities=entities,
days=days, seconds=seconds, write=write)
msg = 'generated data for activity table %s' % at_name
logger.debug(msg)
for scd in list(self.scd.values()):
sdf = self.generate_scd_data(scd_obj=scd, entities=entities, days=days, seconds=seconds, write=write,
freq=scd_freq, domains=data_item_domain)
msg = 'generated data for scd table %s' % scd.name
logger.debug(msg)
return df
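# Illustrative sketch (added commentary, not part of the original module):
# TimeSeriesGenerator internals are not reproduced here; this only shows the
# general shape of the generated payload - one row per entity id and timestamp
# with random metric values.
def _example_generate_data():
    import numpy as np
    import pandas as pd
    entities = ['73000', '73001']
    timestamps = pd.date_range('2020-01-01', periods=5, freq='1min')
    index = pd.MultiIndex.from_product([entities, timestamps], names=['deviceid', 'evt_timestamp'])
    df = pd.DataFrame({'temperature': np.random.normal(20, 2, len(index))}, index=index)
    return df.reset_index()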
def generate_activity_data(self, table_name, activities, entities, days, seconds, write=True):
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to generate activity data ')
raise ValueError(msg)
try:
(metrics, dates, categoricals, others) = self.db.get_column_lists_by_type(table_name, self._db_schema,
exclude_cols=[self._entity_id,
'start_date',
'end_date'])
except KeyError:
metrics = []
dates = []
categoricals = []
others = []
metrics.append('duration')
categoricals.append('activity')
ts = TimeSeriesGenerator(metrics=metrics, dates=dates, categoricals=categoricals, ids=entities, days=days,
seconds=seconds, freq=self._activity_frequency)
ts.set_domain('activity', activities)
df = ts.execute()
df['start_date'] = df[self._timestamp]
duration = df['duration'].abs()
df['end_date'] = df['start_date'] + pd.to_timedelta(duration, unit='h')
# probability that an activity took place in the interval
p_activity = (days * 60 * 60 * 24 + seconds) / pd.to_timedelta(self._activity_frequency).total_seconds()
is_activity = p_activity >= np.random.uniform(0, 1, len(df.index))
df = df[is_activity]
cols = [x for x in df.columns if x not in ['duration', self._timestamp]]
df = df[cols]
if write:
msg = 'Generated %s rows of data and inserted into %s' % (len(df.index), table_name)
logger.debug(msg)
self.db.write_frame(table_name=table_name, df=df, schema=self._db_schema)
return df
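# Illustrative sketch (added commentary, not part of the original module): the
# probability filter used above. A row survives when a uniform random draw
# falls at or below the probability of an activity occurring in the interval.
def _example_probability_filter():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'deviceid': ['A'] * 10, 'duration': np.random.normal(1.0, 0.2, 10)})
    days, seconds = 1, 0
    p_activity = (days * 60 * 60 * 24 + seconds) / pd.to_timedelta('3D').total_seconds()
    is_activity = p_activity >= np.random.uniform(0, 1, len(df.index))
    return df[is_activity]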
def generate_dimension_data(self, entities, write=True, data_item_mean=None, data_item_sd=None,
data_item_domain=None):
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to generate dimension data ')
raise ValueError(msg)
# check for existing dimension data
df_existing = self.db.read_dimension(self._dimension_table_name, schema=self._db_schema, entities=entities)
existing_entities = set(df_existing[self._entity_id])
# do not generate data for existing entities
entities = list(set(entities) - existing_entities)
if len(entities) > 0:
if data_item_mean is None:
data_item_mean = {}
if data_item_sd is None:
data_item_sd = {}
if data_item_domain is None:
data_item_domain = {}
known_categoricals = set(data_item_domain.keys())
(metrics, dates, categoricals, others) = self.db.get_column_lists_by_type(self._dimension_table_name,
self._db_schema,
exclude_cols=[self._entity_id],
known_categoricals_set=known_categoricals)
rows = len(entities)
data = {}
for m in metrics:
mean = data_item_mean.get(m, 0)
sd = data_item_sd.get(m, 1)
data[m] = MetricGenerator(m, mean=mean, sd=sd).get_data(rows=rows)
for c in categoricals:
categories = data_item_domain.get(c, None)
data[c] = CategoricalGenerator(c, categories).get_data(rows=rows)
data[self._entity_id] = entities
df = pd.DataFrame(data=data)
for d in dates:
df[d] = DateGenerator(d).get_data(rows=rows)
df[d] = pd.to_datetime(df[d])
if write:
if self.db.db_type == 'db2':
self._dimension_table_name = self._dimension_table_name.upper()
else:
self._dimension_table_name = self._dimension_table_name.lower()
self.db.write_frame(df, table_name=self._dimension_table_name, if_exists='append',
schema=self._db_schema)
else:
logger.debug('No new entities. Did not generate dimension data.')
def get_entity_filter(self):
"""
Get the list of entity ids that are valid for pipeline processing.
"""
return self._entity_filter_list
def get_last_checkpoint(self):
"""
Get the last checkpoint recorded for entity type
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity'
' types do not have a checkpoint')
logger.debug(msg)
return None
(query, table) = self.db.query_column_aggregate(table_name=self.checkpoint_table, schema=self._db_schema,
column='TIMESTAMP', aggregate='max')
query.filter(table.c.entity_type_id == self._entity_type_id)
return query.scalar()
def generate_scd_data(self, scd_obj, entities, days, seconds, freq, write=True, domains=None):
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to generate scd data ')
raise ValueError(msg)
if domains is None:
domains = {}
table_name = scd_obj.name
msg = 'generating data for %s for %s days and %s seconds' % (table_name, days, seconds)
try:
(metrics, dates, categoricals, others) = self.db.get_column_lists_by_type(table_name, self._db_schema,
exclude_cols=[self._entity_id,
'start_date',
'end_date'])
except KeyError:
metrics = []
dates = []
categoricals = [scd_obj.property_name.name]
others = []
msg = msg + ' with metrics %s, dates %s, categoricals %s and others %s' % (metrics, dates, categoricals, others)
logger.debug(msg)
ts = TimeSeriesGenerator(metrics=metrics, dates=dates, categoricals=categoricals, ids=entities, days=days,
seconds=seconds, freq=freq, domains=domains)
df = ts.execute()
df['start_date'] = df[self._timestamp]
# probability that a change took place in the interval
p_activity = (days * 60 * 60 * 24 + seconds) / pd.to_timedelta(freq).total_seconds()
is_activity = p_activity >= np.random.uniform(0, 1, len(df.index))
df = df[is_activity]
cols = [x for x in df.columns if x not in [self._timestamp]]
df = df[cols]
df['end_date'] = None
if len(df.index) > 0:
df = df.groupby([self._entity_id]).apply(self._set_end_date)
try:
self.db.truncate(table_name, schema=self._db_schema)
except KeyError:
pass
if write:
msg = 'Generated %s rows of data and inserted into %s' % (len(df.index), table_name)
logger.debug(msg)
self.db.write_frame(table_name=table_name, df=df, schema=self._db_schema, if_exists='append')
return df
def _get_scd_list(self):
return [(s.output_item, s.table_name) for s in self._scd_stages]
def is_base_item(self, item_name):
"""
Base items are non calculated data items.
"""
item_type = self._data_items[item_name]['columnType']
if item_type == 'METRIC':
return True
else:
return False
def is_data_item(self, name):
"""
Determine whether an item is a data item
"""
if name in [x['name'] for x in self._data_items]:
return True
else:
return False
def make_dimension(self, name=None, *args, **kw):
"""
Add dimension table by specifying additional columns
Parameters
----------
name: str
dimension table name
*args: sqlalchemy Column objects
**kw: schema
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to make dimensions ')
raise ValueError(msg)
kw['schema'] = self._db_schema
if name is None:
name = '%s_dimension' % self.name
# name = name.lower()
# self._dimension_table_name = name
if self.db.db_type == 'db2':
self._dimension_table_name = name.upper()
else:
self._dimension_table_name = name.lower()
try:
self._dimension_table = self.db.get_table(self._dimension_table_name, self._db_schema)
except KeyError:
dim = db_module.Dimension(self._dimension_table_name, self.db, *args, **kw)
self._dimension_table = dim.table
# dim.create()
# msg = 'Created dimension table %s' % self._dimension_table_name
msg = 'Dimension table not created'
logger.debug(msg)
def publish_kpis(self, raise_error=True):
"""
Publish the stages assigned to this entity type to the AS Server
"""
warnings.warn(('publish_kpis() is deprecated for EntityType. Instead'
' use an EntityType inherited from BaseCustomEntityType'), DeprecationWarning)
export = []
stages = self.build_flat_stage_list()
self.db.register_functions(stages)
for s in stages:
try:
name = s.name
except AttributeError:
name = s.__class__.__name__
logger.debug(('Function class %s has no name property.'
' Using the class name'), name)
try:
args = s._get_arg_metadata()
except AttributeError:
msg = ('Attempting to publish kpis for an entity type.'
' Function %s has no _get_arg_metadata() method.'
' It cannot be published') % name
raise NotImplementedError(msg)
metadata = {'name': name, 'args': args}
export.append(metadata)
logger.debug('Published kpis to entity type')
logger.debug(export)
response = self.db.http_request(object_type='kpiFunctions', object_name=self.logical_name, request='POST',
payload=export, raise_error=raise_error)
logger.debug(response)
return response
def raise_error(self, exception, msg=None, abort_on_fail=False, stage_name=None):
"""
Raise an exception. Append a message and the current trace to the stacktrace.
"""
err_info = {'AttributeError': 'The function %s makes reference to an object property that does not exist.',
'SyntaxError': 'The function %s contains a syntax error. If the function includes a type-in expression, make sure this is correct.',
'ValueError': 'The function %s is operating on data that has an unexpected value for its data type.',
'TypeError': 'The function %s is operating on data that has an unexpected data type.',
'KeyError': 'The function %s is referring to a dictionary key or dataframe column name that doesn\'t exist.',
'NameError': 'The function %s is referring to an object that doesn\'t exist. If referring to data items in a pandas dataframe, ensure that you quote them, e.g. df["temperature"].', }
if msg is None:
msg = err_info.get(exception.__class__.__name__, 'The function %s failed to execute.') % stage_name
if abort_on_fail:
raise StageException(error_message=msg, stage_name=stage_name, exception=exception)
else:
logger.warning(msg)
return msg
def create_sample_data(self, drop_existing, generate_days, generate_entities=None,
populate_dm_wiot_entity_list=False):
if generate_days > 0:
# classify_stages adds entity metadata to the stages
# need to run it before executing any stage
self.classify_stages()
generators = [x for x in self._functions if x.is_data_generator]
start = dt.datetime.utcnow() - dt.timedelta(days=generate_days)
for g in generators:
logger.debug(('Running generator %s with start date %s.'
' Drop existing %s'), g.__class__.__name__, start, drop_existing)
g.execute(df=None, start_ts=start, entities=generate_entities)
if populate_dm_wiot_entity_list:
self.populate_entity_list_table()
def populate_entity_list_table(self):
entity_list_table_name = 'dm_wiot_entity_list'
try:
if self.db.db_type == 'db2':
entity_list_table_name = entity_list_table_name.upper()
else:
entity_list_table_name = entity_list_table_name.lower()
entities = [str(self._start_entity_id + x) for x in list(range(self._auto_entity_count))]
self.db.start_session()
table = self.db.get_table(entity_list_table_name, self._db_schema)
for entity_id in entities:
stmt = table.insert().values({'entity_type_id': self._entity_type_id, 'entity_id': entity_id})
self.db.connection.execute(stmt)
self.db.commit()
except Exception:
logger.debug('Error populating dm_wiot_entity_list table.')
def register(self, publish_kpis=False, raise_error=False, sample_entity_type=False):
"""
Register entity type so that it appears in the UI. Create a table for input data.
Parameters
----------
publish_kpis: bool
also publish the function metadata assigned to this entity type after registering
raise_error: bool
raise an exception if the server request fails
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' may not be registered ')
raise ValueError(msg)
cols = []
columns = []
metric_column_names = []
table = {}
table['name'] = self.logical_name
table['metricTableName'] = self.name
table['metricTimestampColumn'] = self._timestamp
table['description'] = self.description
table['origin'] = 'AS_SAMPLE'
for c in self.db.get_column_names(self.table, schema=self._db_schema):
cols.append((self.table, c, 'METRIC'))
metric_column_names.append(c)
if self._dimension_table is not None:
table['dimensionTableName'] = self._dimension_table_name
for c in self.db.get_column_names(self._dimension_table, schema=self._db_schema):
if c not in metric_column_names:
cols.append((self._dimension_table, c, 'DIMENSION'))
for (table_obj, column_name, col_type) in cols:
msg = 'found %s column %s' % (col_type, column_name)
logger.debug(msg)
# if column_name not in self.get_excluded_cols():
data_type = table_obj.c[column_name].type
if isinstance(data_type, (FLOAT, Float, INTEGER, Integer)):
data_type = 'NUMBER'
elif db_module.DB2_DOUBLE is not None and isinstance(data_type, db_module.DB2_DOUBLE):
data_type = 'NUMBER'
elif isinstance(data_type, (VARCHAR, String)):
data_type = 'LITERAL'
elif isinstance(data_type, (TIMESTAMP, DateTime)):
data_type = 'TIMESTAMP'
else:
data_type = str(data_type)
logger.warning('Unknown datatype %s for column %s' % (data_type, column_name))
columns.append({'name': column_name, 'type': col_type, 'columnName': column_name, 'columnType': data_type,
'tags': None, 'transient': False})
table['dataItemDto'] = columns
if self._db_schema is not None:
table['schemaName'] = self._db_schema
else:
try:
table['schemaName'] = self.db.credentials['db2']['username']
except KeyError:
try:
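# Accessing the postgresql username here only probes that postgresql credentials exist;
# a KeyError falls through to the error raised below (the schema itself is always "public").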
username = self.db.credentials["postgresql"]['username']
table["schemaName"] = "public"
except KeyError:
raise KeyError('No database credentials found. Unable to register table.')
payload = [table]
response = self.db.http_request(request='POST', object_type='entityType', object_name=self.name,
payload=payload, raise_error=raise_error, sample_entity_type=sample_entity_type)
msg = 'Metadata registered for table %s ' % self.name
logger.debug(msg)
if publish_kpis:
self.publish_kpis(raise_error=raise_error)
self.db.register_constants(self.ui_constants)
return response
def trace_append(self, created_by, msg, log_method=None, df=None, **kwargs):
"""
Write to entity type trace
"""
self._trace.write(created_by=created_by, log_method=log_method, text=msg, df=df, **kwargs)
def set_custom_calendar(self, custom_calendar):
"""
Set a custom calendar for the entity type.
"""
if custom_calendar is not None:
self._custom_calendar = custom_calendar
def get_server_params(self):
"""
Retrieve the set of properties assigned through the UI
Assign to instance variables
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not able to get server params ')
logger.debug(msg)
return {}
meta = self.db.http_request(object_type='constants', object_name=self.logical_name, request='GET')
try:
meta = json.loads(meta)
except (TypeError, json.JSONDecodeError):
params = {}
logger.debug('API call to server did not retrieve valid entity type properties. No properties set.')
else:
params = {}
for p in meta:
key = p['name']
if isinstance(p['value'], dict):
params[key] = p['value'].get('value', p['value'])
else:
params[key] = p['value']
logger.debug('Adding server property %s with value %s to entity type', key, params[key])
self.set_params(**params)
return params
def _set_end_date(self, df):
df['end_date'] = df['start_date'].shift(-1)
df['end_date'] = df['end_date'] - | pd.Timedelta(microseconds=1) | pandas.Timedelta |
# -*- coding: utf-8 -*-
"""HAR_Opportunity.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1qfhns0ykD6eLkoWICPu6WbdD4r7V-6Uf
# Introduction
This notebook presents several machine learning models using CNNs and LSTMs for HAR. For a detailed description of the architectures, please refer to the dissertation, **"RECOGNISING HUMAN ACTIVITIES AUTONOMOUSLY THROUGH FUSION OF SENSOR DATA"**.
## Dataset
As a dataset, the [OPPORTUNITY Activity Recognition Data Set](http://archive.ics.uci.edu/ml/datasets/OPPORTUNITY+Activity+Recognition) is used. To prepare this dataset for the program, uncomment and execute the following code.
"""
# Download dataset zip file and place data files in training and test set directries
zipfile_dataset_opportunity = "OpportunityUCIDataset.zip"
url_dataset_opportunity = "https://archive.ics.uci.edu/ml/machine-learning-databases/00226/OpportunityUCIDataset.zip"
#!wget $url_dataset_opportunity
#!unzip $zipfile_dataset_opportunity
#!ls OpportunityUCIDataset/dataset/
# Deploy dataset files into training and test directories
#!mkdir -p ../data/test
#!mkdir ../data/train
#%cd OpportunityUCIDataset/dataset/
#!cp S[1-3]-Drill.dat S1-ADL[1-5].dat S2-ADL[1-3].dat S3-ADL[1-3].dat ../../../data/train/
#!cp S[23]-ADL[23].dat ../../../data/test/
#%cd ../../
#!ls ../data/train/
#!ls ../data/test/
"""# 1.Parameters
Adjustable flags and parameters are listed below. Hyperparameters for each ML model are in the "[F] ML models" section.
|Name|Type|Explanation|
|-|-|-|
|flag_delete_null|Flag|Whether to delete the Null class|
|flag_label|Flag|Activity type (gesture or locomotion)|
|flag_(ML model name)|Flag|Whether to execute the model|
|flag_experiment|Flag|Whether to run repeated evaluation for summary statistics|
|flag_model_load|Flag|Whether to load the model from a file|
|flag_model_save|Flag|Whether to save the model to a file after training|
|flag_EarlyStopping|Flag|Enable early stopping|
|flag_es_monitor|Flag|Monitor type for early stopping|
|ratio_train|Parameter|The ratio between training and validation sets|
|seed|Parameter|Fix the seed for reproducibility|
|flag_scaling|Flag|Scaling technique|
|window_size|Parameter|The length of the sliding window|
|window_step|Parameter|The step of the sliding window|
|flag_sw_label|Flag|Class label of the sliding window|
|flag_balance_*|Flag|Enable data balancing|
|flag_data_dist|Flag|Display dataset distribution|
|flag_interpolate|Flag|Enable interpolation|
|flag_plot_model|Flag|Whether to plot model graphs|
|flag_savefig|Flag|Whether to save graphs|
|flag_summary|Flag|Show a summary of the dataset|
|flag_cm_norm|Flag|Whether to normalise the confusion matrix|
|flag_TensorBoard|Flag|Whether to save TensorBoard data|
"""
### [Note]
#
# Hyperparameters for each ML model are in "[F] ML models" section
#
### ---------- ---------- ---------- ---------- ----------
### Flags
flag_delete_null = False
# Label
flag_label = "ML_Both_Arms"
#flag_label = "Locomotion"
# ML
flag_CNN_1d = True
flag_LSTM_Mto1 = True
flag_CNN1D_LSTM = True
flag_ConvLSTM = True
flag_Ensemble = True
flag_experiment = True
flag_model_load = False
flag_model_save = True
flag_EarlyStopping = True
flag_es_monitor = "val_loss"
#flag_es_monitor = "val_accuracy"
### ---------- ---------- ---------- ---------- ----------
### Pre-processing
# Ratio of training dataset to be split
ratio_train = 0.85
# Random seed for reproducibility
seed = 7
# scaling
flag_scaling = "Std" # for Gaussian
#flag_scaling = "Norm" # (0 - 1)
# Sliding window
window_size = 15
window_step = 8
flag_sw_label = "last"
#flag_sw_label = "mode"
# Data balancing
flag_balance_under1 = False
flag_balance_under2 = False
flag_balance_under3 = False
flag_balance_over1 = False
flag_balance_over2 = False
flag_balance_over3 = False
### ---------- ---------- ---------- ---------- ----------
### Evaluation
flag_data_dist = False
flag_interpolate = True
flag_plot_model = True
flag_savefig = True
flag_summary = True
flag_cm_norm = True
flag_TensorBoard = False
### ---------- ---------- ---------- ---------- ----------
### Directories
dir_log = 'log'
dir_model = 'model'
### ---------- ---------- ---------- ---------- ----------
### Names
# models
modelname_cnn_1d = 'CNN_1D'
modelname_lstm_Mto1 = 'LSTM_Mto1'
modelname_cnn1d_lstm = 'CNN1D_LSTM'
modelname_convlstm = 'ConvLSTM'
modelname_ensemble = 'Ensemble'
modelname_lstm_Mto1_null = 'LSTM_Mto1_null'
# Label list
labels_Loco = ['(Null)',
'Stand',
'Walk',
'Sit',
'Lie']
labels_ML = ['(Null)',
'Open Door 1', 'Open Door 2',
'Close Door 1', 'Close Door 2',
'Open Fridge', 'Close Fridge',
'Open Dishwasher', 'Close Dishwasher',
'Open Drawer 1', 'Close Drawer 1',
'Open Drawer 2', 'Close Drawer 2',
'Open Drawer 3', 'Close Drawer 3',
'Clean Table', 'Drink from Cup', 'Toggle Switch']
### ---------- ---------- ---------- ---------- ----------
"""# 2.Setup
## Import libraries
"""
# Pre-process
import os
import glob
import numpy as np
import random as rn
import pandas as pd
from numpy.lib.stride_tricks import as_strided
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from tensorflow.keras.utils import to_categorical
import collections
# Evaluation
from sklearn.metrics import f1_score, classification_report, confusion_matrix
import matplotlib.pyplot as plt
import time
from datetime import datetime
from numpy import mean, std
from matplotlib import pyplot
import seaborn as sns
sns.set()
# NNs
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
# CNN
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv1D, MaxPool1D
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras.layers import Input, BatchNormalization
from tensorflow.keras.optimizers import Adam
# LSTM
from tensorflow.keras.layers import LSTM, TimeDistributed
from tensorflow.keras import regularizers
# ConvLSTM
from tensorflow.keras.layers import ConvLSTM2D
# Ensemble
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Concatenate
# Set random seed (for reproducibility)
# Hash seed
os.environ["PYTHONHASHSEED"] = str(seed)
# Built-in random
rn.seed(seed)
# Numpy.random
np.random.seed(seed)
# Tensorflow
tf.random.set_seed(seed)
"""## [F] Pre-processing"""
def read_files(files):
for i, file in enumerate(files):
print(f'[{i+1}] Reading file: {file}')
d = pd.read_csv(file, header=None, sep=' ')
# Truncate last residual records for sliding window
mod = (len(d) - window_size) % window_step
d = d[:len(d)-mod]
# Count records with NaN
d_nan = d.isnull().sum()
# Convert NaN
# Linear interpolation
if flag_interpolate:
d.interpolate(inplace=True)
# Convert remaining NaNs into 0
if flag_interpolate:
d.replace(np.nan, 0, inplace=True)
if i == 0:
dataset = d
dataset_nan = d_nan
else:
dataset = pd.concat([dataset, d])
dataset_nan = dataset_nan + d_nan
return dataset, dataset_nan
# Adjust label values (0 to num_classes)
def adjust_idx_labels(data_y):
if flag_label == 'Locomotion':
data_y[data_y == 4] = 3
data_y[data_y == 5] = 4
elif flag_label == 'ML_Both_Arms':
data_y[data_y == 406516] = 1
data_y[data_y == 406517] = 2
data_y[data_y == 404516] = 3
data_y[data_y == 404517] = 4
data_y[data_y == 406520] = 5
data_y[data_y == 404520] = 6
data_y[data_y == 406505] = 7
data_y[data_y == 404505] = 8
data_y[data_y == 406519] = 9
data_y[data_y == 404519] = 10
data_y[data_y == 406511] = 11
data_y[data_y == 404511] = 12
data_y[data_y == 406508] = 13
data_y[data_y == 404508] = 14
data_y[data_y == 408512] = 15
data_y[data_y == 407521] = 16
data_y[data_y == 405506] = 17
return data_y
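# (The raw OPPORTUNITY gesture labels such as 406516 are sparse codes; remapping them to 1-17
# keeps 0 for the Null class so that to_categorical() later produces compact one-hot vectors.)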
def sliding_window(data, w_size, w_step):
shape = np.array(data.shape)
# Compute new shape & strides based on window size & step
newshape = ((shape - w_size) // w_step) + 1
newshape = np.append(newshape, [w_size[0], data.shape[1]])
# Original strides * window step
newstrides = np.array(data.strides) * w_step
# For window size & features, set original strides
newstrides = np.append(newstrides, data.strides)
# Create a view for new shape & stride
data_strided = as_strided(data, shape=newshape, strides=newstrides)
# Flatten strided shape
newshape_flatten = [i for i in newshape if i != 1]
return data_strided.reshape(newshape_flatten)
def opp_sliding_window(X, Y):
X = sliding_window(X, (window_size, X.shape[1]), (window_step, 1))
Y = sliding_window(Y, (window_size, Y.shape[1]), (window_step, 1))
return X, Y
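# Illustrative example (hypothetical sizes): with window_size=15, window_step=8 and 113 feature
# columns, an input X of shape (N, 113) becomes windows of shape (num_windows, 15, 113),
# where num_windows = ((N - 15) // 8) + 1; the label array Y is windowed in the same way.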
"""## [F] ML models"""
# Train
epochs = 100
batch_size = 100
repeats = 10
# EarlyStopping
es_patience = 5
"""### CNN"""
# Layer
cnn_padding ='same'
cnn_activation = 'relu'
cnn_units = 128
cnn_dropout = 0.5
cnn_pool_size = 2
## 1D Conv
cnn_1d_filters = 64
cnn_1d_kernel_size = 5
def build_model_cnn_1d():
model = Sequential(name=modelname_cnn_1d)
# Conv layer 1
model.add(Conv1D(
input_shape = cnn_1d_input_shape,
filters = cnn_1d_filters,
kernel_size = cnn_1d_kernel_size,
padding = cnn_padding,
activation = cnn_activation))
# Conv layer 2
model.add(Conv1D(
filters = cnn_1d_filters,
kernel_size = cnn_1d_kernel_size,
padding = cnn_padding,
activation = cnn_activation))
# Conv layer 3
model.add(Conv1D(
filters = cnn_1d_filters,
kernel_size = cnn_1d_kernel_size,
padding = cnn_padding,
activation = cnn_activation))
# Conv layer 4
model.add(Conv1D(
filters = cnn_1d_filters,
kernel_size = cnn_1d_kernel_size,
padding = cnn_padding,
activation = cnn_activation))
# Maxpool layer
# model.add(MaxPool1D(
# pool_size = cnn_pool_size))
model.add(Flatten())
# Dense layer 1
model.add(Dense(
units = cnn_units,
activation = 'relu'))
# Dropout
model.add(Dropout(cnn_dropout))
# Dense layer 2
model.add(Dense(
units = cnn_units,
activation = 'relu'))
# Dropout
model.add(Dropout(cnn_dropout))
# Output layer
model.add(Dense(
units = num_classes,
activation = 'softmax'))
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
"""### LSTM"""
# Layer
lstm_units = 128
lstm_dropout = 0.5
lstm_weight_decay = 1e-4
# LSTM (Many-to-One, stateless)
def build_model_lstm_Mto1():
model = Sequential(name=modelname_lstm_Mto1)
# LSTM layer
model.add(LSTM(
input_shape = lstm_input_shape,
units = lstm_units,
# kernel_regularizer = regularizers.l2(lstm_weight_decay),
return_sequences = False)) # final layer of LSTM (only final output)
# Dropout
model.add(Dropout(lstm_dropout))
# Dense layer
model.add(Dense(
units = lstm_units,
activation = 'relu'))
# Dropout
model.add(Dropout(lstm_dropout))
# Output layer
model.add(Dense(
units = num_classes,
activation = 'softmax'))
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
"""### CNN-LSTM"""
# Data
cnn_lstm_steps = 3
cnn_lstm_length = int(window_size / cnn_lstm_steps)
# Layer
cnn_lstm_padding = 'same'
cnn_lstm_activation = 'relu'
cnn_lstm_dropout = 0.5
cnn_lstm_pool_size = 2
## CNN
cnn_lstm_filters = 64
cnn1d_lstm_kernel_size = 3
# LSTM
cnn_lstm_units = 128
def build_model_cnn1d_lstm():
model = Sequential(name=modelname_cnn1d_lstm)
## CNN (with TimeDistributed)
# Conv layer 1
model.add(TimeDistributed(Conv1D(
filters = cnn_lstm_filters,
kernel_size = cnn1d_lstm_kernel_size,
padding = cnn_lstm_padding,
activation = cnn_lstm_activation),
input_shape = cnn1d_lstm_input_shape))
# Conv layer 2
model.add(TimeDistributed(Conv1D(
filters = cnn_lstm_filters,
kernel_size = cnn1d_lstm_kernel_size,
padding = cnn_lstm_padding,
activation = cnn_lstm_activation)))
# Conv layer 3
model.add(TimeDistributed(Conv1D(
filters = cnn_lstm_filters,
kernel_size = cnn1d_lstm_kernel_size,
padding = cnn_lstm_padding,
activation = cnn_lstm_activation)))
# Conv layer 4
model.add(TimeDistributed(Conv1D(
filters = cnn_lstm_filters,
kernel_size = cnn1d_lstm_kernel_size,
padding = cnn_lstm_padding,
activation = cnn_lstm_activation)))
# Dropout
model.add(TimeDistributed(Dropout(cnn_lstm_dropout)))
# Maxpool layer
model.add(TimeDistributed(MaxPool1D(
pool_size = cnn_lstm_pool_size)))
model.add(TimeDistributed(Flatten()))
## LSTM
# LSTM layer 1
model.add(LSTM(
units = cnn_lstm_units,
return_sequences = True))
# Dropout
model.add(Dropout(cnn_lstm_dropout))
# LSTM layer 2
model.add(LSTM(
units = cnn_lstm_units,
return_sequences = False))
# Dropout
model.add(Dropout(cnn_lstm_dropout))
# Output layer
model.add(Dense(
units = num_classes,
activation = 'softmax'))
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
"""### ConvLSTM"""
# Data
convlstm_steps = 3
convlstm_length = int(window_size / convlstm_steps)
# Layer
convlstm_padding = 'same'
convlstm_activation = 'relu'
convlstm_dropout = 0.5
convlstm_pool_size = 2
## CNN
convlstm_filters = 64
convlstm_kernel_size = (1, 3)
convlstm_units = 128
def build_model_convlstm():
model = Sequential(name=modelname_convlstm)
# Conv LSTM layer 1
model.add(ConvLSTM2D(
filters = convlstm_filters,
kernel_size = convlstm_kernel_size,
padding = convlstm_padding,
activation = convlstm_activation,
input_shape = convlstm_input_shape,
return_sequences = True))
# return_sequences = False)) # final layer of LSTM (only final output)
# Conv LSTM layer 2
model.add(ConvLSTM2D(
filters = convlstm_filters,
kernel_size = convlstm_kernel_size,
padding = convlstm_padding,
activation = convlstm_activation,
return_sequences = True))
# Conv LSTM layer 3
model.add(ConvLSTM2D(
filters = convlstm_filters,
kernel_size = convlstm_kernel_size,
padding = convlstm_padding,
activation = convlstm_activation,
return_sequences = True))
# Conv LSTM layer 4
model.add(ConvLSTM2D(
filters = convlstm_filters,
kernel_size = convlstm_kernel_size,
padding = convlstm_padding,
activation = convlstm_activation,
return_sequences = False))
# Dropout
model.add(Dropout(convlstm_dropout))
model.add(Flatten())
# Dense layer
model.add(Dense(
units = convlstm_units,
activation = convlstm_activation))
# Output layer
model.add(Dense(
units = num_classes,
activation = 'softmax'))
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
"""### Ensemble"""
# Layer
ensemble_units = 10
ensemble_activation = 'relu'
def build_model_ensemble(inputs, outputs):
ensemble_merge = Concatenate(axis=1)(outputs)
# Dense layer
ensemble_hidden = Dense(
units = ensemble_units,
activation = ensemble_activation)(ensemble_merge)
# Output layer
ensemble_output = Dense(
units = num_classes,
activation = 'softmax')(ensemble_hidden)
model = Model(
inputs = inputs,
outputs = ensemble_output,
name = modelname_ensemble)
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
return model
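# Illustrative usage (assumed wiring, not taken from the original notebook): given already built
# sub-models m1 and m2, build_model_ensemble(inputs=[m1.input, m2.input],
# outputs=[m1.output, m2.output]) concatenates their outputs and trains a small dense
# classifier head on top of them.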
"""## [F] Evaluation"""
# Train and evaluate a model (once)
def evaluate_model(model_name, X_train, y_train, X_val, y_val, X_test, y_test):
# Build model
if model_name == modelname_cnn_1d:
model = build_model_cnn_1d()
elif model_name == modelname_lstm_Mto1:
model = build_model_lstm_Mto1()
elif model_name == modelname_cnn1d_lstm:
model = build_model_cnn1d_lstm()
elif model_name == modelname_convlstm:
model = build_model_convlstm()
else:
print("Error: specify correct model name")
return -1
# Train
history = model.fit(
x = X_train,
y = y_train,
batch_size = batch_size,
epochs = epochs,
verbose = 0,
callbacks = [cb],
validation_data = (X_val, y_val)
)
num_epochs = len(history.history['loss'])
## Evaluate
# Accuracy
_, accuracy = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
# F1
y_pred = model.predict(X_test)
f1 = f1_score(y_test.argmax(axis=-1), y_pred.argmax(axis=-1), average='weighted')
return accuracy, f1, num_epochs
# Repeat experiment
def run_experiment(model_name, X_train, X_val, X_test, y_train, y_val, y_test, repeats=10):
print(f'Model: {model_name}')
scores_acc = []
scores_f1 = []
scores_epoch = []
for r in range(repeats):
acc, f1, epoch = evaluate_model(model_name, X_train, y_train, X_val, y_val, X_test, y_test)
print(f'[#{r+1:>2d}] Accuracy: {acc:.3f}, F1 score(weighted): {f1:.3f}, epoch: {epoch}')
scores_acc.append(acc)
scores_f1.append(f1)
scores_epoch.append(epoch)
# Summarise mean and standard deviation
print(f'Accuracy: {mean(scores_acc):.3f} (+/- {std(scores_acc):.3f})')
print(f'F1 score(weighted): {mean(scores_f1):.3f} (+/- {std(scores_f1):.3f})')
print(f'epoch: {mean(scores_epoch):.1f} (+/- {std(scores_epoch):.3f})')
# Boxplot of scores
metrics_list = ['Accuracy', 'F1 score']
all_scores = []
all_scores.append(scores_acc)
all_scores.append(scores_f1)
plt.boxplot(all_scores, labels=metrics_list)
if flag_savefig:
plt.savefig("boxplot_" + model_name + ".png")
plt.show()
# Plot a histogram of each variable in the dataset
def plot_variable_distributions(X, start=0, end=None, xlim=None):
if end is None:
end = X.shape[1]-1
print(X.shape)
num_features = end - start +1
print(f'# of plots: {num_features} ({start} - {end})')
plt.figure(figsize=(10, 2*num_features), tight_layout=True)
xaxis = None
for i, f in enumerate(range(start, end+1)):
print(i)
if xlim is None:
ax = plt.subplot(num_features, 1, i+1, title='Feature: ' + str(f))
else:
ax = plt.subplot(num_features, 1, i+1, sharex=xaxis, title='Feature: ' + str(f))
ax.set_xlim(xlim)
if i == 0:
xaxis = ax
plt.hist(X[:, f], bins=100)
plt.show()
# Plot graphs for loss and accuracy
def plot_acc_graph(history):
# Set figure size
fig = plt.figure(figsize=(15, 6))
plt.subplots_adjust(wspace=0.2)
# Loss
plt.subplot(1,2,1)
plt.plot(history.history['loss'],
label='Train',
color='black')
plt.plot(history.history['val_loss'],
label='Val',
color='red')
#plt.ylim(0, 1)
plt.legend()
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Loss')
# Accuracy
plt.subplot(1,2,2)
plt.plot(history.history['accuracy'],
label='Train',
color='black')
plt.plot(history.history['val_accuracy'],
label='Val',
color='red')
plt.ylim(0, 1)
plt.legend()
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
if flag_savefig:
plt.savefig("acc_graph_" + history.model.name + ".png")
plt.show()
# Print execution time
def print_execution_time(time_start):
time_elapsed = time.perf_counter() - time_start
min, sec = divmod(time_elapsed, 60)
hour, min = divmod(min, 60)
print(f"Execution time: {hour:.0f} hour {min:.0f} min {sec:.0f} sec")
"""# 3.Pre-processing"""
# For CSF3 (UoM) setting
import platform
system_name = platform.system()
print('system_name: ' + system_name)
if system_name == "Linux":
flag_summary = False
flag_cm_norm = False
flag_plot_model = False
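# (Assumption: plotting and summaries are disabled on Linux because the CSF3 cluster nodes run headless.)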
"""## Read files"""
# dataset files (train & test)
files_train = glob.glob('../data/train/*.dat')
files_test = glob.glob('../data/test/*.dat')
# Read datafiles (if not yet)
if not 'R_dataset_train' in locals():
R_dataset_train, nan_train = read_files(files_train)
R_dataset_test, nan_test = read_files(files_test)
# Discard null action records
if flag_delete_null:
if flag_label == 'Locomotion':
dataset_train = R_dataset_train[R_dataset_train.iloc[:, 243] != 0]
dataset_test = R_dataset_test[R_dataset_test.iloc[:, 243] != 0]
elif flag_label == 'ML_Both_Arms':
dataset_train = R_dataset_train[R_dataset_train.iloc[:, 249] != 0]
dataset_test = R_dataset_test[R_dataset_test.iloc[:, 249] != 0]
else:
dataset_train = R_dataset_train
dataset_test = R_dataset_test
# Balancing data 1 (After reading files)
if flag_balance_under1:
if flag_label == 'Locomotion':
idx_label = 243
elif flag_label == 'ML_Both_Arms':
idx_label = 249
min_train = dataset_train.iloc[:, idx_label].value_counts().min()
dataset_train_np = dataset_train.to_numpy()
for i in dataset_train.iloc[:, idx_label].unique():
dataset_train_np = np.delete(dataset_train_np, np.where(dataset_train_np[:, idx_label] == i)[0][min_train:], axis=0)
dataset_train = pd.DataFrame(dataset_train_np)
"""## Divide X / Y
(features and labels)
"""
## Features (X)
# Strip unnecessary columns
# (following the OPPORTUNITY challenge specification)
X_train = pd.concat([
dataset_train.iloc[:, 1:46], # (included:excluded)
dataset_train.iloc[:, 50:59],
dataset_train.iloc[:, 63:72],
dataset_train.iloc[:, 76:85],
dataset_train.iloc[:, 89:98],
dataset_train.iloc[:, 102:134]],
axis=1)
X_test = pd.concat([
dataset_test.iloc[:, 1:46],
dataset_test.iloc[:, 50:59],
dataset_test.iloc[:, 63:72],
dataset_test.iloc[:, 76:85],
dataset_test.iloc[:, 89:98],
dataset_test.iloc[:, 102:134]],
axis=1)
## Labels (Y)
# from last 7 columns
if flag_label == 'Locomotion':
y_train = dataset_train.iloc[:,243]
y_test = dataset_test.iloc[:,243]
elif flag_label == 'ML_Both_Arms':
y_train = dataset_train.iloc[:,249]
y_test = dataset_test.iloc[:,249]
y_train = y_train.rename('Label')
y_test = y_test.rename('Label')
num_features = len(X_train.columns)
# Input shape of NNs
cnn_1d_input_shape = (window_size, num_features)
lstm_input_shape = (window_size, num_features)
cnn1d_lstm_input_shape = (None, cnn_lstm_length, num_features)
convlstm_input_shape = (convlstm_steps, 1, convlstm_length, num_features)
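# Note (inferred from the input shapes above): the sliding windows built later have shape
# (n, window_size, num_features); for CNN1D-LSTM they are expected to be reshaped to
# (n, cnn_lstm_steps, cnn_lstm_length, num_features) and for ConvLSTM to
# (n, convlstm_steps, 1, convlstm_length, num_features).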
"""## (Distributions)"""
if flag_data_dist:
plot_variable_distributions(X_train.to_numpy(), start=0, end=29)
plot_variable_distributions(X_train.to_numpy(), start=30, end=59)
plot_variable_distributions(X_train.to_numpy(), start=60, end=89)
plot_variable_distributions(X_train.to_numpy(), start=90, end=112)
"""## Encode labels"""
## Encode label (one-hot)
# Adjust label values for to_categorical()
y_train = adjust_idx_labels(y_train)
y_test = adjust_idx_labels(y_test)
# Convert class vector (int) to one-hot vector
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_train = pd.DataFrame(y_train)
y_test = pd.DataFrame(y_test)
"""## Split train / val"""
# Split into train and val (No shuffle)
X_train, X_val, y_train, y_val = \
train_test_split(X_train, y_train,
train_size=ratio_train,
random_state=seed,
shuffle=False)
# Balancing data 2 (After splitting train, val, and test)
if flag_balance_under2:
min_train = y_train.value_counts().min()
X_train = X_train.to_numpy()
y_train = y_train.to_numpy()
y_train_n = y_train.argmax(axis=-1)
for i in range(len(np.unique(y_train_n))):
X_train = np.delete(X_train, np.where(y_train_n == i)[0][min_train:], axis=0)
y_train_n = np.delete(y_train_n, np.where(y_train_n == i)[0][min_train:], axis=0)
y_train = to_categorical(y_train_n)
X_train = pd.DataFrame(X_train)
y_train = pd.DataFrame(y_train)
# The number of classes
num_classes = len(y_train.columns)
# label list (for classification_report, confusion_matrix)
if flag_label == 'Locomotion':
labels_cr = labels_Loco
labels_cm = labels_Loco
elif flag_label == 'ML_Both_Arms':
labels_cr = labels_ML
labels_cm = labels_ML
if flag_delete_null:
labels_cr = np.delete(labels_cr, 0)
# labels_cm = np.delete(labels_cm, 0)
# confusion_matrix
labels = np.arange(0, num_classes)
"""## Scaling
"""
if flag_scaling == "Norm":
scaler = MinMaxScaler()
elif flag_scaling == "Std":
scaler = StandardScaler()
# Fit the scaler on the training data (to avoid data leakage)
scaler.fit(X_train)
# Scale (to numpy)
X_train = scaler.transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
# Convert to numpy
y_train = y_train.to_numpy()
y_val = y_val.to_numpy()
y_test = y_test.to_numpy()
"""## (Distributions)"""
if flag_data_dist:
plot_variable_distributions(X_train, start=0, end=29)
plot_variable_distributions(X_train, start=30, end=59)
plot_variable_distributions(X_train, start=60, end=89)
plot_variable_distributions(X_train, start=90, end=112)
"""## Sliding window"""
X_train_sw, y_train_sw = opp_sliding_window(X_train, y_train)
X_val_sw, y_val_sw = opp_sliding_window(X_val, y_val)
X_test_sw, y_test_sw = opp_sliding_window(X_test, y_test)
if flag_sw_label == "last":
# last class of each sliding window
y_train_sw_label = np.asarray([[i[-1]] for i in y_train_sw]).reshape(-1, y_train_sw.shape[-1])
y_val_sw_label = np.asarray([[i[-1]] for i in y_val_sw]).reshape(-1, y_val_sw.shape[-1])
y_test_sw_label = np.asarray([[i[-1]] for i in y_test_sw]).reshape(-1, y_test_sw.shape[-1])
elif flag_sw_label == "mode":
# mode in each sliding window
y_train_sw_mode = np.asarray([collections.Counter(i.argmax(axis=-1)).most_common()[0][0] for i in y_train_sw])
y_train_sw_label = to_categorical(y_train_sw_mode)
y_val_sw_mode = np.asarray([collections.Counter(i.argmax(axis=-1)).most_common()[0][0] for i in y_val_sw])
y_val_sw_label = to_categorical(y_val_sw_mode)
y_test_sw_mode = np.asarray([collections.Counter(i.argmax(axis=-1)).most_common()[0][0] for i in y_test_sw])
y_test_sw_label = to_categorical(y_test_sw_mode)
# For evaluation
y_test_classes_sw_label = y_test_sw_label.argmax(axis=-1)
# Balancing data 3 (After sliding window)
if flag_balance_under3:
y_train_sw_n = y_train_sw_label.argmax(axis=-1)
min_train = pd.DataFrame(y_train_sw_label).value_counts().min()
for i in range(num_classes):
X_train_sw = np.delete(X_train_sw, np.where(y_train_sw_n == i)[0][min_train:], axis=0)
y_train_sw_n = np.delete(y_train_sw_n, np.where(y_train_sw_n == i)[0][min_train:], axis=0)
y_train_sw_label = to_categorical(y_train_sw_n)
elif flag_balance_over3:
y_train_sw_n = y_train_sw_label.argmax(axis=-1)
max_train = pd.DataFrame(y_train_sw_n)[0].value_counts().max()
num_labels = np.unique(y_train_sw_n).size
X_train_sw_balanced = np.empty((num_labels * max_train, X_train_sw.shape[1], X_train_sw.shape[2]))
y_train_sw_balanced = np.empty((num_labels * max_train, y_train_sw.shape[1], y_train_sw.shape[2]))
y_train_sw_label_balanced = np.empty((num_labels * max_train, y_train_sw_label.shape[1]))
X_train_sw_balanced[:X_train_sw.shape[0]] = X_train_sw
y_train_sw_balanced[:y_train_sw.shape[0]] = y_train_sw
y_train_sw_label_balanced[:y_train_sw_label.shape[0]] = y_train_sw_label
l = X_train_sw.shape[0]
for c in np.unique(y_train_sw_n):
num = np.count_nonzero(y_train_sw_n == c)
if max_train > num:
num_diff = max_train - num
idx_c = np.where(y_train_sw_n == c)[0]
idx_add = np.random.choice(idx_c, num_diff, replace=True)
for i in idx_add:
X_train_sw_balanced[l] = X_train_sw[i]
y_train_sw_balanced[l] = y_train_sw[i]
y_train_sw_label_balanced[l] = y_train_sw_label[i]
l += 1
X_train_sw = X_train_sw_balanced
y_train_sw = y_train_sw_balanced
y_train_sw_label = y_train_sw_label_balanced
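# (Random oversampling: windows from minority classes are duplicated at random, with replacement,
# until every class reaches the majority-class count max_train.)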
"""## [Summary]"""
if flag_summary:
# The number of samples (train, val, test)
num_samples = len(X_train) + len(X_val) + len(X_test)
num_train = X_train.shape[0]
num_val = X_val.shape[0]
num_test = X_test.shape[0]
num_classes_train = y_train.shape[-1]
num_classes_val = y_val.shape[-1]
num_classes_test = y_test.shape[-1]
y_counts = pd.concat([np.flip( | pd.DataFrame(y_train) | pandas.DataFrame |
import pandas as pd
import pytest
# Wawa on toy YSDA
@pytest.fixture
def toy_labels_result_zbs():
return pd.Series(
['no', 'yes', 'no', 'yes', 'no'],
index= | pd.Index(['t1', 't2', 't3', 't4', 't5'], name='task') | pandas.Index |
# -*- coding: utf-8 -*-
import sys
import json
import logging
from typing import Tuple, List
from docopt import docopt
from munch import Munch
import pandas as pd
from wetterdienst import (
__version__,
metadata_for_climate_observations,
get_nearby_stations,
)
from wetterdienst.additionals.geo_location import stations_to_geojson
from wetterdienst.additionals.time_handling import mktimerange, parse_datetime
from wetterdienst.additionals.util import normalize_options, setup_logging, read_list
from wetterdienst.api import DWDStationRequest
from wetterdienst.enumerations.column_names_enumeration import DWDMetaColumns
from wetterdienst.enumerations.parameter_enumeration import Parameter
from wetterdienst.enumerations.period_type_enumeration import PeriodType
from wetterdienst.enumerations.time_resolution_enumeration import TimeResolution
log = logging.getLogger(__name__)
def run():
"""
Usage:
wetterdienst stations --parameter=<parameter> --resolution=<resolution> --period=<period> [--station=] [--latitude=] [--longitude=] [--count=] [--distance=] [--persist] [--format=<format>] # noqa:E501
wetterdienst readings --parameter=<parameter> --resolution=<resolution> --period=<period> --station=<station> [--persist] [--date=<date>] [--format=<format>] # noqa:E501
wetterdienst readings --parameter=<parameter> --resolution=<resolution> --period=<period> --latitude= --longitude= [--count=] [--distance=] [--persist] [--date=<date>] [--format=<format>] # noqa:E501
wetterdienst about [parameters] [resolutions] [periods]
wetterdienst --version
wetterdienst (-h | --help)
Options:
--parameter=<parameter> Parameter/variable, e.g. "kl", "air_temperature", "precipitation", etc. # noqa:E501
--resolution=<resolution> Dataset resolution: "annual", "monthly", "daily", "hourly", "minute_10", "minute_1" # noqa:E501
--period=<period> Dataset period: "historical", "recent", "now"
--station=<station> Comma-separated list of station identifiers
--latitude=<latitude> Latitude for filtering by geoposition.
--longitude=<longitude> Longitude for filtering by geoposition.
--count=<count> Number of nearby stations when filtering by geoposition. # noqa:E501
--distance=<distance> Maximum distance in km when filtering by geoposition. # noqa:E501
--persist Save and restore data to filesystem w/o going to the network # noqa:E501
--date=<date> Date for filtering data. Can be either a single date(time) or # noqa:E501
an ISO-8601 time interval, see https://en.wikipedia.org/wiki/ISO_8601#Time_intervals. # noqa:E501
--format=<format> Output format. [Default: json]
--version Show version information
--debug Enable debug messages
-h --help Show this screen
Examples requesting stations:
# Get list of all stations for daily climate summary data in JSON format
wetterdienst stations --parameter=kl --resolution=daily --period=recent
# Get list of all stations in CSV format
wetterdienst stations --parameter=kl --resolution=daily --period=recent --format=csv # noqa:E501
# Get list of specific stations
wetterdienst stations --resolution=daily --parameter=kl --period=recent --station=1,1048,2667 # noqa:E501
# Get list of specific stations in GeoJSON format
wetterdienst stations --resolution=daily --parameter=kl --period=recent --station=1,1048,2667 --format=geojson # noqa:E501
Examples requesting readings:
# Get daily climate summary data for stations 44 and 1048
wetterdienst readings --station=44,1048 --parameter=kl --resolution=daily --period=recent # noqa:E501
# Optionally save/restore to/from disk in order to avoid asking upstream servers each time # noqa:E501
wetterdienst readings --station=44,1048 --parameter=kl --resolution=daily --period=recent --persist # noqa:E501
# Limit output to specific date
wetterdienst readings --station=44,1048 --parameter=kl --resolution=daily --period=recent --date=2020-05-01 # noqa:E501
# Limit output to specified date range in ISO-8601 time interval format
wetterdienst readings --station=44,1048 --parameter=kl --resolution=daily --period=recent --date=2020-05-01/2020-05-05 # noqa:E501
# The real power horse: Acquire data across historical+recent data sets
wetterdienst readings --station=44,1048 --parameter=kl --resolution=daily --period=historical,recent --date=1969-01-01/2020-06-11 # noqa:E501
# Acquire monthly data for 2020-05
wetterdienst readings --station=44,1048 --parameter=kl --resolution=monthly --period=recent,historical --date=2020-05 # noqa:E501
# Acquire monthly data from 2017-01 to 2019-12
wetterdienst readings --station=44,1048 --parameter=kl --resolution=monthly --period=recent,historical --date=2017-01/2019-12 # noqa:E501
# Acquire annual data for 2019
wetterdienst readings --station=44,1048 --parameter=kl --resolution=annual --period=recent,historical --date=2019 # noqa:E501
# Acquire annual data from 2010 to 2020
wetterdienst readings --station=44,1048 --parameter=kl --resolution=annual --period=recent,historical --date=2010/2020 # noqa:E501
# Acquire hourly data
wetterdienst readings --station=44,1048 --parameter=air_temperature --resolution=hourly --period=recent --date=2020-06-15T12 # noqa:E501
Examples using geospatial features:
# Acquire stations and readings by geoposition, request specific number of nearby stations. # noqa:E501
wetterdienst stations --resolution=daily --parameter=kl --period=recent --lat=50.2 --lon=10.3 --count=10 # noqa:E501
wetterdienst readings --resolution=daily --parameter=kl --period=recent --lat=50.2 --lon=10.3 --count=10 --date=2020-06-30 # noqa:E501
# Acquire stations and readings by geoposition, request stations within specific radius. # noqa:E501
wetterdienst stations --resolution=daily --parameter=kl --period=recent --lat=50.2 --lon=10.3 --distance=20 # noqa:E501
wetterdienst readings --resolution=daily --parameter=kl --period=recent --lat=50.2 --lon=10.3 --distance=20 --date=2020-06-30 # noqa:E501
"""
# Read command line options.
options = normalize_options(
docopt(run.__doc__, version=f"wetterdienst {__version__}")
)
# Setup logging.
debug = options.get("debug")
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
setup_logging(log_level)
if options.about:
about(options)
return
if options.stations:
df = metadata_for_climate_observations(
parameter=options.parameter,
time_resolution=options.resolution,
period_type=options.period,
)
if options.station:
station_ids = read_list(options.station)
df = df[df.STATION_ID.isin(station_ids)]
elif options.latitude and options.longitude:
nearby_stations, distances = get_nearby(options)
df = df[df.STATION_ID.isin(nearby_stations)]
if df.empty:
log.error("No data available for given constraints")
sys.exit(1)
elif options.readings:
if options.station:
station_ids = read_list(options.station)
elif options.latitude and options.longitude:
nearby_stations, distances = get_nearby(options)
station_ids = nearby_stations
else:
raise KeyError("Either --station or --lat, --lon required")
request = DWDStationRequest(
station_ids=station_ids,
parameter=read_list(options.parameter),
time_resolution=options.resolution,
period_type=read_list(options.period),
write_file=options.persist,
prefer_local=options.persist,
humanize_column_names=True,
tidy_data=True,
)
data = list(request.collect_data())
if not data:
log.error("No data available for given constraints")
sys.exit(1)
df = | pd.concat(data) | pandas.concat |
import pandas as pd
import os
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
root = '/Users/Gabe/Downloads/thesis spreadies'
# sg_1k_1k = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC1000_INIDEP1000_timeseries.csv'), parse_dates=True)
# sg_600_600 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC600_INIDEP600_timeseries.csv'), parse_dates=True)
# sg_600_300 = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC600_INIDEP300_timeseries.csv'), parse_dates=True)
# sg_600_150 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC600_INIDEP150_timeseries.csv'), parse_dates=True)
#
# sg_300_300 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC300_INIDEP300_timeseries.csv'), parse_dates=True)
# sg_300_150 = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC300_INIDEP150_timeseries.csv'), parse_dates=True)
# sg_300_0 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC300_INIDEP0_timeseries.csv'), parse_dates=True)
#
# sg_150_150 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC150_INIDEP150_timeseries.csv'), parse_dates=True)
# sg_150_75 = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC150_INIDEP75_timeseries.csv'), parse_dates=True)
# sg_150_0 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC150_INIDEP0_timeseries.csv'), parse_dates=True)
#
# print sg_1k_1k.head()
#
# vcm_600_600 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP600_timeseries.csv'), parse_dates=True)
# vcm_600_300 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP300_timeseries.csv'), parse_dates=True)
# vcm_600_150 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP150_timeseries.csv'), parse_dates=True)
# vcm_300_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP300.csv'), parse_dates=True)
# vcm_300_150 = pd.read_csv(os.path.join(root,'ext_we_depletions_vcm_SWHC300_INIDEP150.csv'), parse_dates=True)
# vcm_300_0 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP0.csv'), parse_dates=True)
# plt.plot([1,2,3], [3, 5,7])
# plt.show()
vcm_600_600 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC600_INIDEP600.csv'), parse_dates=True)
vcm_600_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC600_INIDEP300.csv'), parse_dates=True)
vcm_600_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC600_INIDEP0.csv'), parse_dates=True)
vcm_300_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP300.csv'), parse_dates=True)
vcm_300_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP150.csv'), parse_dates=True)
vcm_300_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP0.csv'), parse_dates=True)
sg_600_600 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC600_INIDEP600.csv'), parse_dates=True)
sg_600_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC600_INIDEP300.csv'), parse_dates=True)
sg_600_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC600_INIDEP0.csv'), parse_dates=True)
sg_300_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC300_INIDEP300.csv'), parse_dates=True)
sg_300_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC300_INIDEP150.csv'), parse_dates=True)
sg_300_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC300_INIDEP0.csv'), parse_dates=True)
sg_150_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP150.csv'), parse_dates=True)
sg_150_075 = pd.read_csv(os.path.join(root,'ext_we_depletions_sg_SWHC150_INIDEP75.csv'), parse_dates=True)
sg_150_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP0.csv'), parse_dates=True)
sg_50_050 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC50_INIDEP50.csv'), parse_dates=True)
sg_50_025 = pd.read_csv(os.path.join(root,'ext_we_depletions_sg_SWHC50_INIDEP25.csv'), parse_dates=True)
sg_50_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC50_INIDEP0.csv'), parse_dates=True)
vcm_150_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP150.csv'), parse_dates=True)
vcm_150_075 = pd.read_csv(os.path.join(root,'ext_we_depletions_sg_SWHC150_INIDEP75.csv'), parse_dates=True)
vcm_150_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP0.csv'), parse_dates=True)
# # plt.plot([1,2,3], [3, 5,7])
# # plt.show()
#
# vcm_600_600 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP600_timeseries.csv'), parse_dates=True)
# vcm_600_300 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP300_timeseries.csv'), parse_dates=True)
# vcm_600_150 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP150_timeseries.csv'), parse_dates=True)
#
# vcm_300_300 = pd.read_csv(os.path.join(root, 'we_depletions_vcm_SWHC300_INIDEP300_timeseries.csv'), parse_dates=True)
# vcm_300_150 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC300_INIDEP150_timeseries.csv'), parse_dates=True)
# vcm_300_0 = pd.read_csv(os.path.join(root, 'we_depletions_vcm_SWHC300_INIDEP0_timeseries.csv'), parse_dates=True)
# print(sg_600_600['date'])
#
# plt.plot(sg_600_150['date'], sg_600_150['depletion'], label='sg')
# # plt.grid()
# plt.legend()
# plt.show()
# # plt.savefig(os.path.join(root, 'testfig.png'))
years = mdates.YearLocator()
months = mdates.MonthLocator()
years_fmt = mdates.DateFormatter('%Y')
### ===== SG 50 ======
fig, (ax1, ax2) = plt.subplots(nrows=2, sharey=False, sharex=True)
ax1.plot(pd.to_datetime(sg_50_000['date']), sg_50_000['depletion'], color='r', label='swhc_50_inidep_000', linewidth=5)
ax1.plot(pd.to_datetime(sg_50_025['date']), sg_50_025['depletion'], color='b', label='swhc_50_inidep_025', linewidth=3)
ax1.plot(pd.to_datetime(sg_50_050['date']), sg_50_050['depletion'], color='g', label='swhc_50_inidep_050', linewidth=1)
ax1.set_xlabel('Date')
ax1.set_ylabel('Depletion (mm)')
ax1.set_title('Depletion with Given SWHC and Initial Depletion - Sevilleta')
ax1.legend()
ax1.grid()
ax2.plot(pd.to_datetime(sg_50_000['date']), sg_50_000['recharge_ro'], color='r', label='swhc_50_inidep_000', linewidth=3)
ax2.plot(pd.to_datetime(sg_50_025['date']), sg_50_025['recharge_ro'], color='b', label='swhc_50_inidep_025', linewidth=2)
ax2.plot(pd.to_datetime(sg_50_050['date']), sg_50_050['recharge_ro'], color='g', label='swhc_50_inidep_050', linewidth=1)
ax2.set_xlabel('Date')
ax2.set_ylabel('Recharge (mm)')
ax2.legend()
ax2.grid()
ax2.set_title('Recharge with Given SWHC and Initial Depletion - Sevilleta')
plt.subplots_adjust(hspace=1)
plt.show()
### ===== vcm 150 ======
fig, (ax1, ax2) = plt.subplots(nrows=2, sharey=False, sharex=True)
ax1.plot(pd.to_datetime(vcm_150_000['date']), vcm_150_000['depletion'], color='r', label='swhc_150_inidep_000', linewidth=5)
ax1.plot(pd.to_datetime(vcm_150_075['date']), vcm_150_075['depletion'], color='b', label='swhc_150_inidep_075', linewidth=3)
ax1.plot(pd.to_datetime(vcm_150_150['date']), vcm_150_150['depletion'], color='g', label='swhc_150_inidep_150', linewidth=1)
ax1.set_title('Depletion with Given SWHC and Initial Depletion - <NAME>')
ax1.grid()
ax1.legend()
ax2.plot(pd.to_datetime(vcm_150_000['date']), vcm_150_000['recharge_ro'], color='r', label='swhc_150_inidep_000', linewidth=5)
ax2.plot(pd.to_datetime(vcm_150_075['date']), vcm_150_075['recharge_ro'], color='b', label='swhc_150_inidep_075', linewidth=3)
ax2.plot( | pd.to_datetime(vcm_150_150['date']) | pandas.to_datetime |
# flask imports
from datetime import datetime
from eve.auth import requires_auth
from eve.render import send_response
from flask import request, abort, Blueprint, g, Response
from flask import current_app as app
# utils imports
import numpy as np
import pandas as pd
from auth.authentication import EVETokenAuth
edinet_methods = Blueprint('edinet_methods', __name__)
"""
EVE docs has been modified to add the desired blueprint functions to the "API DOC". To use it:
1. The method's name must start with "api_"
2. Add a docstring to the function containing the JSON:
{
"doc": {
"title": "title",
"body": "body",
},
#for each method i.e GET (optional, if not provided, default from the function signature will be used)
"GET": {
"label": "information of GET method",
"params" [{"name": "first_param", "type": "type", "required":"false", "doc":"aditional_info", "**kwargs": "more_inofo"},..]
}
}
"""
@edinet_methods.route("/league_table_summarised/<leagueTableId>", methods=['GET'])
@requires_auth(EVETokenAuth)
def api_get_league_table_summarised(leagueTableId):
"""
{
"doc": {
"title": "league table summarised help",
"body": "<p> Obtain the sumarized of the league table </p>"
},
"GET": {
"label": "Obtain the league_table sumarized",
"params":[{"name": "leagueTableId", "type":"string", "required":"true", "doc":"id of the leage_table"},
{"name": "period", "type":"string", "required":"false", "info":"the period to sumarize", "values": ["D", "W", "M", "Y"]},
{"name": "type", "type":"list", "required":"false", "info":"the field to sumarize"}]
}
}
"""
companyId = g.get("auth_value")
# params from url
period = request.args.get('period', 'M')
type = request.args.get('type', ['savings', 'smileys'])
if not isinstance(type, list):
type = type.split(',') # type=savings,smileys in the url
periodsAllowed = ['D', 'W', 'M', 'Y'] # Weekly means Monday to Sunday
period = period[0].upper()
# retrieve the info from mongo
query = {'companyId': companyId, 'leagueTableId': leagueTableId}
doc = app.data.driver.db['league_table'].find_one(query, {'_id': 0}, timeout=False)
try:
reporting_Units = doc['reporting_Units']
except:
reporting_Units = []
# retrieve the baseline info from mongo and build the result for each reporting unit
res_report = {}
for reportingUnit in reporting_Units:
query_reporting = {'companyId': companyId, 'reportingUnitId': reportingUnit}
doc_reporting = app.data.driver.db['reporting_units'].find_one(query_reporting, timeout=False)
if doc_reporting:
modelling_Units = doc_reporting['modelling_Units']
res_report[reportingUnit] = []
for modelUnit in modelling_Units:
# update_baseline(companyId, modellingUnitId) # TO DO
query_baseline = {'companyId': companyId, 'modellingUnitId': modelUnit}
doc_baseline = app.data.driver.db['baselines'].find_one(query_baseline,
{'prediction': 1, 'values': 1, 'smileys': 1,
'timestamps': 1}, timeout=False)
if doc_baseline:
res_parcial = {}
# creo el dataframe
df = pd.DataFrame.from_records(
{'values': doc_baseline['values'], 'smileys': doc_baseline['smileys'],
'prediction': doc_baseline['prediction'], 'timestamps': doc_baseline['timestamps']})
df = df.set_index(pd.DatetimeIndex(df['timestamps']))
if df.empty != True and period in periodsAllowed:
for typ in type:
if typ in doc_baseline.keys() or typ == 'savings':
if typ in ['savings', 'values', 'prediction']:
df_grouped = df.groupby(pd.TimeGrouper(freq=period)).sum()
else:
df_grouped = df.groupby(pd.TimeGrouper(freq=period)).mean()
if typ == 'savings':
res_parcial[typ] = df_grouped['prediction'] - df_grouped['values']
else:
res_parcial[typ] = df_grouped[typ]
res_parcial[typ] = res_parcial[typ].where(( | pd.notnull(res_parcial[typ]) | pandas.notnull |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 14:02:03 2019
@author: <NAME>
"""
import pandas as pd
from pandas import ExcelWriter
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
def match2Lists(list1,list2):
"""
Loops over a list and returns fuzzy matches found in a second list.
Inputs:
list1 - list of terms to search for in the master list
list2 - master list that is searched for matches over and over
"""
TopMatch = []
TopScore = []
TopRowIdx = []
for member in list1:
x=process.extractOne(member, list2)
TopMatch.append(x[0])
TopScore.append(x[1])
TopRowIdx.append(x[2])
return TopMatch, TopScore, TopRowIdx
def createRUID_List(rowIdxList, headerStr):
"""
Loops over a series containing row indices and returns a list of RUID strings.
Inputs:
rowIdxList - collection of row index values
headerStr - DataFrame header string value for column containing RUIDs
Outputs:
new list containing RUID strings
"""
RUID_List = []
for aRowIdx in rowIdxList:
workingRUID=df[headerStr].iloc[aRowIdx]
RUID_List.append(workingRUID)
return RUID_List
df = pd.read_excel("abcd_rucdr_master_forPython.xlsx")
print ('Finished reading in input file.')
#blackList=['NDAR_INV']
#for pattern in blackList:
# df['pGUID_Rutgers'] = df['pGUID_Rutgers'].replace(pattern, '')
#datasets
Mismatch_DAIC_IDs = df.iloc[1949:2201,0].dropna()
print (Mismatch_DAIC_IDs)
Mismatch_Rutgers_IDs = df.iloc[1949:2201,1].dropna()
print (Mismatch_Rutgers_IDs)
Unique_DAIC_IDs = df.iloc[1403:1948,0].dropna()
print (Unique_DAIC_IDs)
Unique_Rutgers_IDs = df.iloc[0:1403,1].dropna()
print (Unique_Rutgers_IDs)
AllRutgersIDs = df['rucdr.SUBCODE'].dropna()
AllDAIC_IDs = df['abcd.id_redcap'].dropna()
print ('About to start first match2collections.')
BestMatch_Mismatch_DtoR, BestScore_Mismatch_DtoR, BestRowIdx_Mismatch_DtoR = match2Lists(Mismatch_DAIC_IDs,AllRutgersIDs)
print ('Just finished first match2collections.')
print ('About to start second match2collections.')
BestMatch_Mismatch_RtoD, BestScore__Mismatch_RtoD, BestRowIdx_Mismatch_RtoD = match2Lists(Mismatch_Rutgers_IDs, AllDAIC_IDs)
print ('Just finished second match2collections.')
print ('About to start third match2collections.')
BestMatch_Unique_DtoR, BestScore_Unique_DtoR, BestRowIdx_Unique_DtoR = match2Lists(Unique_DAIC_IDs, AllRutgersIDs)
print ('Just finished third match2collections.')
print ('About to start fourth match2collections.')
BestMatch_Unique_RtoD, BestScore_Unique_RtoD, BestRowIdx_Unique_RtoD = match2Lists(Unique_Rutgers_IDs, AllDAIC_IDs)
print ('Just finished fourth match2collections.')
df['BestMatchdf_Mismatch_DtoR']=pd.Series(BestMatch_Mismatch_DtoR)
df['BestScoredf_Mismatch_DtoR']=pd.Series(BestScore_Mismatch_DtoR)
df['BestRowIdxdf_Mismatch_DtoR']=pd.Series(BestRowIdx_Mismatch_DtoR)
df['BestMatchdf_Mismatch_RtoD']=pd.Series(BestMatch_Mismatch_RtoD)
df['BestScoredf_Mismatch_RtoD']=pd.Series(BestScore__Mismatch_RtoD)
df['BestRowIdxdf_Mismatch_RtoD']=pd.Series(BestRowIdx_Mismatch_RtoD)
df['BestMatchdf_Unique_DtoR']= | pd.Series(BestMatch_Unique_DtoR) | pandas.Series |
# coding=utf-8
# Author: <NAME> & <NAME>
# Date: Jan 06, 2021
#
# Description: Parse Epilepsy Foundation Forums and extract dictionary matches
#
import os
import sys
#
#include_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'include'))
include_path = '/nfs/nfs7/home/rionbr/myaura/include'
sys.path.insert(0, include_path)
#
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
| pd.set_option('display.width', 1000) | pandas.set_option |
"""A module to store some results that are parsed from .txt files."""
import os
from configparser import ConfigParser
from types import SimpleNamespace
import pandas as pd
import numpy as np
from skm_pyutils.py_table import list_to_df
from dictances.bhattacharyya import bhattacharyya
from .main import main as ctrl_main
here = os.path.dirname(os.path.abspath(__file__))
def parse_cfg(name):
"""Parse the configs at configs/name."""
cfg_path = os.path.join(here, "..", "configs", name)
cfg = ConfigParser()
cfg.read(cfg_path)
return cfg
def df_from_dict(dict, cols):
"""Form a dataframe from a dictionary with cols, keys are considered an entry."""
vals = []
for k, v in dict.items():
vals.append([k, v])
df = pd.DataFrame(vals, columns=cols)
return df
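# e.g. df_from_dict({"CA1": 0.41, "CA3": 0.74}, ["Region", "Value"]) -> a two-row DataFrame
# (illustrative keys and values only).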
def store_region_results():
np.random.seed(42)
vals = []
names = [
"Tetrode CA3 CA1",
"MOp to SSp-ll",
"Figure 1 E",
"Max distance 3",
"Figure 1 A",
]
mean_vals = [
0.4248 / 5.0,
6.7371 / 79.0,
8.512 / 20.0,
8.86 / 25.0,
0.7340 / 3.0,
]
stats_vals = [
0.4117 / 5.0,
6.20478 / 79.0,
8.511 / 20.0,
9.27 / 25.0,
0.7346 / 3.0,
]
for i in range(len(names)):
vals.append([names[i], mean_vals[i], "Monte Carlo simulation"])
vals.append([names[i], stats_vals[i], "Statistical estimation"])
cols = ["Connectivity", "Expected proportion connected", "Calculation"]
df = | pd.DataFrame(vals, columns=cols) | pandas.DataFrame |
from aggregate.decennial_census.decennial_census_001020 import decennial_census_001020
from aggregate.aggregation_helpers import order_aggregated_columns
import pandas as pd
from internal_review.set_internal_review_file import set_internal_review_files
from utils.PUMA_helpers import clean_PUMAs, puma_to_borough
dcp_pop_races = ["anh", "bnh", "hsp", "onh", "wnh"]
race_labels = ["", "_wnh", "_bnh", "_hsp", "_anh", "_onh"]
pop_labels = ["Total Population", "White", "Black", "Hispanic", "Asian", "Other"]
def nycha_tenants(geography: str, write_to_internal_review=False):
assert geography in ["citywide", "borough", "puma"]
clean_data = load_clean_nycha_data()
census20 = decennial_census_001020(
geography=geography, year="1519"
) # this in fact pulls the 2020 census; by design the "1519" year key is mapped to 2020
if geography == "puma":
final = get_percentage( | pd.concat([census20, clean_data], axis=1) | pandas.concat |
"""
Contains the machine learning code for Taxonomist
Authors:
<NAME> (1), <NAME> (1), <NAME> (1), <NAME> (2),
<NAME> (2), <NAME> (1), <NAME> (1)
Affiliations:
(1) Department of Electrical and Computer Engineering, Boston University
(2) Sandia National Laboratories
This work has been partially funded by Sandia National Laboratories. Sandia
National Laboratories is a multimission laboratory managed and operated by
National Technology and Engineering Solutions of Sandia, LLC., a wholly owned
subsidiary of Honeywell International, Inc., for the U.S. Department of
Energy’s National Nuclear Security Administration under Contract DENA0003525.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils.validation import check_is_fitted
def generate_features(timeseries, feature_tuples, trim=60):
""" Generate features from timeseries
Parameters
----------
timeseries : pd.DataFrame[time, metric]
DataFrame of metrics over time.
feature_tuples : Iterable[(feature_name, feature_function)]
List of feature name strings and feature functions.
trim : int
The amount of time to trim from both ends to remove startup and
finalization steps.
Returns
-------
features : Array[metric * feature_types]
The calculated list of features.
"""
if trim != 0:
timeseries = timeseries[trim:-trim]
features = []
for col in timeseries.columns:
for name, func in feature_tuples:
features.append(pd.Series(
name=name + '_' + col,
data=func(timeseries[col])
))
return pd.concat(features, axis=1)
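# Illustrative usage (the feature functions are assumptions, not part of this module):
# generate_features(df, [('mean', np.mean), ('std', np.std)]) yields one 'mean_<metric>'
# and one 'std_<metric>' column per metric column in the input timeseries.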
class Taxonomist(OneVsRestClassifier):
""" The main class implementing Taxonomist
Parameters
----------
estimator : estimator object
The main estimator that the classifier is going to use.
n_jobs : int
Number of parallel processes to use.
threshold : float
The confidence threshold.
"""
def __init__(self, estimator, n_jobs=1, threshold=0.9):
self.threshold = threshold
self.scaler = MinMaxScaler()
super().__init__(estimator, n_jobs)
def fit(self, X, y):
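# Scale every feature to [0, 1] before fitting the one-vs-rest estimators.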
norm_X = self.scaler.fit_transform(X)
super().fit(norm_X, y)
def decision_function(self, X):
""" Reports the distance from the decision boundary
for classifiers that support it.
We need to override the `decision_function` from scikit-learn because
we need a likelihood estimate for classifiers that only report
`predict_proba`. After pull request
[#10612](https://github.com/scikit-learn/scikit-learn/pull/10612)
is merged, we can use the normal `decision_function` from sklearn.
"""
check_is_fitted(self, 'estimators_')
if len(X) == 0:
return pd.DataFrame(data=[], index=X.index, columns=self.classes_)
try:
T = np.array([est.decision_function(X).ravel()
for est in self.estimators_]).T
except AttributeError:
T = np.array([e.predict_proba(X)[:, 1] * 2 - 1
for e in self.estimators_]).T
if len(self.estimators_) == 1:
T = T.ravel()
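        # T holds one signed score per class: raw margins for estimators that expose
        # decision_function, otherwise probabilities rescaled to [-1, 1] via p * 2 - 1,
        # so downstream thresholding treats both kinds of estimator uniformly.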
return | pd.DataFrame(data=T, columns=self.classes_, index=X.index) | pandas.DataFrame |
# python3
# coding: utf-8
# import threading
import openpyxl as px
import pandas as pd
from helper_Word import helper_Word
from Docx_to_pdf import Docx_to_PDF
from PDF_Combiner import Combine_PDF
from shutil import copyfile
import os
# from collections import OrderedDict
import time
import json_helper
# import Excel_to_Word_in_arbeit
# from helper_Word import helper_Word as hw
__version__ = "0.0.16"
__author__ = "<NAME>"
__repo__ = r"https://github.com/StefanHol/SchoolReport_Excel2Word2PDF"
__config_json__ = "config.json"
__Anleitung__ = "SchoolReport Excel2Word2PDF.pdf"
# rebuild_GUI = True
rebuild_GUI = False
# # ToDo: Add certificate
# # Could not find a suitable TLS CA certificate bundle, invalid path
# # https://github.com/pyinstaller/pyinstaller/issues/6352
used_Qt_Version = 0
try:
from PyQt5.QtWidgets import QMainWindow, QInputDialog, QMessageBox
from PyQt5.QtWidgets import QWidget, QComboBox, QCheckBox, QLineEdit, QFileDialog, QTableWidgetItem
from PyQt5.QtGui import QFont, QIcon, QPixmap
from PyQt5.QtWidgets import QTableView, QAbstractItemView, QCompleter, QLineEdit, QHeaderView
from PyQt5.QtCore import QAbstractTableModel, Qt
used_Qt_Version = 5
except Exception as e:
print(e)
exit()
pass
def compile_GUI():
if used_Qt_Version == 4:
print("Compile QUI for Qt Version: " + str(used_Qt_Version))
os.system("pyuic4 -o GUI\Converter_ui.py GUI\Converter.ui")
elif used_Qt_Version == 5:
print("Compile QUI for Qt Version: " + str(used_Qt_Version))
os.system("pyuic5 -o GUI\Converter_ui.py GUI\Converter.ui")
if rebuild_GUI:
compile_GUI()
from GUI.Converter_ui import Ui_MainWindow
import logging
logging.basicConfig(level=logging.WARNING)
class PandasModel(QAbstractTableModel):
"""
Class to populate a table view with a pandas dataframe
https://stackoverflow.com/questions/31475965/fastest-way-to-populate-qtableview-from-pandas-data-frame
"""
def __init__(self, data, parent=None):
QAbstractTableModel.__init__(self, parent)
self._data = data
logger = logging.getLogger(__name__)
logger.debug("init Pandas Model")
def rowCount(self, parent=None):
return self._data.shape[0]
def columnCount(self, parent=None):
return self._data.shape[1]
def data(self, index, role=Qt.DisplayRole):
if index.isValid():
if role == Qt.DisplayRole:
return str(self._data.iloc[index.row(), index.column()])
return None
def headerData(self, col, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self._data.columns[col]
return None
class create_letter_of_reference():
def __init__(self):
self.logger = logging.getLogger("SR_E2W2P.create_letter_of_reference")
self.logger.info("init create_letter_of_reference")
self.Array_CheckBox_Namen = None
self.Text_Feld_Namen = None
self.wb = None
self.df = None
self.srcFileName = ""
self.destination_Folder = ""
self.Excel_Filename = ""
self.wichtige_meldung = ""
def read_word_information_fields(self, srcFileName):
self.srcFileName = srcFileName
self.logger.info("read_word_information_fields")
word_doc = helper_Word()
word_doc.init_file(srcFileName, False)
self.logger.info("get_all_fieldnames_from_word")
word_doc.get_all_fieldnames_from_word()
self.logger.info("find_arrays_and_text_field_names")
word_doc.find_arrays_and_text_field_names()
self.logger.info("find_arrays_and_text_field_names: done")
self.Array_CheckBox_Namen = word_doc.Text_FieldArray_Row_Names
self.Text_Feld_Namen = word_doc.Text_Field_Names
self.logger.info("Array_CheckBox_Namen:")
self.logger.info(self.Array_CheckBox_Namen)
self.logger.info("Text_Feld_Namen:")
self.logger.info(self.Text_Feld_Namen)
try:
word_doc.doc.Close()
self.logger.info("Word Document Closed")
except Exception as e:
self.logger.error(e)
return self.Array_CheckBox_Namen, self.Text_Feld_Namen
def read_excel_file_to_pandas(self, Excel_Filename):
self.logger.debug("read_excel_file_to_pandas")
# print("Excel_Filename")
self.Excel_Filename = Excel_Filename
# print("self.Excel_Filename: ", self.Excel_Filename)
self.wb = px.load_workbook(self.Excel_Filename, data_only=True)
self.logger.info("loaded Excel File: {}".format(Excel_Filename))
self.logger.info("Array_CheckBox_Namen\n{}".format(self.Array_CheckBox_Namen))
self.df = self.create_dataframe_for_InputFields(self.Array_CheckBox_Namen, self.Text_Feld_Namen)
# print("self.df", self.df)
self.df = self.remove_empty_data(self.df)
self.logger.debug("self.df {}".format(self.df))
return self.df
def get_values_By_range_name(self, range_name):
value_array = []
self.logger.debug("check: {}".format(range_name))
try:
address = list(self.wb.defined_names[range_name].destinations)
# address
for sheetname, cellAddress in address:
cellAddress = cellAddress.replace('$', '')
cellAddress
worksheet = self.wb[sheetname]
for i in range(0, len(worksheet[cellAddress])):
for item in worksheet[cellAddress][i]:
# print(item.value)
value_array.append(item.value)
except Exception as e:
self.logger.error("Error: Textfeld {} ist in Worddokument definiert,".format(range_name) +
"kann aber nicht im Exceldokument gefunden werden.\n{}".format(e))
self.wichtige_meldung = ("Warnung: Textfeld {} ist in Worddokument definiert,".format(range_name) +
"kann aber nicht im Exceldokument gefunden werden.\n{}\n".format(e) +
"Vorlagen überprüfen!\nDie entsprechenden Daten werden nicht übertragen.\n")
return value_array
def create_folder_if_not_exist(self, folder):
if not os.path.exists(folder):
os.mkdir(folder)
def ExtractFileName(self, mypath):
name = mypath.split(os.path.sep)
return name[len(name) - 1]
def zahl_zu_bewertung(self, value):
if str(value) == "1":
return "sehr gut"
if str(value) == "2":
return "gut"
if str(value) == "3":
return "schlecht"
if str(value) == "4":
return "sehr schlecht"
def Copy_word_file_and_write_data(self, df, targetname, df_index):
# copy File and create target file object
self.create_folder_if_not_exist(self.destination_Folder)
        FileName = self.ExtractFileName(
            self.srcFileName.replace(".docx", "_-_" + (df["Name"].iloc[df_index]).replace(" ", "_") + ".docx"))
# dstFileName = srcFileName.replace(".docx", "_-_" + (df["Name"].iloc[df_index]).replace(" ", "_") + ".docx")
logging.info(FileName)
dstFileName = self.destination_Folder + os.path.sep + FileName
# print(srcFileName,"\n"+ dstFileName)
copyfile(self.srcFileName, dstFileName)
# wordFile.doc.Close()
wordFile = helper_Word()
wordFile.init_file(dstFileName, False)
self.logger.info(FileName)
for ColumnName in df:
# print(df["Name"].iloc[df_index], ":\t",ColumnName , "\t", df[ColumnName].iloc[df_index])
# ToDo: Call Input Function here
SchülerName = df["Name"].iloc[df_index]
ZellenNameOderArray = ColumnName
Cellen_Oder_Array_Wert = df[ColumnName].iloc[df_index]
self.logger.info("Name: {} \t {} \t {}".format(SchülerName, ColumnName, Cellen_Oder_Array_Wert))
# ToDo: Call Input Function here
try:
counter = 0
for TextElement in self.Text_Feld_Namen:
# print(counter)
# print(TextElement)
counter += 1
outputText = df[TextElement].iloc[df_index]
try:
try:
self.logger.info("int: TextElement {} {}".format(TextElement, int(outputText)))
outputText = int(outputText)
except Exception as e:
self.logger.info("str: TextElement {} {}".format(TextElement, str(outputText)))
outputText = str(outputText)
# print(e, str(outputText), str(TextElement))
wordFile.set_WordField_Text(TextElement, str(outputText))
self.logger.info("TextElement {} {}".format(TextElement, wordFile.get_WordField_Text(TextElement)))
except Exception as e:
self.logger.debug("{} {} {}".format(e, str(outputText), str(TextElement)))
# print(e, str(outputText), str(TextElement))
counter = 0
for ArrayElement in self.Array_CheckBox_Namen:
# print(counter)
counter += 1
outputText = df[ArrayElement].iloc[df_index]
#########################################################################
                    # Correct the grade checkbox and text entries
#########################################################################
SetCheckBox = False
realerror = True
error_message = ""
try:
outputText = int(outputText)
self.logger.debug("int: ArrayElement {} {}".format(outputText, ArrayElement))
self.logger.debug(str(ArrayElement) + " = " + self.zahl_zu_bewertung(outputText))
realerror = False
except Exception as e:
try:
outputText = str(outputText)
self.logger.debug("str: ArrayElement {} {}".format(outputText, ArrayElement))
self.logger.debug(str(ArrayElement) + " = " + self.zahl_zu_bewertung(outputText))
realerror = False
except Exception as e:
realerror = True
self.logger.debug("{} try of try outputText: {} {}".format(e, ArrayElement, outputText))
error_message = "{}: try of try outputText: {}, {}".format(e, ArrayElement, outputText)
# print(e, ArrayElement, outputText)
pass
try:
if not (outputText is None):
CheckBox = ArrayElement + str(outputText - 1)
SetCheckBox = True
realerror = False
else:
self.logger.debug("Skip Checkbox: {} {}".format(outputText, ArrayElement))
pass
except Exception as e:
realerror = True
self.logger.debug("tryexcept 2: {} {} {}".format(e, outputText, ArrayElement))
error_message = "warning: nichts in checkbox eingetragen {}, {}, {}".format(e, outputText,
ArrayElement)
SetCheckBox = False
# print(CheckBox)
if SetCheckBox:
realerror = False
wordFile.check_checkbox(CheckBox)
# save & close target file here
if realerror:
self.logger.debug("Hinweis: {}".format(error_message))
except Exception as e:
self.logger.error("Fehler hier: {}".format(e))
finally:
try:
wordFile.save_and_close()
except Exception as e:
print(e)
return FileName
def create_wordDocxs_from_dataframe(self, df):
# print(df.head())
if True:
start_time = time.time()
file_name_list = []
for index in range(len(df)):
# print(index)
# #ToDo: Call Input Function here
self.logger.debug(str(df["Name"].iloc[index]))
file_name = self.Copy_word_file_and_write_data(df, df["Name"].iloc[index], index)
file_name_list.append(file_name)
if True:
self.logger.info("Creation takes %.2f seconds" % (time.time() - start_time))
# print("create_wordDocxs_from_dataframe", file_name_list)
return file_name_list
def create_dataframe_for_InputFields(self, Array_CheckBox_Namen, Text_Feld_Namen):
self.logger.debug("create_dataframe_for_InputFields")
df = | pd.DataFrame({}) | pandas.DataFrame |
import os
import time
import pickle
import numpy as np
import pandas as pd
from scipy import stats
from IPython.display import display
# Base classes
from sklearn.base import ClassifierMixin, TransformerMixin
# Random search & splitting
from sklearn.model_selection import RandomizedSearchCV, train_test_split
# Classifiers
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from xgboost import XGBClassifier
# Transformers
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.preprocessing import FunctionTransformer
# Metrics
from sklearn.metrics import make_scorer, accuracy_score, f1_score, roc_auc_score
# transformers
from twitter_utils import twitter_feature_generation_transformer, twitter_feature_selection_transformer
# custom scorer
def afr(y_true, y_pred, **kwargs):
"""The afr scoring function gives a weighted average of accuracy,
balanced F1 and roc_auc scores, with .2, .3 and .5 respectively.
"""
accuracy = accuracy_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred, average="weighted")
roc_auc = roc_auc_score(y_true, y_pred, average="weighted")
return (accuracy * .2) + (f1 * .3) + (roc_auc * .5)
afr_scorer = make_scorer(afr, greater_is_better=True)
afr_scorer.name = 'afr'
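# Worked example: accuracy=0.90, f1=0.85 and roc_auc=0.80 give
# 0.90*0.2 + 0.85*0.3 + 0.80*0.5 = 0.18 + 0.255 + 0.40 = 0.835.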
def passthrough(df):
return df.copy()
passthrough_transformer = FunctionTransformer(passthrough)
# Base class for pipeline
class BinaryClassifierPipelineStep(TransformerMixin, ClassifierMixin):
def __init__(self, context={}):
self._context = context
def set_context(self, context):
self._context = context
return self
def get_fitted_classifiers(self):
return self._context._clfs
def get_best_classifier(self):
return self._context._best
def get_info(self):
return self._context._info
class DataSplitStep(BinaryClassifierPipelineStep):
"""
"""
def __init__(self, training_size=.64, validation_size=.16, splitting=None):
self._training = training_size
self._validation = validation_size
if callable(splitting):
self._splitter = splitting
else:
self._splitter = train_test_split
def transform(self, X, y):
t_size = self._training if isinstance(self._training, int) else int(len(X)*self._training)
X_tv, X_train, y_tv, y_train = self._splitter(X, y, test_size=t_size, random_state=42)
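        # Note: train_test_split returns (train, test), so the slice sized by
        # self._training is taken from the *test* slot and used as the training set.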
        # Split the remaining data into validation and test sets
t_size = self._validation if isinstance(self._validation, int) else int(len(X)*self._validation)
X_val, X_test, y_val, y_test = self._splitter(X_tv, y_tv, test_size=t_size, random_state=42)
return X_train, X_val, X_test, y_train, y_val, y_test
class FeatureEngineeringStep(BinaryClassifierPipelineStep):
"""
"""
def __init__(self, feature_generation=None, feature_selection=None, feature_scaling=None, context={}):
self._context = context
# Define the trasnformer to be used for feature generation
if callable(getattr(feature_generation, "transform", None)):
self._feature_generation = feature_generation
else:
self._feature_generation = twitter_feature_generation_transformer
# Define the trasnformer to be used for feature selection
if callable(getattr(feature_selection, "transform", None)):
self._feature_selection = feature_selection
else:
self._feature_selection = twitter_feature_selection_transformer
# The transformer used for feature scaling will be defined after the
# pipeline context is set
self._scaling = feature_scaling
def _setup(self):
# Define the transformer to be used for feature scaling
self._context._scaling = self._scaling
if self._scaling == 'min-max':
self._context._scaler = MinMaxScaler()
elif self._scaling == 'standard':
self._context._scaler = StandardScaler()
elif callable(getattr(self._scaling, "transform", None)):
self._context._scaling = getattr(self._scaling, "name", type(self._scaling))
self._context._scaler = self._scaling
else:
self._context._scaling = 'none'
self._context._scaler = passthrough_transformer
def fit_transform(self, X):
# Setup scaler on the pipeline context
self._setup()
# Run feature generation transform
X_t = self._feature_generation.transform(X)
# Run feature selection transform
X_t = self._feature_selection.transform(X_t)
# run feature scaling transform
return self._context._scaler.fit_transform(X_t)
def transform(self, X):
X_t = self._feature_generation.transform(X)
X_t = self._feature_selection.transform(X_t)
return self._context._scaler.transform(X_t)
class ModelTuningStep(BinaryClassifierPipelineStep):
def __init__(self, scoring=None, n_iter=20):
# Define number of iteractions for random search
self._n_iter = n_iter
# the scoring will be setup after context is set
self._scoring = scoring
# Setup must run after pipeline context is set
def _setup(self):
# Define the scoring function used in cross-validation
self._context._scoring = self._scoring
if self._scoring == 'roc_auc':
self._context._scorer = make_scorer(roc_auc_score)
elif self._scoring == 'f1':
self._context._scorer = make_scorer(f1_score)
elif self._scoring == 'accuracy':
self._context._scorer = make_scorer(accuracy_score)
elif callable(self._scoring):
self._context._scoring = getattr(self._scoring, "name", type(self._scoring))
self._context._scorer = self._scoring
else:
self._context._scoring = 'roc_auc'
self._context._scorer = make_scorer(roc_auc_score)
# Define the classifier's hyper-parameters search space used in
# the training and tuning step
self._context._clfs = {
'knn': {
'name': 'K-Nearest Neighbors',
'base': KNeighborsClassifier(),
'param_distributions': {
'n_neighbors': [int(x) for x in np.linspace(2, 50, num=20)],
'weights': ['uniform', 'distance'],
'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute']
}
},
'log_reg': {
'name': 'Logistic Regression',
'base': LogisticRegression(random_state=42),
'param_distributions': {
'penalty': ['l1', 'l2', 'elasticnet'],
'fit_intercept': [True, False],
'max_iter': [int(x) for x in np.linspace(100, 1000, num=20)]
}
},
'svm': {
'name': 'Support Vector Machines',
'base': SVC(random_state=42),
'param_distributions': {
'C': [round(x, 3) for x in np.linspace(0.1, 5, num=10)],
'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
'gamma': ['scale', 'auto']
}
},
'tree': {
'name': 'Decision Tree',
'base': DecisionTreeClassifier(random_state=42),
'param_distributions': {
"max_depth": [int(x) for x in np.linspace(1, 12, num=5)],
"max_features": [int(x) for x in np.linspace(1, 20, num=5)],
"min_samples_leaf": [int(x) for x in np.linspace(1, 200, num=20)],
"criterion": ["gini", "entropy"]
}
},
'rf': {
'name': 'Random Forest',
'base': RandomForestClassifier(random_state=42),
'param_distributions': {
'n_estimators': [int(x) for x in np.linspace(start=100, stop=1500, num=10)],
'max_features': [.5, 'sqrt', 'log2'],
'max_depth': [None] + [int(x) for x in np.linspace(5, 50, num=10)],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 2, 4],
'bootstrap': [True, False]
}
},
'bagging': {
                'name': 'Bagging',
'base': BaggingClassifier(random_state=42),
'param_distributions': {
"n_estimators": [int(x) for x in np.linspace(start=100, stop=1500, num=10)],
"max_samples": np.arange(start=0.1, stop=1.0, step=0.1)
}
},
'xgboost': {
'name': 'XGBoost',
'base': XGBClassifier(random_state=42, objective='binary:logistic'),
'param_distributions': {
'n_estimators': stats.randint(150, 1000),
'learning_rate': stats.uniform(0.01, 0.6),
'max_depth': [3, 4, 5, 6, 7, 8, 9]
}
}
}
return self
def fit(self, X_training, y_training):
# setup context objects
self._setup()
self._context._X_training = X_training
self._context._y_training = y_training
for clf in self._context._clfs:
# Load a previously fitted model
if self._context._save and os.path.isfile(f'output/models/{self._context._name}/{clf}.model'):
self._context._clfs[clf]['sclf'] = pickle.load(open(f'output/models/{self._context._name}/{clf}.model', 'rb'))
# or fit the model and save it for future use
else:
t = time.process_time()
sclf = RandomizedSearchCV(
self._context._clfs[clf]['base'],
self._context._clfs[clf]['param_distributions'],
random_state=42,
n_iter=self._n_iter,
cv=None,#self._cv,
scoring=self._context._scorer,
n_jobs=-1
).fit(X_training, y_training)
sclf.run_time = time.process_time() - t
if self._context._save:
if not os.path.exists(f'output/models/{self._context._name}'):
os.makedirs(f'output/models/{self._context._name}')
pickle.dump(sclf, open(f'output/models/{self._context._name}/{clf}.model', 'wb'))
self._context._clfs[clf]['sclf'] = sclf
return self
class ModelSelectionStep(BinaryClassifierPipelineStep):
def __init__(self, scoring=None):
self._scoring_p = scoring
def _setup(self):
# Define the scoring function used in cross-validation and model selection
self._scoring = self._scoring_p
if self._scoring_p == 'roc_auc':
self._scorer = make_scorer(roc_auc_score)
elif self._scoring_p == 'f1':
self._scorer = make_scorer(f1_score)
elif self._scoring_p == 'accuracy':
self._scorer = make_scorer(accuracy_score)
elif callable(self._scoring_p):
self._scoring = getattr(self._scoring_p, "name", type(self._scoring_p))
self._scorer = self._scoring_p
else:
self._scoring = self._context._scoring
self._scorer = self._context._scorer
def fit(self, X_valid, y_valid):
# setup scorer, after context is set
self._setup()
info = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # EDA and Modeling Employee Attrition
# In[ ]:
# make sure we have the latest seaborb package
print()
# In[ ]:
# should be version 11
import seaborn as sns
sns.__version__
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("../../../input/patelprashant_employee-attrition"):
for filename in filenames:
print(os.path.join(dirname, filename))
# In[ ]:
df = | pd.read_csv("../../../input/patelprashant_employee-attrition/WA_Fn-UseC_-HR-Employee-Attrition.csv") | pandas.read_csv |
"""
PTC
---
Data handling for turn-by-turn measurement files from the ``PTC`` code, which can be obtained by performing
particle tracking of your machine through the ``MAD-X PTC`` interface. The files are very close in
structure to **TFS** files, with the difference that the data part is split into "segments", each
containing data for a given observation point.
"""
import copy
import logging
from collections import namedtuple
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from dateutil import tz
from turn_by_turn.constants import (
COLPARTICLE,
COLTURN,
COLX,
COLY,
DATE,
HEADER,
NAMES,
PLANES,
SEGMENT_MARKER,
SEGMENTS,
TIME,
TIME_FORMAT,
TYPES,
)
from turn_by_turn.errors import PTCFormatError
from turn_by_turn.structures import TbtData, TransverseData
LOGGER = logging.getLogger()
Segment = namedtuple("Segment", ["number", "turns", "particles", "element", "name"])
def read_tbt(file_path: Union[str, Path]) -> TbtData:
"""
Reads turn-by-turn data from the ``PTC`` **trackone** format file.
Args:
file_path (Union[str, Path]): path to the turn-by-turn measurement file.
Returns:
        A ``TbtData`` object with the loaded data.
"""
file_path = Path(file_path)
LOGGER.debug(f"Reading PTC trackone file at path: '{file_path.absolute()}'")
lines: List[str] = file_path.read_text().splitlines()
LOGGER.debug("Reading header from file")
date, header_length = _read_header(lines)
lines = lines[header_length:]
# parameters
bpms, particles, column_indices, n_turns, n_particles = _read_from_first_turn(lines)
# read into dict first for speed then convert to DFs
matrices = [{p: {bpm: np.zeros(n_turns) for bpm in bpms} for p in PLANES} for _ in range(n_particles)]
matrices = _read_data(lines, matrices, column_indices)
for bunch in range(n_particles):
matrices[bunch] = TransverseData(
X= | pd.DataFrame(matrices[bunch]["X"]) | pandas.DataFrame |
import logging
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
from beam_search import beam_decode
logger = logging.getLogger(__name__)
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def top_k_logits(logits, k):
v, ix = torch.topk(logits, k)
out = logits.clone()
out[out < v[:, [-1]]] = -float('inf')
return out
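# Example: top_k_logits(torch.tensor([[1.0, 3.0, 2.0, 0.5]]), k=2) keeps 3.0 and 2.0
# and sets the remaining logits to -inf, restricting sampling to the top-2 tokens.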
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def get_model_attr(mconf, tconf):
n_head = mconf.n_head
n_block = mconf.n_layer
nembd = mconf.n_embd
data = tconf.dataset[-20:-4]
model_attr = f"Head:{n_head}_Block{n_block}_nembd:{nembd}_data:{data}"
return model_attr
def set_plot_params():
## fonts
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu mono'
plt.rcParams['axes.labelweight'] = 'bold'
# # font sizes
# plt.rcParams['font.size'] = 16
# plt.rcParams['axes.labelsize'] = 12
# plt.rcParams['xtick.labelsize'] = 10
# plt.rcParams['ytick.labelsize'] = 10
# plt.rcParams['legend.fontsize'] = 14
# plt.rcParams['figure.titlesize'] = 16
## colors
plt.rcParams['text.color'] = 'white'
plt.rcParams['axes.labelcolor'] = 'white'
plt.rcParams['xtick.color'] = 'white'
plt.rcParams['ytick.color'] = 'white'
plt.rcParams["figure.facecolor"] = '202020'
plt.rcParams['axes.facecolor']= '202020'
plt.rcParams['savefig.facecolor']= '202020'
def set_plot_white():
# Set the global font to be DejaVu Sans, size 10 (or any other sans-serif font of your choice!)
plt.rc('font',**{'family':'sans-serif','sans-serif':['DejaVu Sans'],'size':10})
# Set the font used for MathJax - more on this later
plt.rc('mathtext',**{'default':'regular'})
plt.rcParams['text.color'] = 'black'
plt.rcParams['axes.labelcolor'] = 'black'
plt.rcParams['xtick.color'] = 'black'
plt.rcParams['ytick.color'] = 'black'
plt.rcParams["figure.facecolor"] = 'white'
plt.rcParams['axes.facecolor']= 'white'
plt.rcParams['savefig.facecolor']= 'white'
def set_plot_black():
plt.rcParams['text.color'] = 'white'
plt.rcParams['axes.labelcolor'] = 'white'
plt.rcParams['xtick.color'] = 'white'
plt.rcParams['ytick.color'] = 'white'
plt.rcParams["figure.facecolor"] = '202020'
plt.rcParams['axes.facecolor']= '202020'
plt.rcParams['savefig.facecolor']= '202020'
def plot_losses(trainer):
plt.figure(figsize=(20,5))
# plotting train losses
plt.subplot(1,2,1)
plt.title('%s training losses' % str(trainer)[1:8])
for i, losses in enumerate(trainer.train_losses):
plt.plot(losses, label=i)
plt.legend(title="epoch")
# plotting testing losses
plt.subplot(1,2,2)
plt.title('%s testing losses' % str(trainer)[1:8])
for i, losses in enumerate(trainer.test_losses):
plt.plot(losses, label=i)
plt.legend(title="epoch")
plt.show()
def plot_losses_wattr(trainer, model_attr):
plt.figure(figsize=(20,5))
# plotting train losses
plt.subplot(1,2,1)
plt.title('%s training losses' % model_attr)
for i, losses in enumerate(trainer.train_losses):
plt.plot(losses, label=i)
plt.legend(title="epoch")
# plotting testing losses
plt.subplot(1,2,2)
plt.title('%s testing losses' % model_attr)
for i, losses in enumerate(trainer.test_losses):
plt.plot(losses, label=i)
plt.legend(title="epoch")
plt.show()
def print_full(df, length=None):
    length = len(df) if length is None else length
print(length)
pd.set_option('display.max_rows', length)
torch.set_printoptions(threshold=1e3)
print(df)
pd.reset_option('display.max_rows')
torch.set_printoptions(threshold=1e3)
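# print_full temporarily lifts pandas' display.max_rows so the frame is printed
# without truncation, then restores the default (illustrative call: print_full(df_pred)).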
# results = predict_raster_recursive_time_auto(model, loader, window, stoi, itos_dt, sample=True, top_p=0.95, top_p_t=0.95, frame_end=0, get_dt=True, gpu=False)
def process_predictions(results, stoi, window):
pred_keys = ['ID', 'dt', 'Trial', 'Interval']
predicted_dict = {k: results[k] for k in results if k in pred_keys}
df_pred = | pd.DataFrame(predicted_dict) | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from random import shuffle
from pathlib import Path
from functools import partial
from typing import Any, Dict, List
import numpy
from pandas import DataFrame, Series, read_csv, concat
from lib.cast import safe_int_cast
from lib.concurrent import thread_map
from lib.data_source import DataSource
from lib.net import download_snapshot
from lib.utils import URL_OUTPUTS_PROD, combine_tables
_COLUMN_MAPPING = {
"DATE": "date",
"STATION": "noaa_station",
"TMIN": "minimum_temperature",
"TMAX": "maximum_temperature",
"PRCP": "rainfall",
"SNOW": "snowfall",
}
_OUTPUT_COLUMNS = [
"date",
"key",
"noaa_station",
"noaa_distance",
"minimum_temperature",
"maximum_temperature",
"rainfall",
"snowfall",
]
_DISTANCE_THRESHOLD = 300
_INVENTORY_URL = "https://open-covid-19.github.io/weather/ghcn/ghcnd-inventory.txt"
_STATION_URL_TPL = "https://open-covid-19.github.io/weather/ghcn/{}.csv"
# _INVENTORY_URL = "https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd-inventory.txt"
# _STATION_URL_TPL = (
# "https://www.ncei.noaa.gov/data/global-historical-climatology-network-daily/access/{}.csv"
# )
class NoaaGhcnDataSource(DataSource):
# A bit of a circular dependency but we need the latitude and longitude to compute weather
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
geo_url = f"{URL_OUTPUTS_PROD}/geography.csv"
download_opts = (fetch_opts or [{}])[0].get("opts", {})
return {0: download_snapshot(geo_url, output_folder, **download_opts)}
@staticmethod
def haversine_distance(
stations: DataFrame, lat: float, lon: float, radius: float = 6373.0
) -> Series:
""" Compute the distance between two <latitude, longitude> pairs in kilometers """
# Compute the pairwise deltas
lat_diff = stations.lat - lat
lon_diff = stations.lon - lon
# Apply Haversine formula
a = numpy.sin(lat_diff / 2) ** 2
a += math.cos(lat) * numpy.cos(stations.lat) * numpy.sin(lon_diff / 2) ** 2
c = numpy.arctan2(numpy.sqrt(a), numpy.sqrt(1 - a)) * 2
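        # Illustrative check: with coordinates already converted to radians, Boston
        # and New York come out roughly 300 km apart, i.e. about _DISTANCE_THRESHOLD.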
return radius * c
@staticmethod
def fix_temp(value: int):
value = safe_int_cast(value)
return None if value is None else "%.1f" % (value / 10.0)
@staticmethod
def station_records(station_cache: Dict[str, DataFrame], stations: DataFrame, location: Series):
nearest = stations.copy()
nearest["key"] = location.key
# Get the nearest stations from our list of stations given lat and lon
nearest["distance"] = NoaaGhcnDataSource.haversine_distance(
nearest, location.lat, location.lon
)
        # Keep the 20 nearest stations that fall within the distance threshold
nearest = nearest[nearest.distance < _DISTANCE_THRESHOLD].sort_values("distance").iloc[:20]
# Early exit: no stations found within distance threshold
if len(nearest) == 0:
return DataFrame(columns=_OUTPUT_COLUMNS)
# Query the cache and pull data only if not already cached
for station_id in filter(lambda x: x not in station_cache, nearest.id.values):
# Read the records from the nearest station
# Use our mirror since NOAA's website is very flaky
station_url = _STATION_URL_TPL.format(station_id)
data = read_csv(station_url, usecols=lambda column: column in _COLUMN_MAPPING.keys())
data = data.rename(columns=_COLUMN_MAPPING)
# Convert temperature to correct values
data["minimum_temperature"] = data["minimum_temperature"].apply(
NoaaGhcnDataSource.fix_temp
)
data["maximum_temperature"] = data["maximum_temperature"].apply(
NoaaGhcnDataSource.fix_temp
)
# Get only data for 2020 and add location values
data = data[data.date > "2019-12-31"]
# Save into the cache
station_cache[station_id] = data
# Get station records from the cache
nearest = nearest.rename(columns={"id": "noaa_station", "distance": "noaa_distance"})
station_tables = [station_cache[station_id] for station_id in nearest.noaa_station.values]
station_tables = [table.merge(nearest) for table in station_tables]
data = combine_tables(reversed(station_tables), ["date", "key"])
# Return all the available data from the records
return data[[col for col in _OUTPUT_COLUMNS if col in data.columns]]
def parse_dataframes(
self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
):
# Get all the weather stations with data up until 2020
stations = read_csv(
_INVENTORY_URL,
sep=r"\s+",
names=("id", "lat", "lon", "measurement", "year_start", "year_end"),
)
stations = stations[stations.year_end == 2020][["id", "lat", "lon", "measurement"]]
# Filter stations that at least provide max and min temps
measurements = ["TMIN", "TMAX"]
stations = stations.groupby(["id", "lat", "lon"]).agg(lambda x: "|".join(x))
stations = stations[stations.measurement.apply(lambda x: all(m in x for m in measurements))]
stations = stations.reset_index()
# Get all the POI from metadata and go through each key
keep_columns = ["key", "latitude", "longitude"]
metadata = dataframes[0][keep_columns].dropna()
# Only use keys present in the metadata table
metadata = metadata.merge(aux["metadata"])[keep_columns]
# Convert all coordinates to radians
stations["lat"] = stations.lat.apply(math.radians)
stations["lon"] = stations.lon.apply(math.radians)
metadata["lat"] = metadata.latitude.apply(math.radians)
metadata["lon"] = metadata.longitude.apply(math.radians)
# Use a cache to avoid having to query the same station multiple times
station_cache: Dict[str, DataFrame] = {}
# Make sure the stations and the cache are sent to each function call
map_func = partial(NoaaGhcnDataSource.station_records, station_cache, stations)
# We don't care about the index while iterating over each metadata item
map_iter = [record for _, record in metadata.iterrows()]
# Shuffle the iterables to try to make better use of the caching
shuffle(map_iter)
# Bottleneck is network so we can use lots of threads in parallel
records = thread_map(map_func, map_iter, total=len(metadata))
return | concat(records) | pandas.concat |
# -*- coding: utf-8 -*-
"""
This module contains the ReadSets class that is in charge
of reading the sets files, reshaping them to be used in
the build class, creating and reading the parameter files and
checking the errors in the definition of the sets and parameters
"""
import itertools as it
from openpyxl import load_workbook
import pandas as pd
from hypatia.error_log.Checks import (
check_nan,
check_index,
check_index_data,
check_table_name,
check_mapping_values,
check_mapping_ctgry,
check_sheet_name,
check_tech_category,
check_carrier_type,
check_years_mode_consistency,
)
from hypatia.error_log.Exceptions import WrongInputMode
import numpy as np
from hypatia.utility.constants import (
global_set_ids,
regional_set_ids,
technology_categories,
carrier_types,
)
from hypatia.utility.constants import take_trade_ids, take_ids, take_global_ids
MODES = ["Planning", "Operation"]
class ReadSets:
""" Class that reads the sets of the model, creates the parameter files with
default values and reads the filled parameter files
Attributes
------------
mode:
The mode of optimization including the operation and planning mode
path:
The path of the set files given by the user
glob_mapping : dict
A dictionary of the global set tables given by the user in the global.xlsx file
mapping : dict
A dictionary of the regional set tables given by the user in the regional
set files
connection_sheet_ids: dict
A nested dictionary that defines the sheet names of the parameter file of
the inter-regional links with their default values, indices and columns
global_sheet_ids : dict
A nested dictionary that defines the sheet names of the global parameter file
with their default values, indices and columns
regional_sheets_ids : dict
A nested dictionary that defines the sheet names of the regional parameter files
with their default values, indices and columns
trade_data : dict
A nested dictionary for storing the inter-regional link data
global_data : dict
A nested dictionary for storing the global data
data : dict
A nested dictionary for storing the regional data
"""
def __init__(self, path, mode="Planning"):
self.mode = mode
self.path = path
self._init_by_xlsx()
def _init_by_xlsx(self,):
"""
Reads and organizes the global and regional sets
"""
glob_mapping = {}
wb_glob = load_workbook(r"{}/global.xlsx".format(self.path))
sets_glob = wb_glob["Sets"]
set_glob_category = {key: value for key, value in sets_glob.tables.items()}
for entry, data_boundary in sets_glob.tables.items():
data_glob = sets_glob[data_boundary]
content = [[cell.value for cell in ent] for ent in data_glob]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
glob_mapping[entry] = df
self.glob_mapping = glob_mapping
check_years_mode_consistency(
mode=self.mode, main_years=list(self.glob_mapping["Years"]["Year"])
)
for key, value in self.glob_mapping.items():
check_table_name(
file_name="global",
allowed_names=list(global_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, "global", pd.Index(global_set_ids[key]))
check_nan(key, value, "global")
if key == "Technologies":
check_tech_category(value, technology_categories, "global")
if key == "Carriers":
check_carrier_type(value, carrier_types, "global")
self.regions = list(self.glob_mapping["Regions"]["Region"])
self.main_years = list(self.glob_mapping["Years"]["Year"])
if "Timesteps" in self.glob_mapping.keys():
self.time_steps = list(self.glob_mapping["Timesteps"]["Timeslice"])
self.timeslice_fraction = self.glob_mapping["Timesteps"][
"Timeslice_fraction"
].values
else:
self.time_steps = ["Annual"]
self.timeslice_fraction = np.ones((1, 1))
# possible connections among the regions
if len(self.regions) > 1:
lines_obj = it.permutations(self.regions, r=2)
self.lines_list = []
for item in lines_obj:
if item[0] < item[1]:
self.lines_list.append("{}-{}".format(item[0], item[1]))
mapping = {}
for reg in self.regions:
wb = load_workbook(r"{}/{}.xlsx".format(self.path, reg))
sets = wb["Sets"]
self._setbase_reg = [
"Technologies",
"Carriers",
"Carrier_input",
"Carrier_output",
]
set_category = {key: value for key, value in sets.tables.items()}
reg_mapping = {}
for entry, data_boundary in sets.tables.items():
data = sets[data_boundary]
content = [[cell.value for cell in ent] for ent in data]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
reg_mapping[entry] = df
mapping[reg] = reg_mapping
for key, value in mapping[reg].items():
check_table_name(
file_name=reg,
allowed_names=list(regional_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, reg, pd.Index(regional_set_ids[key]))
check_nan(key, value, reg)
if key == "Technologies":
check_tech_category(value, technology_categories, reg)
if key == "Carriers":
check_carrier_type(value, carrier_types, reg)
if key == "Carrier_input" or key == "Carrier_output":
check_mapping_values(
value,
key,
mapping[reg]["Technologies"],
"Technologies",
"Technology",
"Technology",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_in",
"Carrier",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_out",
"Carrier",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Technologies"],
"Supply",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Technologies"],
"Demand",
reg,
)
self.mapping = mapping
Technologies = {}
for reg in self.regions:
regional_tech = {}
for key in list(self.mapping[reg]["Technologies"]["Tech_category"]):
regional_tech[key] = list(
self.mapping[reg]["Technologies"].loc[
self.mapping[reg]["Technologies"]["Tech_category"] == key
]["Technology"]
)
Technologies[reg] = regional_tech
self.Technologies = Technologies
self._create_input_data()
def _create_input_data(self):
"""
Defines the sheets, indices and columns of the parameter files
"""
if len(self.regions) > 1:
# Create the columns of inter-regional links as a multi-index of the
# pairs of regions and the transmitted carriers
indexer = pd.MultiIndex.from_product(
[self.lines_list, self.glob_mapping["Carriers_glob"]["Carrier"]],
names=["Line", "Transmitted Carrier"],
)
self.connection_sheet_ids = {
"F_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"V_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Residual_capacity": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Capacity_factor_line": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Line_efficiency": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"AnnualProd_perunit_capacity": {
"value": 1,
"index": pd.Index(
["AnnualProd_Per_UnitCapacity"], name="Performance Parameter"
),
"columns": indexer,
},
}
self.global_sheet_ids = {
"Max_production_global": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Min_production_global": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Glob_emission_cap_annual": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": ["Global Emission Cap"],
},
}
if self.mode == "Planning":
self.connection_sheet_ids.update(
{
"INV": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Decom_cost": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Min_totalcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Max_totalcap": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Min_newcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Max_newcap": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Line_lifetime": {
"value": 1,
"index": pd.Index(
["Technical Life Time"], name="Performance Parameter"
),
"columns": indexer,
},
"Line_Economic_life": {
"value": 1,
"index": pd.Index(
["Economic Life time"], name="Performance Parameter"
),
"columns": indexer,
},
"Interest_rate": {
"value": 0.05,
"index": pd.Index(
["Interest Rate"], name="Performance Parameter"
),
"columns": indexer,
},
}
)
self.global_sheet_ids.update(
{
"Min_totalcap_global": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
]["Technology"],
},
"Max_totalcap_global": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
]["Technology"],
},
"Min_newcap_global": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
]["Technology"],
},
"Max_newcap_global": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
]["Technology"],
},
"Discount_rate": {
"value": 0.05,
"index": pd.Index(self.main_years, name="Years"),
"columns": ["Annual Discount Rate"],
},
}
)
self.regional_sheets_ids = {}
indexer_reg = {}
indexer_reg_drop1 = {}
indexer_reg_drop2 = {}
add_indexer = {}
conversion_plus_indexin = {}
conversion_plus_indexout = {}
# Creates the columns of the carrier_ratio_in and carrier_ratio_out sheets
# by finding the conversion plus technologies and their input and output carriers
for reg in self.regions:
if "Conversion_plus" in self.Technologies[reg].keys():
take_carrierin = [
self.mapping[reg]["Carrier_input"]
.loc[self.mapping[reg]["Carrier_input"]["Technology"] == tech][
"Carrier_in"
]
.values
for tech in self.Technologies[reg]["Conversion_plus"]
]
take_carrierin_ = [
carr
for index, value in enumerate(take_carrierin)
for carr in take_carrierin[index]
]
take_technologyin = [
self.mapping[reg]["Carrier_input"]
.loc[self.mapping[reg]["Carrier_input"]["Technology"] == tech][
"Technology"
]
.values
for tech in self.Technologies[reg]["Conversion_plus"]
]
take_technologyin_ = [
tech
for index, value in enumerate(take_technologyin)
for tech in take_technologyin[index]
]
take_carrierout = [
self.mapping[reg]["Carrier_output"]
.loc[self.mapping[reg]["Carrier_output"]["Technology"] == tech][
"Carrier_out"
]
.values
for tech in self.Technologies[reg]["Conversion_plus"]
]
take_carrierout_ = [
carr
for index, value in enumerate(take_carrierout)
for carr in take_carrierout[index]
]
take_technologyout = [
self.mapping[reg]["Carrier_output"]
.loc[self.mapping[reg]["Carrier_output"]["Technology"] == tech][
"Technology"
]
.values
for tech in self.Technologies[reg]["Conversion_plus"]
]
take_technologyout_ = [
tech
for index, value in enumerate(take_technologyout)
for tech in take_technologyout[index]
]
conversion_plus_indexin[reg] = pd.MultiIndex.from_arrays(
[take_technologyin_, take_carrierin_],
names=["Tech_category", "Technology"],
)
conversion_plus_indexout[reg] = pd.MultiIndex.from_arrays(
[take_technologyout_, take_carrierout_],
names=["Tech_category", "Technology"],
)
# Creates the columns of the technology-specific parameter files
# based on the technology categories and the technologies within each
# caregory
dict_ = self.Technologies[reg]
level1 = []
level2 = []
for key, values in dict_.items():
if key != "Demand":
for value in values:
level1.append(key)
level2.append(value)
indexer_reg[reg] = pd.MultiIndex.from_arrays(
[level1, level2], names=["Tech_category", "Technology"]
)
if "Storage" in self.Technologies[reg].keys():
indexer_reg_drop1[reg] = indexer_reg[reg].drop("Storage", level=0)
else:
indexer_reg_drop1[reg] = indexer_reg[reg]
if "Transmission" in self.Technologies[reg].keys():
indexer_reg_drop2[reg] = indexer_reg_drop1[reg].drop(
"Transmission", level=0
)
else:
indexer_reg_drop2[reg] = indexer_reg_drop1[reg]
level1_ = level1 * 2
level2_ = level2 * 2
tax = []
sub = []
for tech in level2:
tax.append("Tax")
sub.append("Sub")
taxsub = tax + sub
add_indexer[reg] = pd.MultiIndex.from_arrays(
[taxsub, level1_, level2_],
names=["Taxes or Subsidies", "Tech_category", "Technology"],
)
self.regional_sheets_ids[reg] = {
"F_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"V_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Residual_capacity": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Max_production": {
"value": 1e20,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop2[reg],
},
"Min_production": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop2[reg],
},
"Capacity_factor_tech": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Tech_efficiency": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop1[reg],
},
"Specific_emission": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop2[reg],
},
"Carbon_tax": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop2[reg],
},
"Fix_taxsub": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": add_indexer[reg],
},
"Emission_cap_annual": {
"value": 1e10,
"index": | pd.Index(self.main_years, name="Years") | pandas.Index |
# Copyright (c) 2016-2018 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
"""Class implementing the maelstrom method (Bruse & van Heeringen, 2018)
Examples
--------
run_maelstrom(input, "hg38", outdir)
mr = MaelstromResult(outdir)
"""
import glob
import os
import re
import subprocess as sp
import shutil
import sys
from tempfile import NamedTemporaryFile
import logging
from functools import partial
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale
from scipy.cluster import hierarchy
from scipy.spatial import distance
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
from gimmemotifs.background import RandomGenomicFasta
from gimmemotifs.config import MotifConfig
from gimmemotifs.moap import moap, Moap
from gimmemotifs.rank import rankagg
from gimmemotifs.motif import read_motifs
from gimmemotifs.scanner import Scanner
from gimmemotifs.report import maelstrom_html_report
from gimmemotifs.utils import join_max, pwmfile_location
from multiprocessing import Pool
BG_LENGTH = 200
BG_NUMBER = 10000
FPR = 0.01
logger = logging.getLogger("gimme.maelstrom")
def scan_to_table(input_table, genome, scoring, pwmfile=None, ncpus=None):
"""Scan regions in input table with motifs.
Parameters
----------
input_table : str
Filename of input table. Can be either a text-separated tab file or a
feather file.
genome : str
Genome name. Can be either the name of a FASTA-formatted file or a
genomepy genome name.
scoring : str
"count" or "score"
pwmfile : str, optional
Specify a PFM file for scanning.
ncpus : int, optional
If defined this specifies the number of cores to use.
Returns
-------
table : pandas.DataFrame
DataFrame with motif ids as column names and regions as index. Values
        are either counts or scores depending on the 'scoring' parameter.
"""
config = MotifConfig()
if pwmfile is None:
pwmfile = config.get_default_params().get("motif_db", None)
if pwmfile is not None:
pwmfile = os.path.join(config.get_motif_dir(), pwmfile)
if pwmfile is None:
raise ValueError("no pwmfile given and no default database specified")
logger.info("reading table")
if input_table.endswith("feather"):
df = pd.read_feather(input_table)
idx = df.iloc[:,0].values
else:
df = | pd.read_table(input_table, index_col=0, comment="#") | pandas.read_table |
from datetime import datetime
from unittest import TestCase
from unittest.mock import Mock
from zoneinfo import ZoneInfo
from etl.src.extractor import TimeSeriesExtractor, Clock
import pandas as pd
from pandas.testing import assert_series_equal
def to_milliseconds(ts: datetime) -> int:
return int(ts.timestamp() * 1000)
class TestExtractor(TestCase):
def setUp(self):
self.mock_api = Mock()
tz = ZoneInfo("Europe/Berlin")
ts_1 = datetime(2021, 4, 11, hour=5, tzinfo=tz)
ts_2 = datetime(2021, 4, 11, hour=10, tzinfo=tz)
ts_3 = datetime(2021, 4, 11, hour=22, tzinfo=tz)
self.ts_1 = to_milliseconds(ts_1)
self.ts_2 = to_milliseconds(ts_2)
self.ts_3 = to_milliseconds(ts_3)
self.mock_api.query.return_value = {
"results": [
{
"statement_id": 0,
"series": [
{
"name": "ppm",
"columns": ["time", "value"],
"values": [
[self.ts_1, 500],
[self.ts_2, 502],
[self.ts_3, 535],
],
}
],
}
]
}
clock = Mock()
clock.now.return_value = datetime(2021, 4, 11, hour=23, tzinfo=tz)
self.extractor = TimeSeriesExtractor(
home_api=self.mock_api, day_start_hour=6, clock=clock
)
def test_has_extract_accepts_query(self):
self.extractor.extract(query="query")
def test_returns_time_series(self):
index = | pd.to_datetime([self.ts_2, self.ts_3], unit="ms") | pandas.to_datetime |
import streamlit as st
import pandas as pd
from pyvis.network import Network
import networkx as nx
import matplotlib.pyplot as plt
import bz2
import pickle
import _pickle as cPickle
import pydot
import math
import numpy as num
def decompress_pickle(file):
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
return data
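# Each uploaded .pbz2 file is expected to decompress to the dict of concept
# structures used below ('Conceptdata', 'sent_to_npflat', 'np_to_sent', ...).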
uploaded_files = st.sidebar.file_uploader("Choose files", accept_multiple_files=True)
# sidebar for navigating pages
page_nav = st.sidebar.selectbox("Select view:",('Document overviews','Focus concepts','Path views','Active Study view','Study phenomena','Study sets'))
@st.cache
def do_this_first(uploaded_files):
#st.write(st.__version__)
# Load any compressed pickle file
# for uploaded_file in uploaded_files:
# concepts = decompress_pickle(uploaded_file)
# st.write("filename:", uploaded_file.name)
filenames = [file.name for file in uploaded_files] # return this
import pandas as pd
Agg_Conceptdata = pd.DataFrame()
All_Conceptdata = pd.DataFrame()
Agg_np_to_sent = dict()
Agg_sent_to_npflat = dict()
Agg_sent_to_phen = dict()
Agg_phen_to_sent = dict()
Agg_att_to_sent = dict()
Agg_sent_to_att = dict()
Agg_ins_to_sent = dict()
Agg_sent_to_ins = dict()
Agg_set_to_sent = dict()
Agg_sent_to_set = dict()
Agg_np_to_forms = dict()
doc_to_np = dict()
np_to_doc = dict()
    Agg_df = pd.DataFrame()
Agg_np_to_roles = dict()
Agg_sent_to_clt = dict()
Agg_sents = dict()
#Agg_sents_df = pd.DataFrame()
#Agg_docs_df = pd.DataFrame()
All_df = pd.DataFrame()
for uploaded_file in uploaded_files:
concepts = decompress_pickle(uploaded_file)
filename = uploaded_file.name
#st.write("filename:", uploaded_file.name)
Conceptdata = concepts['Conceptdata']
sent_to_npflat = concepts['sent_to_npflat']
np_to_sent = concepts['np_to_sent']
np_to_forms = concepts['np_to_forms']
sent_to_phen = concepts['sent_to_phen']
phen_to_sent = concepts['phen_to_sent']
sent_to_att = concepts['sent_to_att']
att_to_sent = concepts['att_to_sent']
att_to_sent = concepts['att_to_sent']
ins_to_sent = concepts['ins_to_sent']
sent_to_ins = concepts['sent_to_ins']
set_to_sent = concepts['set_to_sent']
sent_to_set = concepts['sent_to_set']
np_to_roles = concepts['np_to_roles']
sent_to_clt = concepts['sent_to_clt']
sents = concepts['sents']
df = concepts['df']
Conceptdata['docname'] = filename
Agg_Conceptdata = Agg_Conceptdata.append(Conceptdata,ignore_index=True)
Agg_sent_to_clt[filename.replace(".pbz2","")] = sent_to_clt
Agg_np_to_sent[filename.replace(".pbz2","")] = np_to_sent
Agg_sents[filename.replace(".pbz2","")] = sents
Agg_sent_to_npflat[filename.replace(".pbz2","")] = sent_to_npflat
Agg_sent_to_set[filename.replace(".pbz2","")] = sent_to_set
Agg_sent_to_att[filename.replace(".pbz2","")] = sent_to_att
Agg_sent_to_phen[filename.replace(".pbz2","")] = sent_to_phen
Agg_sent_to_ins[filename.replace(".pbz2","")] = sent_to_ins
Agg_df = Agg_df.append(df,ignore_index=True)
doc_to_np[filename] = list(np_to_sent.keys()) # return this
for np in np_to_sent:
# if np in Agg_np_to_sent:
# Agg_np_to_sent[np] = Agg_np_to_sent[np] + [(filename,s) for s in np_to_sent[np]]
# else:
# Agg_np_to_sent[np] = [(filename,s) for s in np_to_sent[np]]
if np in np_to_doc:
np_to_doc[np] = np_to_doc[np] + [filename]
else:
np_to_doc[np] = [filename]
for np in np_to_forms:
if np in Agg_np_to_forms:
Agg_np_to_forms[np] = Agg_np_to_forms[np] + np_to_forms[np]
else:
Agg_np_to_forms[np] = np_to_forms[np]
for np in np_to_roles:
if np in Agg_np_to_roles:
Agg_np_to_roles[np] = Agg_np_to_roles[np] + np_to_roles[np]
else:
Agg_np_to_roles[np] = np_to_roles[np]
for np in phen_to_sent:
if np in Agg_phen_to_sent:
Agg_phen_to_sent[np] = Agg_phen_to_sent[np] + [(filename,s) for s in phen_to_sent[np]]
else:
Agg_phen_to_sent[np] = [(filename,s) for s in phen_to_sent[np]]
for np in att_to_sent:
if np in Agg_att_to_sent:
Agg_att_to_sent[np] = Agg_att_to_sent[np] + [(filename,s) for s in att_to_sent[np]]
else:
Agg_att_to_sent[np] = [(filename,s) for s in att_to_sent[np]]
for np in set_to_sent:
if np in Agg_set_to_sent:
Agg_set_to_sent[np] = Agg_set_to_sent[np] + [(filename,s) for s in set_to_sent[np]]
else:
Agg_set_to_sent[np] = [(filename,s) for s in set_to_sent[np]]
for np in ins_to_sent:
if np in Agg_ins_to_sent:
Agg_ins_to_sent[np] = Agg_ins_to_sent[np] + [(filename,s) for s in ins_to_sent[np]]
else:
Agg_ins_to_sent[np] = [(filename,s) for s in ins_to_sent[np]]
#st.write(Agg_Conceptdata.columns)
All_Conceptdata = pd.DataFrame()
def most_common_form(np):
return pd.Series(Agg_np_to_forms[np]).value_counts().sort_values(ascending=False).index[0]
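    # e.g. if Agg_np_to_forms["cell"] were ["cell", "cells", "cell"], the most
    # common surface form "cell" would be returned.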
Agg_np_to_mcform = dict()
for np in Agg_np_to_forms:
Agg_np_to_mcform[np] = most_common_form(np)
All_Conceptdata = Agg_Conceptdata.groupby('Concept').agg(doc_Occurence = pd.NamedAgg('docname',lambda x: list(x)),
doc_Frequency = pd.NamedAgg('docname',lambda x: x.shape[0]),
Raw_Frequency = pd.NamedAgg('Frequency','sum'),
Mean = pd.NamedAgg('Mean','mean'),
Median = pd.NamedAgg('Median','mean'),
Sdev = pd.NamedAgg('Sdev','mean'),
Ext_IDF = pd.NamedAgg('IDF',num.nanmin))
All_Conceptdata['Mean_Frequency'] = All_Conceptdata['Raw_Frequency']/All_Conceptdata['doc_Frequency']
All_Conceptdata['normalized_RawFreq'] = All_Conceptdata['Raw_Frequency']/All_Conceptdata['Raw_Frequency'].max()
All_Conceptdata['normalized_MeanFreq'] = All_Conceptdata['Mean_Frequency']/All_Conceptdata['Mean_Frequency'].max()
All_Conceptdata['intIDF'] = All_Conceptdata['doc_Frequency'].apply(lambda x: math.log(len(filenames),2)-abs(math.log(1+x,2)))
All_Conceptdata['intmeanTFIDF'] = All_Conceptdata['normalized_MeanFreq']*All_Conceptdata['intIDF']
for filename in filenames:
colname = filename.replace(".pbz2","")
All_Conceptdata = pd.merge(left = All_Conceptdata,
right = Agg_Conceptdata.loc[Agg_Conceptdata['docname']==filename,['Concept','Frequency']],
how='left',
left_on = 'Concept',
right_on = 'Concept')
All_Conceptdata[colname+'_TF'] = All_Conceptdata['Frequency']
del All_Conceptdata['Frequency']
All_Conceptdata[colname+'_TF'].fillna(0,inplace=True)
All_Conceptdata[colname+'_IntTFIDF'] = All_Conceptdata[colname+'_TF']*All_Conceptdata['intIDF']
All_Conceptdata['MCForm'] = All_Conceptdata['Concept'].apply(lambda x: Agg_np_to_mcform[x])
All_Conceptdata['role_frac'] = All_Conceptdata['Concept'].apply(lambda x: dict(pd.Series(Agg_np_to_roles[x]).value_counts(normalize=True)))
All_Conceptdata['phen_frac'] = All_Conceptdata['role_frac'].apply(lambda x: x.get('phen',0))
All_Conceptdata['att_frac'] = All_Conceptdata['role_frac'].apply(lambda x: x.get('att',0))
All_Conceptdata['set_frac'] = All_Conceptdata['role_frac'].apply(lambda x: x.get('set',0))
All_Conceptdata['ins_frac'] = All_Conceptdata['role_frac'].apply(lambda x: x.get('ins',0))
del All_Conceptdata['role_frac']
All_df = pd.DataFrame()
Agg_df['tuple'] = Agg_df[['Concept1','Concept2']].apply(lambda x:tuple(x),axis=1)
All_df = Agg_df.groupby('tuple').agg(Concept1 = pd.NamedAgg('Concept1',lambda x: list(x)[0]),
Concept2 = pd.NamedAgg('Concept2',lambda x: list(x)[0]),
Bondstrength = pd.NamedAgg('Bondstrength','sum'),
mean_dAB = pd.NamedAgg('dAB',num.nanmean),
mean_dBA = pd.NamedAgg('dBA',num.nanmean),
ExtIDFA = pd.NamedAgg('IDFA',num.nanmean),
ExtIDFB = pd.NamedAgg('IDFB',num.nanmean),
SdevA = pd.NamedAgg('SdevA',num.nanmean),
SdevB = pd.NamedAgg('SdevB',num.nanmean),
)
All_df = | pd.merge(left = All_df,right = All_Conceptdata.loc[:,['Concept','Raw_Frequency']],how="left",left_on = 'Concept1',right_on='Concept') | pandas.merge |
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
import os
from scipy import stats
from tqdm import tqdm
import mdtraj as md
########################################################
def get_3drobot_native(data_flag):
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
pdb_list = | pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 10:48:15 2020
@author: <NAME>
OK TODO: make it work from the command line
OK TODO get json file
OK TODO get all json variables
OK TODO checks all JSON variables
OK TODO try except error management with setting file
OK TODO add test to see if file exists
OK TODO add test for filepath
OK TODO: change CFD mesh
OK TODO: inject the framat setting file to the wrapper
OK TODO: Compute the error
OK TODO: File management with csv deformation
OK TODO do a better job with the settings files workflow
OK TODO: get CFD forces for csv file
OK TODO: compute CFD moments
OK TODO: Verify that the displacements do not add up
TODO: checks folder structure and prints the potential errors
TODO: In deformation from csv mode the program should read a logfile where
there is the results of the previous simulations
TODO: set the logging level from the command line
TODO: add gravity in Pytornado and SU2
TODO: add motor push
WARNING: the table of accepted names is not supported in all classes
"""
import logging
import json
import aeroframe_2.deformation.functions as aeroDef
import aeroframe_2.deformation.framatNormalVecConverter as normalVecFramat
import aeroframe_2.csdImportGeometry.importGeomerty as importGeomerty
import aeroframe_2.informationTransfer.mapping as mapping
import aeroframe_2.informationTransfer.mappingSU2 as mappingSU2
import aeroframe_2.wrappers.framatWrapper as framatWrapper
# import aeroframe_2.wrappers.framatWrapperSU2 as framatWrapperSU2
import pytornado.stdfun.run as cfd
import ceasiompy.SU2Run.su2run as SU2_fsi
import numpy as np
# import pickle
import sys
import matplotlib.pyplot as plt
import copy
# import pandas as pd
import os
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
# from ceasiompy.SU2Run.func.extractloads import extract_loads
# import SU2_CFD
# TODO take the information from the args parameters
logging.basicConfig(level=logging.DEBUG)
__prog_name__ = "aeroframe_2"
logger = logging.getLogger(__prog_name__+"."+__name__)
def getSettings(inputFileAndPath):
"""
    Reads the json input file. This file contains the user input choices. If
there is no file, the function throws an error.
Parameters
----------
inputFileAndPath : string
Contains the path to the user input JSON file.
Raises
------
FileNotFoundError
If the file is not found the function throws that error
Returns
-------
    settings : Dictionary
        Returns a dictionary with all the user input settings, such as which CFD
        solver should be used, which CSD solver, wing mechanical properties,
        materials, etc.
"""
try:
with open(inputFileAndPath) as f:
settings = json.load(f)
f.close()
except FileNotFoundError:
logger.error("<setting file>.json not found for aeroframe")
raise FileNotFoundError
return settings
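# Hedged usage sketch (the path below is only illustrative): load the settings
# once and pass the resulting dictionary to the solver dispatch functions below.
# settings = getSettings("/path/to/aeroframe_settings.json")
# logger.debug("Loaded %d settings entries", len(settings))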
def pytornadoMeshing(args, aeroframeSettings):
"""
Calls pytornado functions to build up a lattice (which is basically the
mesh and our point of interest for aeroelasticity) and all other
variables/instances needed by pytornado.
Parameters
----------
args : sys.argv
        User input from the command line.
    aeroframeSettings : Dictionary
        Contains all the information that is in the input JSON file, i.e. which
        solver to use and the mechanical properties of the wing.
Returns
-------
pytornadoSttings : string
Contains the equivalent of a command line input command for pytornado.
        With this variable we are tricking pytornado into thinking that it is
        being called from the command line, which is not the case.
pytornadoVariables : list
Variables containing the mesh, the results and some other information,
necessary to run a pytornado simulation.
"""
# Virtual command line setting file input path. the "args" contains the
# rest of the needed information
pytornadoSttings = args.cwd + "/CFD/settings/" + aeroframeSettings["CFD_settings_file"]
    # Builds the CFD mesh
lattice,vlmdata,settings,aircraft,cur_state,state = cfd.meshing(args,pytornadoSttings)
# Join all the variables for ease of use.
pytornadoVariables = [lattice, vlmdata, settings, aircraft, cur_state, state]
logger.info("Pytornado meshing done")
return pytornadoSttings, pytornadoVariables
def csvDeformation(args,pytornadoSettings,aeroframeSettings,pytornadoVariables):
"""
    Deforms the VLM mesh from a csv file by instantiating the Mesh_Def class
    from the aeroDef module.
Parameters
----------
args : sys.argv
        User input from the command line.
    pytornadoSettings : string
        Settings file needed to run pytornado.
    aeroframeSettings : Dictionary
        Contains all the information that is in the input JSON file, i.e. which
        solver to use and the mechanical properties of the wing.
pytornadoVariables : list
Variables containing the mesh, the results and some other information,
necessary to run a pytornado simulation.
Returns
-------
pytornadoVariables : list
Variables containing the mesh, the results and some other information,
necessary to run a pytornado simulation.
"""
# Extract the vlm mesh
lattice = pytornadoVariables[0]
# Feeds all the needed settings in order to read the .csv deformation file
    # and then be able to deform the mesh (basically compute the new normal
# vector).
mesh_def = aeroDef.Mesh_Def(args,aeroframeSettings,lattice)
# Calls the deformation function that computes the new points positions
if aeroframeSettings['deformation_from_file']:
mesh_def.deformation()
# Feeds the result back to an understandable pytornado mesh. The idea is
# not to touch any unwanted variable, hence the action of feeding back the
# new point to the old lattice variable.
lattice.p = mesh_def.f_p
lattice.v = mesh_def.f_v
lattice.c = mesh_def.f_c
lattice.bound_leg_midpoints = mesh_def.f_b
lattice.n = mesh_def.f_n
lattice.a = mesh_def.f_a
# For ease of use and code readability
pytornadoVariables[0] = lattice
return pytornadoVariables
def feeder(pytornadoVariables,meshDeformation):
"""
Transfers the deformed mesh points to pytornadoVariables.
Parameters
----------
pytornadoVariables : list
Variables containing the mesh, the results and some other information,
necessary to run a pytornado simulation.
    meshDeformation : Mesh_Def
        Variable of class Mesh_Def (from the aeroDef module) that contains all
        the necessary mesh points to run pytornado.
Returns
-------
pytornadoVariables : list
Variables containing the mesh, the results and some other information,
necessary to run a pytornado simulation.
"""
pytornadoVariables[0].p = meshDeformation.f_p
pytornadoVariables[0].v = meshDeformation.f_v
pytornadoVariables[0].c = meshDeformation.f_c
pytornadoVariables[0].a = meshDeformation.f_a
pytornadoVariables[0].bound_leg_midpoints = meshDeformation.f_b
pytornadoVariables[0].n = meshDeformation.f_n
return pytornadoVariables
def forcesToCsv(args,pytornadoVariables,results):
"""
    Writes the results to a csv file. WARNING: at the moment (19.08.20) there is
    another function doing the same in CEASIOMpy, but that function extracts the
    loads at the boundleg midpoint, which is not what the FEM mesh needs.
Parameters
----------
args : sys.argv
        User input from the command line.
pytornadoVariables : list
Variables containing the mesh, the results and some other information,
necessary to run a pytornado simulation.
results : pytornado result class
        Variable containing all the panelwise results, hence airspeed, forces,
etc.
Returns
-------
None.
"""
    # Assembles the path
path = args.cwd + "/CSD/results/panelwiseForces.csv"
headers = "x;y;z;fx;fy;fz"
# Gets simulation values
panelCoordinates = pytornadoVariables[0].c
panelFx = results["vlmdata"].panelwise['fx']
panelFy = results["vlmdata"].panelwise['fy']
panelFz = results["vlmdata"].panelwise['fz']
# panelMx = results["vlmdata"].panelwise['mx']
# panelMy = results["vlmdata"].panelwise['my']
# panelMz = results["vlmdata"].panelwise['mz']
results = np.array([panelCoordinates[:,0],
panelCoordinates[:,1],
panelCoordinates[:,2],
panelFx,
panelFy,
panelFz,
# panelMx,
# panelMy,
# panelMz
])
np.savetxt(path, results.T, delimiter=';', header=headers)
logger.info("Simulation finised")
def solverPytornadoCSV(args, aeroframeSettings, acceptedNames):
"""
The workflow is as follows:
1) The function builds CFD mesh.
2) Then it deforms the CFD mesh.
3) Solves the CFD problem. (basically calls the solver)
4) Extract panelwise forces and saves them into a csv file.
Parameters
----------
args : sys.argv
        User input from the command line.
    aeroframeSettings : Dictionary
        Contains all the information that is in the input JSON file, i.e. which
        solver to use and the mechanical properties of the wing.
acceptedNames : list
List containing all the implemented solvers. If the user asks a solver
which is not implemented the program will throw an error and close
itself.
Returns
-------
None.
"""
# Step 1) pytornado meshing
pytornadoSettings, pytornadoVariables = pytornadoMeshing(args, aeroframeSettings)
# Step 2) Deforms CFD mesh if necessary
if aeroframeSettings['deformation_from_file']:
lattice = pytornadoVariables[0]
meshDeformation = aeroDef.Mesh_Def(args,aeroframeSettings,lattice)
meshDeformation.deformation(acceptedNames)
pytornadoVariables = feeder(pytornadoVariables,meshDeformation)
# Step 3) Computes the CFD problem
pytornadoVariables, results = cfd.solver(pytornadoVariables)
# Step 4) Saves panelwise forces results
forcesToCsv(args,pytornadoVariables,results)
logger.info("End of simulation")
sys.exit()
def solverPytornadoFramat(args, aeroframeSettings, acceptedNames):
"""
Function called when the user desires to do a simulation with the VLM
solver Pytornado and the structure solver FramAT.
The workflow is as follows:
1) Builds CFD mesh.
2) Reads CPACS files and computes the nodes of pseudo 1D structural
mesh.
3) Builds CSD instance in FramAT.
4) Computes the transformation matrices
5) Computes CFD problem.
6) Enters the aeroelastic loop.
7) Projects the loads on CSD instance.
8) Computes CSD solution
9) Deforms the CFD mesh.
10) Computes the norm of the displacement error
11) Computes new CFD problem.
12) loops back to point 6) if simulation has not converged.
13) Saves the results to a CSV file
Parameters
----------
args : sys.argv
        User input from the command line.
    aeroframeSettings : Dictionary
        Contains all the information that is in the input JSON file, i.e. which
        solver to use and the mechanical properties of the wing.
acceptedNames : list
List containing all the implemented solvers. If the user asks a solver
which is not implemented the program will throw an error and close
itself.
Returns
-------
None.
"""
# Step 1) pytornado meshing
pytornadoSettings, pytornadoVariables = pytornadoMeshing(args, aeroframeSettings)
# Step 2) Reads CPACS files and computes the nodes of beam model structural
# mesh. Aeroframe function pre-meshes the aircraft to get each
# structure node.
preMeshedStructre = importGeomerty.CsdGeometryImport(args,aeroframeSettings)
preMeshedStructre.getAllPoints()
# Step 3) feeds the computed nodes to the structure solver and builds a
# structure mesh
csdSolverClassVar = framatWrapper.framat(preMeshedStructre)
csdSolverClassVar.mesh()
# Step 4) feeds the computed nodes to a mapping function which computes the
    # transformation matrices (based on RBF)
logger.debug(pytornadoVariables[0].c)
# sys.exit()
transform = mapping.mapper(pytornadoVariables,preMeshedStructre,csdSolverClassVar)
transform.computesTransformationsMatrices()
# Step 5) Computes CFD problem.
cfd.solver(pytornadoVariables)
pytornadoVariablesInit = copy.deepcopy(pytornadoVariables)
    # Step 6) Aeroelastic loop.
N = aeroframeSettings["MaxIterationsNumber"]
i = 0
maxDisplacement = np.array([0])
error = []
absoluteDisplacement = []
aeroFx = []
aeroFy = []
aeroFz = []
aeroMx = []
aeroMy = []
aeroMz = []
structFx = []
structFy = []
structFz = []
structMx = []
structMy = []
structMz = []
pVold = pytornadoVariables
tol = aeroframeSettings["ConvergeanceTolerence"]
while (i < N):
        # Basic user communication
logger.debug("aeroelastic loop number: "+str(i))
# Saves the aerodynamic results
points = pVold[0].bound_leg_midpoints
Fx = pVold[1].panelwise['fx']
Fy = pVold[1].panelwise['fy']
Fz = pVold[1].panelwise['fz']
Mx = pVold[1].panelwise['mx']
My = pVold[1].panelwise['my']
Mz = pVold[1].panelwise['mz']
df = pd.DataFrame(points)
df['Fx'] = Fx
df['Fy'] = Fy
df['Fz'] = Fz
df['Mx'] = Mx
df['My'] = My
df['Mz'] = Mz
df.columns = ['x', 'y', 'z', 'Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz']
df.to_csv(args.cwd + '/CFD/_results/forces' + str(i) + '.csv')
# Step 7) Projects the loads on CSD instance.
transform.aeroToStructure(args,i)
logger.debug(transform)
# Step 8) Compute structure solution
csdSolverClassVar.run(transform)
# Step 9) deforms the CFD mesh. Computes beam deformation
latticeCurrent = pytornadoVariablesInit[0]
meshDeformation = aeroDef.Mesh_Def(args,aeroframeSettings,latticeCurrent)
# Step 10) computes new aerodynamic points
transform.structureToAero(args,i)
meshDeformation.deformation(acceptedNames,transform)
pytornadoVariables = feeder(pytornadoVariables,meshDeformation)
# Step 11) Computes the norm of the displacement error
        # Max structure displacement from one aeroelastic iteration to the next
maxDisplacement = np.append(maxDisplacement, np.max(np.abs(transform.suz)))
error.append(np.abs(maxDisplacement[-1] - maxDisplacement[-2]))
# Max structure displacement from undeformed state
absoluteDisplacement.append(np.abs(maxDisplacement[-1] - maxDisplacement[0]))
aeroFx.append(transform.totalAerodynamicFx)
aeroFy.append(transform.totalAerodynamicFy)
aeroFz.append(transform.totalAerodynamicFz)
aeroMx.append(transform.totalAerodynamicMx)
aeroMy.append(transform.totalAerodynamicMy)
aeroMz.append(transform.totalAerodynamicMz)
structFx.append(transform.totalStructureFx)
structFy.append(transform.totalStructureFy)
structFz.append(transform.totalStructureFz)
structMx.append(transform.totalStructureMx)
structMy.append(transform.totalStructureMy)
structMz.append(transform.totalStructureMz)
logger.info("Max error between two iteration: "+str(error))
logger.info("Max displacement between two iteration: "+str(absoluteDisplacement))
# logger.info('G load: '+str(transform.G))
        # Step 12) Computes the new CFD solution
pytornadoVariables, results = cfd.solver(pytornadoVariables)
# logger.debug(pytornadoVariables[0].bound_leg_midpoints)
# sys.exit()
del(csdSolverClassVar)
csdSolverClassVar = framatWrapper.framat(preMeshedStructre)
csdSolverClassVar.mesh()
# del(transform)
transform = mapping.mapper(pytornadoVariables,preMeshedStructre,csdSolverClassVar)
transform.computesTransformationsMatrices()
# sys.exit()
i += 1
if i == N-1:
logger.warning("Simulation has reached max number of step,")
logger.warning("convergeance is yet to determine!")
if error[-1] <= tol:
logger.info("Simulation has converged")
i = N
# Writes a file which contains the error for each timestep
N = len(error)
path = args.cwd + "/results.csv"
MyFile = open(path,"w")
MyFile.write("Relative error; max displacement;"\
" aero Fx; aero Fy; aero Fz; aero Mx; aero My; aero Mz;"\
" struct Fx; struct Fy; struct Fz; struct Mx; struct My; struct Mz"\
)
MyFile.write("\n")
for i in range(N):
MyFile.write(str(error[i]) + ";" + \
str(absoluteDisplacement[i]) + ";" + \
str(aeroFx[i]) + ";" + \
str(aeroFy[i]) + ";" + \
str(aeroFz[i]) + ";" + \
str(aeroMx[i]) + ";" + \
str(aeroMy[i]) + ";" + \
str(aeroMz[i]) + ";" + \
str(structFx[i]) + ";" + \
str(structFy[i]) + ";" + \
str(structFz[i]) + ";" + \
str(structMx[i]) + ";" + \
str(structMy[i]) + ";" + \
str(structMz[i]) + ";"
)
MyFile.write("\n")
MyFile.close()
# Writes the forces and points at which they apply
sys.exit()
def solverSU2Framat(args, aeroframeSettings, acceptedNames):
"""
Function called when the user desires to couple the CFD solver SU2
and the structure solver FramAT.
Parameters
----------
args : sys.argv
        User input from the command line.
    aeroframeSettings : Dictionary
        Contains all the information that is in the input JSON file, i.e. which
        solver to use and the mechanical properties of the wing.
acceptedNames : list
List containing all the implemented solvers. If the user asks a solver
which is not implemented the program will throw an error and close
itself.
Returns
-------
None.
"""
# TODO Step ) Checks the entry data.
# Step 1) Initialization of the loop
# Creation of the current loop directory done by the su2run.py file.
    # Case setup
iteration = 0
wkdir = args.cwd
    # TODO: read it from the config file
nb_proc = aeroframeSettings["SU2_Nproc"]
logger.debug("Configuration path: \n"+str(aeroframeSettings))
logger.debug("nb of proc: \n"+str(nb_proc))
logger.debug("WKDIR: \n"+str(wkdir))
# Step 2) Runs a single SU2 simulation
case = '/Case/'
###
# WARNING
###
SU2_fsi.run_SU2_fsi(aeroframeSettings, wkdir, case, iteration)
# Step 3) Reads CPACS files and computes the nodes of 3D beam structural
# mesh. Aeroframe_2 function 'importGeomerty' pre-meshes the
# aircraft to get each structure node.
preMeshedStructre = importGeomerty.CsdGeometryImport(args,aeroframeSettings)
preMeshedStructre.getAllPoints()
logger.info("Structure mesh points computed")
# Step 4) feeds the computed nodes to the structure solver and builds a
# structure mesh
csdSolverClassVar = framatWrapper.framat(preMeshedStructre)
csdSolverClassVar.mesh()
logger.info("FramAT mesh computed")
# Step 5) feeds the computed nodes to a mapping function which computes the
    # transformation matrices (based on RBF)
logger.debug("Next step is transformation")
forceFile = '/CFD/Case/' + str(iteration) + '/force.csv'
logger.debug(wkdir)
logger.debug(forceFile)
forceInitFilePath = wkdir + forceFile
transform = mappingSU2.mapper(forceInitFilePath,preMeshedStructre,csdSolverClassVar)
transform.computesTransformationsMatrices(forceInitFilePath)
    # Step 6) Aeroelastic loop.
N = aeroframeSettings["MaxIterationsNumber"]
maxDisplacement = np.array([0])
error = []
absoluteDisplacement = []
Gloads = []
tol = aeroframeSettings["ConvergeanceTolerence"]
while (iteration < N):
        # Basic user communication
logger.debug("aeroelastic loop number: "+str(iteration))
forceFilePath = wkdir + '/CFD/Case/' + str(iteration) + '/force.csv'
# Makes a copy to avoid memory linked mistakes
transformCurrent = transform
csdSolverClassVarCurrent = csdSolverClassVar
# Step 7) Projects the loads on CSD instance.
transformCurrent.aeroToStructure(args,forceFilePath,iteration)
# Step 8) Compute structure solution
csdSolverClassVarCurrent.run(transformCurrent)
transformCurrent.structureToAero(args,iteration,forceInitFilePath,forceFilePath)
# Step 9) Computes convergence
maxDisplacement = np.append(maxDisplacement, np.max(np.abs(transform.displacements)))
error.append(np.abs(maxDisplacement[-1] - maxDisplacement[-2]))
absoluteDisplacement.append(np.abs(maxDisplacement[-1] - maxDisplacement[0]))
Gloads.append(transform.G)
logger.info("Max error between two iteration: "+str(error))
logger.info("Max displacement between two iteration: "+str(absoluteDisplacement))
logger.info('G load: '+str(Gloads[iteration]))
        # WARNING do not change its place unless you know what you are doing
iteration += 1
# Step 10) computes new CFD solution
SU2_fsi.run_SU2_fsi(aeroframeSettings, wkdir, case, iteration)
        # Step 11) frees memory in order to avoid piling up displacements in FramAT
del(csdSolverClassVar)
csdSolverClassVar = framatWrapper.framat(preMeshedStructre)
csdSolverClassVar.mesh()
# del(transform)
transform = mappingSU2.mapper(forceInitFilePath,preMeshedStructre,csdSolverClassVar)
transform.computesTransformationsMatrices(forceInitFilePath)
if iteration == N-1:
logger.warning("Simulation has reached max number of step,")
logger.warning("convergeance is yet to determine!")
if error[-1] <= tol:
logger.info("Simulation has converged")
iteration = N
# Writes a file which contains the error for each timestep
N = len(error)
path = args.cwd + "/results.csv"
MyFile = open(path,"w")
MyFile.write("Relative error; max displacement")
MyFile.write("\n")
for i in range(N):
MyFile.write(str(error[i]) + ";" + \
str(absoluteDisplacement[i]) + ";" + \
str(Gloads[i]))
MyFile.write("\n")
MyFile.close()
sys.exit()
def solverSU2CSV(args, aeroframeSettings, acceptedNames):
iteration = aeroframeSettings['SU2_iteration']
wkdir = args.cwd
    # TODO: read it from the config file
nb_proc = aeroframeSettings["SU2_Nproc"]
logger.debug("Configuration path: \n"+str(aeroframeSettings))
logger.debug("nb of proc: \n"+str(nb_proc))
logger.debug("WKDIR: \n"+str(wkdir))
# Step 2) Runs a single SU2 simulation
case = '/Case00_alt0_mach0.3_aoa2.0_aos0.0/'
if iteration > 0:
SU2_deformationConverter(aeroframeSettings, wkdir, case, iteration)
SU2_fsi.run_SU2_fsi(aeroframeSettings, wkdir, case, iteration)
def SU2_deformationConverter(aeroframeSettings, wkdir, case, iteration):
defFilePath = wkdir + aeroframeSettings['deformation_file']
defData = | pd.read_csv(defFilePath) | pandas.read_csv |
# %% imports
import numpy as np
import pandas as pd
import config as cfg
from src.utils.data_processing import hours_in_year, medea_path
# --------------------------------------------------------------------------- #
# %% settings and initializing
# --------------------------------------------------------------------------- #
STATIC_FNAME = medea_path('data', 'processed', 'data_static.xlsx')
idx = pd.IndexSlice
# --------------------------------------------------------------------------- #
# %% read in data
# --------------------------------------------------------------------------- #
static_data = {
'CAP_R': pd.read_excel(STATIC_FNAME, 'INITIAL_CAP_R', header=[0], index_col=[0, 1]),
'CAPCOST_R': pd.read_excel(STATIC_FNAME, 'CAPITALCOST_R', header=[0], index_col=[0, 1]),
'potentials': pd.read_excel(STATIC_FNAME, 'potentials', header=[0], index_col=[0]),
'tec': pd.read_excel(STATIC_FNAME, 'parameters_G'),
'feasops': pd.read_excel(STATIC_FNAME, 'FEASIBLE_INPUT-OUTPUT'),
'cost_transport': pd.read_excel(STATIC_FNAME, 'COST_TRANSPORT', header=[0], index_col=[0]),
'CAPCOST_K': pd.read_excel(STATIC_FNAME, 'CAPITALCOST_S', header=[0], index_col=[0, 1]),
'CAP_X': pd.read_excel(STATIC_FNAME, 'ATC', index_col=[0]),
'DISTANCE': pd.read_excel(STATIC_FNAME, 'KM', index_col=[0]),
'AIR_POLLUTION': pd.read_excel(STATIC_FNAME, 'AIR_POLLUTION', index_col=[0])
}
# --------------------------------------------------------------------------------------------------------------------
plant_data = {
'hydro': pd.read_excel(medea_path('data', 'processed', 'plant-list_hydro.xlsx'), 'opsd_hydro'),
'conventional': pd.read_excel(medea_path('data', 'processed', 'power_plant_db.xlsx'))
}
ts_data = {
'timeseries': pd.read_csv(medea_path('data', 'processed', 'medea_regional_timeseries.csv'))
}
# --------------------------------------------------------------------------- #
# %% prepare set data
# --------------------------------------------------------------------------- #
dict_sets = {
'f': {
'Nuclear': [10],
'Lignite': [20],
'Coal': [30],
'Gas': [40],
'Oil': [50],
'Hydro': [60],
'Biomass': [70],
'Solar': [80],
'Wind': [90],
'Power': [100],
'Heat': [110],
'Syngas': [120]
},
'l': {f'l{x}': [True] for x in range(1, 5)},
'm': {
'el': True,
'ht': True
},
'n': {
'pv': [True],
'ror': [True],
'wind_on': [True],
'wind_off': [True]
},
'k': {
'psp_day': [True],
'psp_week': [True],
'psp_season': [True],
'res_day': [True],
'res_week': [True],
'res_season': [True],
'battery': [True]
},
't': {f't{hour}': [True] for hour in range(1, hours_in_year(cfg.year) + 1)},
'z': {zone: [True] for zone in cfg.zones}
}
# convert to DataFrames
for key, value in dict_sets.items():
dict_sets.update({key: pd.DataFrame.from_dict(dict_sets[key], orient='index', columns=['Value'])})
# --------------------------------------------------------------------------- #
# %% prepare static data
# --------------------------------------------------------------------------- #
# Source 'CO2_INTENSITY': CO2 Emission Factors for Fossil Fuels, UBA, 2016
dict_static = {
'CO2_INTENSITY': {
'Nuclear': [0],
'Lignite': [0.399],
'Coal': [0.337],
'Gas': [0.201],
'Oil': [0.266],
'Hydro': [0],
'Biomass': [0],
'Solar': [0],
'Wind': [0],
'Power': [0],
'Heat': [0],
'Syngas': [0]
},
'eta': {
'nuc': [0.34],
'lig_stm': [0.31], 'lig_stm_chp': [0.31],
'lig_boa': [0.43], 'lig_boa_chp': [0.43],
'coal_sub': [0.32], 'coal_sub_chp': [0.32],
'coal_sc': [0.41], 'coal_sc_chp': [0.41],
'coal_usc': [0.44], 'coal_usc_chp': [0.44],
'coal_igcc': [0.55],
'ng_stm': [0.40], 'ng_stm_chp': [0.40],
'ng_cbt_lo': [0.34], 'ng_cbt_lo_chp': [0.34],
'ng_cbt_hi': [0.40], 'ng_cbt_hi_chp': [0.40],
'ng_cc_lo': [0.38], 'ng_cc_lo_chp': [0.38],
'ng_cc_hi': [0.55], 'ng_cc_hi_chp': [0.55],
'ng_mtr': [0.40], 'ng_mtr_chp': [0.40],
'ng_boiler_chp': [0.90],
'oil_stm': [0.31], 'oil_stm_chp': [0.31],
'oil_cbt': [0.35], 'oil_cbt_chp': [0.35],
'oil_cc': [0.42], 'oil_cc_chp': [0.42],
'bio': [0.35], 'bio_chp': [0.35],
'heatpump_pth': [3.0]
},
'map_name2fuel': {
'nuc': 'Nuclear',
'lig': 'Lignite',
'coal': 'Coal',
'ng': 'Gas',
'oil': 'Oil',
'bio': 'Biomass',
'heatpump': 'Power'
},
'CAPCOST_X': {
'AT': [1250],
'DE': [1250]
},
'VALUE_NSE': {
'AT': [12500],
'DE': [12500]
},
'LAMBDA': [0.125],
'SIGMA': [0.175]
}
dict_additions = {
'boilers': {
# 'medea_type': [49.5],
'set_element': 'ng_boiler_chp',
('cap', 'AT'): [4.5],
('cap', 'DE'): [25.5],
('eta', 'AT'): [0.9],
('eta', 'DE'): [0.9]
# ('count', 'AT'): [15],
# ('count', 'DE'): [85],
# ('num', 'AT'): [85],
# ('num', 'DE'): [255]
},
'heatpumps': {
# 'medea_type': [100],
'set_element': 'heatpump_pth',
('cap', 'AT'): [0.1],
('cap', 'DE'): [0.1],
('eta', 'AT'): [3.0],
('eta', 'DE'): [3.0]
# ('count', 'AT'): [1],
# ('count', 'DE'): [1],
# ('num', 'AT'): [1],
# ('num', 'DE'): [1]
},
'batteries': {
'power_in': [0],
'power_out': [0],
'energy_max': [0],
'efficiency_in': [0.96],
'efficiency_out': [0.96],
'cost_power': [static_data['CAPCOST_K'].loc[('AT', 'battery'), 'annuity-power'].round(4)],
'cost_energy': [static_data['CAPCOST_K'].loc[('AT', 'battery'), 'annuity-energy'].round(4)],
'inflow_factor': [0]
}
}
dict_instantiate = {'CO2_INTENSITY': pd.DataFrame.from_dict(dict_static['CO2_INTENSITY'],
orient='index', columns=['Value'])}
dict_instantiate.update({'efficiency': pd.DataFrame.from_dict(dict_static['eta'], orient='index', columns=['l1'])})
dict_instantiate['efficiency']['product'] = 'el'
dict_instantiate['efficiency'].loc[dict_instantiate['efficiency'].index.str.contains('pth'), 'product'] = 'ht'
dict_instantiate['efficiency'].loc['ng_boiler_chp', 'product'] = 'ht'
dict_instantiate['efficiency']['fuel'] = dict_instantiate['efficiency'].index.to_series().str.split('_').str.get(
0).replace(dict_static['map_name2fuel'])
dict_instantiate['efficiency'].set_index(['product', 'fuel'], append=True, inplace=True)
dict_instantiate['efficiency'].index.set_names(['medea_type', 'product', 'fuel_name'], inplace=True)
for i in range(1, 6):
dict_instantiate['efficiency'][f'l{i}'] = dict_instantiate['efficiency']['l1']
dict_instantiate.update({'CAP_R': static_data['CAP_R'].loc[idx[:, cfg.year], :]})
dict_instantiate.update({'CAP_X': static_data['CAP_X'].loc[
static_data['CAP_X'].index.str.contains('|'.join(cfg.zones)),
static_data['CAP_X'].columns.str.contains('|'.join(cfg.zones))] / 1000})
dict_instantiate.update({'DISTANCE': static_data['DISTANCE'].loc[static_data['DISTANCE'].index.str.contains(
'|'.join(cfg.zones)), static_data['DISTANCE'].columns.str.contains('|'.join(cfg.zones))]})
static_data.update({'CAPCOST_X': pd.DataFrame.from_dict(dict_static['CAPCOST_X'], orient='index', columns=['Value'])})
static_data.update({'VALUE_NSE': pd.DataFrame.from_dict(dict_static['VALUE_NSE'], orient='index', columns=['Value'])})
static_data.update({'LAMBDA': pd.DataFrame(dict_static['LAMBDA'], columns=['Value'])})
static_data.update({'SIGMA': pd.DataFrame(dict_static['SIGMA'], columns=['Value'])})
# --------------------------------------------------------------------------- #
# %% preprocessing plant data
# --------------------------------------------------------------------------- #
# dispatchable (thermal) plants
# filter active thermal plants
plant_data.update({'active': plant_data['conventional'].loc[
(plant_data['conventional']['UnitOperOnlineDate'] < pd.Timestamp(cfg.year, 1, 1)) &
(plant_data['conventional']['UnitOperRetireDate'] > pd.Timestamp(cfg.year, 12, 31)) |
np.isnat(plant_data['conventional']['UnitOperRetireDate'])]})
# exclude hydro power plant
plant_data['active'] = plant_data['active'].loc[(plant_data['active']['MedeaType'] < 60) |
(plant_data['active']['MedeaType'] >= 70)]
# capacities by country in GW
prop_g = plant_data['active'].groupby(['MedeaType', 'PlantCountry'])['UnitNameplate'].sum().to_frame() / 1000
prop_g['eta'] = plant_data['active'].groupby(['MedeaType', 'PlantCountry'])['Eta'].mean().to_frame()
# prop_g['count'] = plant_data['active'].groupby(['MedeaType'])['PlantCountry'].value_counts().to_frame(name='count')
# prop_g['num'] = (prop_g['UnitNameplate'].round(decimals=1) * 10).astype(int)
prop_g.rename(index={'Germany': 'DE', 'Austria': 'AT'}, columns={'UnitNameplate': 'cap'}, inplace=True)
prop_g = prop_g.unstack(-1)
prop_g.drop(0.0, axis=0, inplace=True)
# index by plant element names instead of medea_type-numbers
prop_g.index = prop_g.index.map(pd.Series(static_data['tec']['set_element'].values,
index=static_data['tec']['medea_type'].values).to_dict())
# update 'empirical' efficiencies with generic efficiencies
for zone in cfg.zones:
prop_g.loc[:, idx['eta', zone]].update(pd.DataFrame.from_dict(dict_static['eta'],
orient='index', columns=['eta']).iloc[:, 0])
# add data for heat boilers
prop_g = prop_g.append(pd.DataFrame.from_dict(dict_additions['boilers']).set_index('set_element'))
# add data for heatpumps
prop_g = prop_g.append(pd.DataFrame.from_dict(dict_additions['heatpumps']).set_index('set_element'))
# remove non-existent plant
prop_g = prop_g.stack(-1).swaplevel(axis=0)
prop_g = prop_g.dropna()
# update instantiation dictionary
dict_instantiate.update({'tec_props': prop_g})
# add 'tec'-set to dict_sets
dict_sets.update({'i': pd.DataFrame(data=True, index=prop_g.index.get_level_values(1).unique().values,
columns=['Value'])})
static_data['feasops']['fuel_name'] = (static_data['feasops']['medea_type'] / 10).apply(np.floor) * 10
static_data['feasops']['fuel_name'].replace({y: x for x, y in dict_sets['f'].itertuples()}, inplace=True)
static_data['feasops']['set_element'] = static_data['feasops']['medea_type']
static_data['feasops']['set_element'].replace(
{x: y for x, y in static_data['tec'][['medea_type', 'set_element']].values}, inplace=True)
static_data['feasops'].dropna(inplace=True)
static_data['feasops'].set_index(['set_element', 'l', 'fuel_name'], inplace=True)
# following line produces memory error (0xC00000FD) --> workaround with element-wise division
# df_feasops['fuel_need'] = df_feasops['fuel']/ df_eff
# TODO: PerformanceWarning: indexing past lexsort depth may impact performance (3 times)
static_data['feasops']['fuel_need'] = np.nan
for typ in static_data['feasops'].index.get_level_values(0).unique():
for lim in static_data['feasops'].index.get_level_values(1).unique():
static_data['feasops'].loc[idx[typ, lim], 'fuel_need'] = static_data['feasops'].loc[
idx[typ, lim], 'fuel'].mean() / \
dict_static['eta'][typ][0]
# adjust static_data['tec'] to reflect modelled power plants
static_data['tec'].set_index('set_element', inplace=True)
static_data['tec'] = static_data['tec'].loc[static_data['tec'].index.isin(dict_sets['i'].index), :]
dict_instantiate['efficiency'] = \
dict_instantiate['efficiency'].loc[
dict_instantiate['efficiency'].index.get_level_values(0).isin(dict_sets['i'].index), :]
static_data['feasops'] = \
static_data['feasops'].loc[static_data['feasops'].index.get_level_values(0).isin(dict_sets['i'].index), :]
# --------------------------------------------------------------------------- #
# hydro storage data
# drop all ror data
plant_data['hydro'].drop(plant_data['hydro'][plant_data['hydro'].technology == 'Run-of-river'].index, inplace=True)
# filter out data without reservoir size in GWh
plant_data['hydro'].dropna(subset=['energy_max', 'power_in'], inplace=True)
# calculate duration of generation from full reservoir
plant_data['hydro']['max_duration'] = plant_data['hydro']['energy_max'] / plant_data['hydro']['power_out'] * 1000 / 24
plant_data['hydro']['count'] = 1
plant_data.update({'hydro_clusters': plant_data['hydro'].groupby(['technology', 'country',
pd.cut(plant_data['hydro']['max_duration'],
[0, 2, 7, 75])]).sum()})
plant_data['hydro_clusters']['efficiency_in'] = plant_data['hydro_clusters']['efficiency_in'] / \
plant_data['hydro_clusters']['count']
plant_data['hydro_clusters']['efficiency_out'] = plant_data['hydro_clusters']['efficiency_out'] / \
plant_data['hydro_clusters']['count']
plant_data['hydro_clusters']['cost_power'] = np.nan
plant_data['hydro_clusters']['cost_energy'] = np.nan
# assign technology and zone index to rows
plant_data['hydro_clusters']['country'] = plant_data['hydro_clusters'].index.get_level_values(1)
plant_data['hydro_clusters']['category'] = plant_data['hydro_clusters'].index.get_level_values(2).rename_categories(
['day', 'week', 'season']).astype(str)
plant_data['hydro_clusters']['tech'] = plant_data['hydro_clusters'].index.get_level_values(0)
plant_data['hydro_clusters']['tech'] = plant_data['hydro_clusters']['tech'].replace(['Pumped Storage', 'Reservoir'],
['psp', 'res'])
plant_data['hydro_clusters']['set_elem'] = plant_data['hydro_clusters']['tech'] + '_' + plant_data['hydro_clusters'][
'category']
plant_data['hydro_clusters'] = plant_data['hydro_clusters'].set_index(['set_elem', 'country'])
plant_data['hydro_clusters'].fillna(0, inplace=True)
plant_data['hydro_clusters']['power_out'] = plant_data['hydro_clusters']['power_out'] / 1000 # conversion from MW to GW
plant_data['hydro_clusters']['power_in'] = plant_data['hydro_clusters']['power_in'] / 1000 # conversion from MW to GW
plant_data['hydro_clusters']['inflow_factor'] = (
plant_data['hydro_clusters']['energy_max'] / plant_data['hydro_clusters']['energy_max'].sum())
plant_data['hydro_clusters'] = plant_data['hydro_clusters'].loc[:, ['power_in', 'power_out', 'energy_max',
'efficiency_in', 'efficiency_out', 'cost_power',
'cost_energy', 'inflow_factor']].copy()
# append battery data
bat_idx = pd.MultiIndex.from_product([['battery'], list(cfg.zones)])
df_battery = pd.DataFrame(np.nan, bat_idx, dict_additions['batteries'].keys())
for zone in list(cfg.zones):
for key in dict_additions['batteries'].keys():
df_battery.loc[('battery', zone), key] = dict_additions['batteries'][key][0]
plant_data['storage_clusters'] = plant_data['hydro_clusters'].append(df_battery)
# --------------------------------------------------------------------------- #
# %% process time series data
# --------------------------------------------------------------------------- #
ts_data['timeseries']['DateTime'] = pd.to_datetime(ts_data['timeseries']['DateTime'])
ts_data['timeseries'].set_index('DateTime', inplace=True)
# constrain data to scenario year
ts_data['timeseries'] = ts_data['timeseries'].loc[
(pd.Timestamp(cfg.year, 1, 1, 0, 0).tz_localize('UTC') <= ts_data['timeseries'].index) & (
ts_data['timeseries'].index <= pd.Timestamp(cfg.year, 12, 31, 23, 0).tz_localize('UTC'))]
# drop index and set index of df_time instead
if len(ts_data['timeseries']) == len(dict_sets['t']):
ts_data['timeseries'].set_index(dict_sets['t'].index, inplace=True)
else:
raise ValueError('Mismatch of time series data and model time resolution. Is cfg.year wrong?')
ts_data['timeseries']['DE-power-load'] = ts_data['timeseries']['DE-power-load'] / 0.91
# for 0.91 scaling factor see
# https://www.entsoe.eu/fileadmin/user_upload/_library/publications/ce/Load_and_Consumption_Data.pdf
# create price time series incl transport cost
ts_data['timeseries']['Nuclear'] = 3.5
ts_data['timeseries']['Lignite'] = 4.5
ts_data['timeseries']['Biomass'] = 6.5
# subset of zonal time series
ts_data['zonal'] = ts_data['timeseries'].loc[:, ts_data['timeseries'].columns.str.startswith(('AT', 'DE'))].copy()
ts_data['zonal'].columns = ts_data['zonal'].columns.str.split('-', expand=True)
# adjust column naming to reflect proper product names ('el' and 'ht')
ts_data['zonal'] = ts_data['zonal'].rename(columns={'power': 'el', 'heat': 'ht'})
model_prices = ['Coal', 'Oil', 'Gas', 'EUA', 'Nuclear', 'Lignite', 'Biomass', 'price_day_ahead']
ts_data['price'] = pd.DataFrame(index=ts_data['timeseries'].index,
columns=pd.MultiIndex.from_product([model_prices, cfg.zones]))
for zone in cfg.zones:
for fuel in model_prices:
if fuel in static_data['cost_transport'].index:
ts_data['price'][(fuel, zone)] = ts_data['timeseries'][fuel] + static_data['cost_transport'].loc[fuel, zone]
else:
ts_data['price'][(fuel, zone)] = ts_data['timeseries'][fuel]
ts_inflows = pd.DataFrame(index=list(ts_data['zonal'].index),
columns=pd.MultiIndex.from_product([cfg.zones, dict_sets['k'].index]))
for zone in list(cfg.zones):
for strg in dict_sets['k'].index:
if 'battery' not in strg:
ts_inflows.loc[:, (zone, strg)] = ts_data['zonal'].loc[:, idx[zone, 'inflows', 'reservoir']] * \
plant_data['storage_clusters'].loc[(strg, zone), 'inflow_factor']
ts_data.update({'inflows': ts_inflows})
dict_instantiate.update({'ancil': ts_data['zonal'].loc[:, idx[:, 'el', 'load']].max().unstack((1, 2)).squeeze() * 0.125
+ dict_instantiate['CAP_R'].unstack(1).drop('ror', axis=1).sum(axis=1) * 0.075})
dict_instantiate.update({'PEAK_LOAD': ts_data['zonal'].loc[:, idx[:, 'el', 'load']].max().unstack((1, 2)).squeeze()})
dict_instantiate.update({'PEAK_PROFILE': ts_data['zonal'].loc[:, idx[:, :, 'profile']].max().unstack(2).drop(
'ror', axis=0, level=1)})
# drop rows with all zeros
plant_data['storage_clusters'] = \
plant_data['storage_clusters'].loc[~(plant_data['storage_clusters'] == 0).all(axis=1), :].copy()
# --------------------------------------------------------------------------- #
# %% limits on investment - long-run vs short-run & # TODO: potentials
# --------------------------------------------------------------------------- #
invest_limits = {}
lim_invest_thermal = pd.DataFrame([0])
if cfg.invest_conventionals:
lim_invest_thermal = pd.DataFrame([float('inf')])
invest_limits.update({'thermal': lim_invest_thermal})
# dimension lim_invest_itm[r, tec_itm]
lim_invest_itm = pd.DataFrame(data=0, index=cfg.zones, columns=dict_sets['n'].index)
if cfg.invest_renewables:
for zone in cfg.zones:
for itm in lim_invest_itm.columns:
lim_invest_itm.loc[zone, itm] = float(static_data['potentials'].loc[itm, zone])
invest_limits.update({'intermittent': lim_invest_itm})
# dimension lim_invest_storage[r, tec_strg]
lim_invest_storage = pd.DataFrame(data=0, index=cfg.zones, columns=dict_sets['k'].index)
if cfg.invest_storage:
for zone in cfg.zones:
for strg in lim_invest_storage.columns:
lim_invest_storage.loc[zone, strg] = float(static_data['potentials'].loc[strg, zone])
invest_limits.update({'storage': lim_invest_storage})
# dimension lim_invest_atc[r,rr]
lim_invest_atc = | pd.DataFrame(data=0, index=cfg.zones, columns=cfg.zones) | pandas.DataFrame |
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Data Mining #
# File : \mymain.py #
# Python : 3.9.1 #
# --------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : nov8.ai #
# Email : <EMAIL> #
# URL : https://github.com/john-james-sf/Data-Mining/ #
# --------------------------------------------------------------------------- #
# Created : Tuesday, March 9th 2021, 12:24:24 am #
# Last Modified : Tuesday, March 9th 2021, 12:24:24 am #
# Modified By : <NAME> (<EMAIL>) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2021 nov8.ai #
# =========================================================================== #
# =========================================================================== #
# 1. LIBRARIES #
# =========================================================================== #
#%%
# System and python libraries
from abc import ABC, abstractmethod
import datetime
import glob
import itertools
from joblib import dump, load
import os
import pickle
import time
import uuid
# Manipulating, analyzing and processing data
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy as sp
from scipy.stats.stats import pearsonr, f_oneway
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, SimpleImputer
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.preprocessing import OneHotEncoder, PowerTransformer
from category_encoders import TargetEncoder, LeaveOneOutEncoder
# Feature and model selection and evaluation
from sklearn.feature_selection import RFECV, SelectKBest
from sklearn.feature_selection import VarianceThreshold, f_regression
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
# Regression based estimators
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
# Tree-based estimators
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
# Visualizing data
import seaborn as sns
import matplotlib.pyplot as plt
from tabulate import tabulate
# Utilities
from utils import notify, PersistEstimator, comment, print_dict, print_dict_keys
# Data Source
from data import AmesData
pd.set_option('display.max_rows', None)
| pd.set_option('display.max_columns', None) | pandas.set_option |
import pandas as pd
import pathlib
import yaml
from cooler.util import binnify, read_chromsizes
import numpy as np
import time
from statsmodels.stats.multitest import multipletests
from scipy.stats import norm
import subprocess
import pybedtools
from concurrent.futures import as_completed, ProcessPoolExecutor
import cemba_data
PACKAGE_DIR = pathlib.Path(cemba_data.__path__[0])
DSS_TWO_GROUP_TEMPLATE = PACKAGE_DIR / 'dmr/dss/DSS.TwoGroup.SingleRegionDML.ipynb'
def prepare_snakemake(allc_table_path, output_dir, chrom_sizes_path, template_path, chroms=None, smoothing=True,
chunk_size=50000000):
output_dir = pathlib.Path(output_dir).absolute()
output_dir.mkdir(exist_ok=True)
allc_table = pd.read_csv(allc_table_path, sep='\t')
allc_table.columns = ['allc_path', 'sample', 'group']
if allc_table['group'].unique().size != 2:
raise ValueError(
f"There must be two and only two different groups, got {allc_table['group'].unique().size}."
)
group1, group2 = allc_table['group'].unique()
group1_allc = allc_table.loc[allc_table['group'] == group1,
'allc_path'].tolist()
group2_allc = allc_table.loc[allc_table['group'] == group2,
'allc_path'].tolist()
group1_id = allc_table.loc[allc_table['group'] == group1, 'sample'].tolist()
group2_id = allc_table.loc[allc_table['group'] == group2, 'sample'].tolist()
chrom_sizes = read_chromsizes(chrom_sizes_path).reindex(chroms)
if chroms is None:
chroms = chrom_sizes.index.tolist()
bins = binnify(chrom_sizes.loc[chroms], binsize=chunk_size)
regions = []
for _, (chrom, start, end) in bins.iterrows():
region = f'{chrom}:{start}-{end}'
regions.append(region)
for region in regions:
config_path = f'{output_dir}/{region}.yaml'
parameters = dict(region=region,
allc_paths=group1_allc + group2_allc,
group1=group1_id,
group2=group2_id,
smoothing=smoothing)
with open(config_path, 'w') as f:
f.write(yaml.dump(parameters))
snakefile = f"""
regions = {regions}
rule main:
input:
expand('{{region}}.DSS.DML.hdf', region=regions)
rule papermill:
input:
nb='{template_path}',
config='{{region}}.yaml'
output:
nb='{{region}}.ipynb',
data='{{region}}.DSS.DML.hdf'
shell:
'papermill {{input.nb}} {{output.nb}} -f {{input.config}} && sleep 10'
"""
snakefile_path = f'{output_dir}/Snakefile'
with open(snakefile_path, 'w') as f:
f.write(snakefile)
return snakefile_path
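# Hedged usage sketch (paths and chromosome names are placeholders): generate
# the per-region Snakefile and execute it, mirroring what run_dss_two_group()
# does further below.
# snakefile = prepare_snakemake('allc_table.tsv', 'dss_output', 'genome.chrom.sizes',
#                               DSS_TWO_GROUP_TEMPLATE, chroms=['chr1', 'chr2'])
# subprocess.run(f'snakemake -d dss_output --snakefile {snakefile} -j 8',
#                shell=True, check=True)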
def _parse_dml_ids(string):
ids = np.array(string.split(',')).astype(int)
start = min(ids)
end = max(ids)
result = pd.Series({
'idx_start': start,
'idx_end': end,
'n_sig': ids.size,
'total_dml': end - start + 1,
'sig_ratio': ids.size / (end - start + 1)
})
return result
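# Worked example (illustrative only): for the id string "3,5,7" the ids span
# 3..7, so n_sig = 3, total_dml = 5 and sig_ratio = 3 / 5 = 0.6.
# _parse_dml_ids("3,5,7")
# -> idx_start=3, idx_end=7, n_sig=3, total_dml=5, sig_ratio=0.6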
def call_dmr_single_chromosome(output_dir, chrom, p_threshold, min_cg, min_len,
sig_ratio, delta, chrom_sizes_path):
# read DML for one chromosome
print(f'Reading DML tables for {chrom}')
dss_paths = list(pathlib.Path(output_dir).glob(f'{chrom}:*DSS.DML.hdf'))
dmls = []
for path in dss_paths:
try:
df = pd.read_hdf(path)
if df.shape[0] > 0:
dmls.append(df)
except ValueError:
# this happens when the HDF5 is empty
pass
dmls = pd.concat(dmls)
dmls.sort_values('pos', inplace=True)
dmls.dropna(subset=['pval'], inplace=True)
dmls.reset_index(drop=True, inplace=True)
print('Selecting significant DMLs to merge DMRs')
# Select sig
# recalculate FDR
_, fdr, *_ = multipletests(dmls['pval'], method='fdr_bh')
dmls['fdr'] = fdr
# Calculate delta significance Pr(diff.abs() > delta)
delta_p = 1 - norm.cdf(dmls['diff'].abs() - delta, scale=dmls['diff.se'])
# select sig DML to merge DMR
dmls['sig'] = (dmls['fdr'] < p_threshold) & (delta_p < p_threshold)
dmls.to_hdf(f'{output_dir}/{chrom}.DML.hdf', key='data', format="table")
sig_dmls = dmls[dmls['sig']]
# Merge DMR
print('Merge DMRs')
dml_bed = sig_dmls[['chr', 'pos', 'pos']].reset_index()
dml_bed.columns = ['id', 'chr', 'start', 'end']
dml_bed['end'] += 1
dml_bed = dml_bed.iloc[:, [1, 2, 3, 0]]
dml_bed = pybedtools.BedTool.from_dataframe(dml_bed)
try:
dmr_bed = dml_bed.sort(g=chrom_sizes_path).merge(
d=250, c='4', o='collapse').to_dataframe()
except pybedtools.helpers.BEDToolsError:
return False
# Annotate DMR
print('Annotating DMRs')
name = dmr_bed.pop('name')
dmr_bed = pd.concat([dmr_bed, name.apply(_parse_dml_ids)], axis=1)
dmr_bed = dmr_bed.astype({
'idx_start': int,
'idx_end': int,
'n_sig': int,
'total_dml': int
})
def _region_stat(row):
idx_start = row['idx_start']
idx_end = row['idx_end']
return dmls.iloc[idx_start:idx_end + 1].agg({
'mu1': 'mean',
'mu2': 'mean',
'stat': 'sum'
})
dmr_stats = dmr_bed.apply(_region_stat, axis=1)
dmr_bed = pd.concat([dmr_bed, dmr_stats], axis=1)
dmr_bed['diff'] = dmr_bed['mu1'] - dmr_bed['mu2']
dmr_bed['length'] = dmr_bed['end'] - dmr_bed['start']
# final DMR filter
judge = (dmr_bed['n_sig'] >= min_cg) & (
dmr_bed['sig_ratio'] > sig_ratio) & (dmr_bed['length'] >= min_len) & (
dmr_bed['diff'].abs() > delta)
dmr_bed['selected_dmr'] = judge
dmr_bed.to_hdf(f'{output_dir}/{chrom}.DMR.hdf', key='data')
return True
def run_dss_two_group(allc_table_path, output_dir, study_name, chrom_sizes_path, chroms=None, smoothing=True,
p_threshold=0.001, min_cg=1, min_len=1, sig_ratio=0.5, delta=0.1, cpu=10, save_dml=False,
template_path='default', chunk_size=50000000):
# prepare template
if template_path == 'default':
template_path = DSS_TWO_GROUP_TEMPLATE
# prepare snakemake
output_dir = pathlib.Path(output_dir).absolute()
this_study_dir = output_dir / f'{study_name}_DSS'
prepare_snakemake(allc_table_path=allc_table_path,
output_dir=this_study_dir,
chrom_sizes_path=chrom_sizes_path,
chroms=chroms,
smoothing=smoothing,
template_path=template_path,
chunk_size=chunk_size)
    # the ipykernel package raises "zmq.error.ZMQError: Address already in use" due to parallel execution;
    # restarting likely solves the problem.
snakemake_cmd = f'snakemake -d {this_study_dir} --snakefile {this_study_dir}/Snakefile ' \
f'-j {cpu} --default-resources mem_mb=100 --resources mem_mb={int(5000 * cpu)} ' \
f'--restart-times 3'
subprocess.run(snakemake_cmd, shell=True, check=True)
with ProcessPoolExecutor(cpu) as exe:
futures = {}
for chrom in chroms:
f = exe.submit(call_dmr_single_chromosome,
output_dir=this_study_dir,
chrom=chrom,
p_threshold=p_threshold,
min_cg=min_cg,
min_len=min_len,
sig_ratio=sig_ratio,
delta=delta,
chrom_sizes_path=chrom_sizes_path)
futures[f] = chrom
time.sleep(1)
total_dmrs = []
for future in as_completed(futures):
chrom = futures[future]
print(f'{chrom} finished')
success_flag = future.result()
if success_flag:
total_dmrs.append( | pd.read_hdf(f'{this_study_dir}/{chrom}.DMR.hdf') | pandas.read_hdf |
# Data Worker
# %%
import os
import pandas as pd
import plotly.express as px
from pypinyin import lazy_pinyin
locations_url = 'https://blog.csdn.net/envbox/article/details/80290103'
filename = 'locations.json'
sync_folder = os.environ.get('Sync', '.')
mapbox = dict(
mapbox_accesstoken=open(os.path.join(
os.environ['onedrive'], '.mapbox_token')).read(),
mapbox_style='light'
)
def fetch_locations():
locations = | pd.read_html(locations_url) | pandas.read_html |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 24 19:08:39 2019
@author: <NAME> et al. - "Evaluation of machine learning models for
automatic detection of DNA double strand breaks after irradiation using a gH2AX
foci assay", PLOS One, 2020
"""
# main file for training machine learning models using previously labeled data
###############################################################################
# Parameters and file path that have to be set manually:
# Parameters:
# min area of nucleus
min_area = 4000
# color channel of nucleus. 0 = red, 1 = green, 2 = blue. For grayscale images
# this value is ignored.
nuc_chan = 2
# color channel of foci. 0 = red, 1 = green, 2 = blue. For grayscale images
# this value is ignored.
foci_chan = 1
# color channel of marked image. 0 = red, 1 = green, 2 = blue. Corresponds to
# the color of the markings in the manually labeled foci images
mark_chan = 0
# adjust image size - might be useful to save calculation time. Needs to be
# identical for foci and nucleus images
# image rescale factor:
rescale_factor = 1.0
# take only those PCA components cumulatively explaining var_max of the variance
# 0<var_max<=1.
var_max = 0.95
# randomly sample a proportion of the training data from each image (0<sampling<=1).
# speeds up training process if smaller than 1
sampling = 1
# used filter sizes
filt_range = [2,3,4,5,8,10,15,20,25,30,35]
# scaling range for frangi filter
sc_range = list(range(2,11))
#frequency range for gabor filter
freq = [0.08,0.10,0.13,0.16,0.2]
# Name used for saving the trained model and related images:
model_name = "MLP"
# directory containing the foci images:
im_path_foci = "D:\\Sample Images\\foci"
# directory containing the manually labeled foci images:
im_path_foci_marked = "D:\\Sample Images\\foci_marked"
# directory containing the nucleus images:
im_path_dapi = "D:\\Sample Images\\dapi"
###############################################################################
###############################################################################
###############################################################################
# Turn off warnings; this is especially annoying with sklearn stuff
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
# Get packages:
# For directory, file handling etc.
import os
import sys
# import pandas:
import pandas as pd
# import numpy
import numpy as np
# For image analysis:
from skimage.io import imread, imshow
from skimage.io import imsave
from skimage.transform import rescale
# import packages related to (pre-) processing of data and model results:
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
# import model packages:
from sklearn.naive_bayes import ComplementNB
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier, VotingClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import precision_score, recall_score
sys.path.append(os.getcwd())
main_file_path = os.getcwd()
# self written functions:
from FociDetectImgAnalysis import get_nuclei, GoToFiles
from GetFociData import get_labeled_data
from GetBestParams import image_pipeline
###############################################################################
# Start analysis of dapi images
# go to nucleus folder:
os.chdir(im_path_dapi)
print("Analyzing nucleus images ...")
# go to the directory containing the foci images:
#im_path = os.getcwd()+"\Sample Images\foci"
#os.chdir(im_path)
# start reading in image files:
stats = []
# get file names
save_path = "Single Nuclei"
files = GoToFiles(im_path_dapi,save_path)
for file_num in range(len(files)):
file_name, extension = os.path.splitext(files[file_num])
# print(file_name + " " + extension)
if extension in [".png",".tif",".jpg",".bmp"]:
# read image:
image = imread(files[file_num])
image = rescale(image, rescale_factor, order=1,preserve_range = True)
image = np.uint8(image)
#imshow(image)
# get region props of the blue channel:
if(len(image.shape)<3):
stats.append(get_nuclei(image[:,:],file_name))
else:
stats.append(get_nuclei(image[:,:,nuc_chan],file_name))
# Get x and y data for model training and the coordinates for each image:
# y_data is boolean with True where a pixel was marked as a focus and False otherwise
x_data, y_data, coords = get_labeled_data(im_path_foci,stats,im_path_foci_marked,filt_range,freq,sc_range,rescale_factor,foci_chan, mark_chan)
# When done with everything go back to the main folder:
os.chdir(main_file_path)
###############################################################################
# This part is for model training, assuming "get_labeled_data" was run successfully
# get training data:
x_train_transformed, y_train, idx, s1, s2, p, mnmx = image_pipeline(x_data,y_data,removed_im = [],var_max = var_max, sampling = sampling)
# Chose the model to train:
# neural network:
model = MLPClassifier(alpha=0.1, batch_size = 2000, learning_rate = "adaptive", learning_rate_init = 0.1, max_iter = 300, tol = 10**-4, early_stopping = True)
parameters = {'batch_size':[100,500,1000,2000,3000,4000,5000],'alpha':[10**-4,10**-3,10**-2,10**-1,1]}
# random forest:
# clf = RandomForestClassifier(criterion = "entropy",min_weight_fraction_leaf = 0.005, n_estimators = 15,max_depth = 50, min_samples_leaf = 10,min_samples_split = 100, n_jobs = -1)
# model = AdaBoostClassifier(base_estimator = clf, n_estimators=5)
# parameters = {'base_estimator__min_weight_fraction_leaf':[0.0001,0.001,0.005],'base_estimator__n_estimators':[5,10,15,20],'base_estimator__min_samples_leaf':[10,20,100]}
# complement naive bayes:
# clf = ComplementNB(alpha = 0.0, norm = True)
# model = AdaBoostClassifier(base_estimator = clf, n_estimators=15)
# parameters = {'base_estimator__alpha': [0,0.01,0.02,0.03,0.04,0.05,0.06], 'base_estimator__norm': [True, False]}
# support vector machine:
# linear svm
# clf = LinearSVC(penalty = "l2", loss = "hinge", C = 2, class_weight = "balanced", max_iter = 5000)
# model = AdaBoostClassifier(base_estimator = clf, n_estimators=5,algorithm='SAMME')
# parameters = {"base_estimator__C": [0.1,0.3,0.6,1,2,3]}
print("Performing grid search ...")
# get best model parameters:
clf = GridSearchCV(model, parameters, cv = 3)
clf.fit(x_train_transformed, y_train)
###############################################################################
# train models based on on all but one of the images and test on the remaining
# one. Do this for all combinations of images.
# Save images and some resulting statistics.
# save path:
save_path = im_path_foci + "\Results Model Validation"
# set model:
# neural network:
# model = MLPClassifier(alpha=0.1, batch_size = 2000, learning_rate = "adaptive", learning_rate_init = 0.1, max_iter = 300, tol = 10**-4, early_stopping = True)
model = clf.best_estimator_
im_stats = []
# create data sets leaving out one image:
print("Training model (leave one out) ...")
for im in range(len(x_data)):
print("Current Image:" + str(im+1))
removed_im = [im]
x_train_transformed, y_train, idx, s1, s2, p, mnmx = image_pipeline(x_data,y_data,removed_im,var_max = 0.95, sampling = 1)
# use some defined model and train it with the x-featues:
model.fit(x_train_transformed, y_train)
# create variables for test image:
x_vals_im = | pd.DataFrame(x_data[removed_im[0]]) | pandas.DataFrame |
import sklearn.ensemble as ek
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
import pickle
import pandas as pd
import numpy as np
import pyprind
import re
from collections import Counter
import math
import traceback
model_dir = r'C:\Users\86151\Desktop\Data Mining Homework\Wee 8\ML-for-SQL-Injection\ML_for_SQL\data\models\model.model'
def entropy(s):
p, lns = Counter(s), float(len(s))
return -sum(count / lns * math.log(count / lns, 2) for count in p.values())
def getFeatures(url,label):
result = []
url = str(url)
result.append(url)
num_len=0
capital_len=0
key_num=0
feature3=0
num_len=len(re.compile(r'\d').findall(url))
try:
if len(url)!=0:
num_f=num_len/len(url)#数字字符频率
capital_len=len(re.compile(r'[A-Z]').findall(url))
if len(url)!=0:
capital_f=capital_len/len(url)#大写字母频率
url=url.lower()
key_num=url.count('and%20')+url.count('or%20')+url.count('xor%20')+url.count('sysobjects%20')+url.count('version%20')+url.count('substr%20')+url.count('len%20')+url.count('substring%20')+url.count('exists%20')
key_num=key_num+url.count('mid%20')+url.count('asc%20')+url.count('inner join%20')+url.count('xp_cmdshell%20')+url.count('version%20')+url.count('exec%20')+url.count('having%20')+url.count('unnion%20')+url.count('order%20')+url.count('information schema')
key_num=key_num+url.count('load_file%20')+url.count('load data infile%20')+url.count('into outfile%20')+url.count('into dumpfile%20')
if len(url)!=0:
space_f=(url.count(" ")+url.count("%20"))/len(url)#空格百分比
special_f=(url.count("{")*2+url.count('28%')*2+url.count('NULL')+url.count('[')+url.count('=')+url.count('?'))/len(url)
prefix_f=(url.count('\\x')+url.count('&')+url.count('\\u')+url.count('%'))/len(url)
result.append(len(url))
result.append(key_num)
result.append(capital_f)
result.append(num_f)
result.append(space_f)
result.append(special_f)
result.append(prefix_f)
result.append(entropy(url))
result.append(str(label))
return result
except:
traceback.print_exc()
exit(-1)
def plot_feature_importances(feature_importances,title,feature_names):
# 将重要性值标准化
feature_importances = 100.0*(feature_importances/max(feature_importances))
# 将得分从高到低排序
index_sorted = np.flipud(np.argsort(feature_importances))
# 让X坐标轴上的标签居中显示
pos = np.arange(index_sorted.shape[0])+0.5
# plt.figure(figsize=(16,4))
# plt.bar(pos,feature_importances[index_sorted],align='center')
# plt.xticks(pos,feature_names[index_sorted])
# plt.ylabel('Relative Importance')
# plt.title(title)
# plt.show()
if __name__ == '__main__':
# 提取特征
df = | pd.read_csv("data/dataset.csv") | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import scipy.stats as stats
from matplotlib import gridspec
from matplotlib.lines import Line2D
from .util import *
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pylab as pl
import matplotlib.dates as mdates
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import matplotlib.patheffects as pe
from .sanker import Sanker
import imageio
class Visualizer():
def __init__(self, district_list, private_list, city_list, contract_list, bank_list, leiu_list):
self.district_list = district_list.copy()
self.private_list = private_list.copy()
for x in city_list:
self.private_list.append(x)
self.contract_list = contract_list
self.bank_list = bank_list
self.leiu_list = leiu_list
self.private_districts = {}
for x in self.private_list:
self.private_districts[x.name] = []
for xx in x.district_list:
self.private_districts[x.name].append(xx)
inflow_inputs = pd.read_csv('calfews_src/data/input/calfews_src-data.csv', index_col=0, parse_dates=True)
x2_results = pd.read_csv('calfews_src/data/input/x2DAYFLOW.csv', index_col=0, parse_dates=True)
self.observations = inflow_inputs.join(x2_results)
self.observations['delta_outflow'] = self.observations['delta_inflow'] + self.observations['delta_depletions'] - self.observations['HRO_pump'] - self.observations['TRP_pump']
self.index_o = self.observations.index
self.T_o = len(self.observations)
self.day_month_o = self.index_o.day
self.month_o = self.index_o.month
self.year_o = self.index_o.year
kern_bank_observations = pd.read_csv('calfews_src/data/input/kern_water_bank_historical.csv')
kern_bank_observations = kern_bank_observations.set_index('Year')
semitropic_bank_observations = pd.read_csv('calfews_src/data/input/semitropic_bank_historical.csv')
semitropic_bank_observations = semitropic_bank_observations.set_index('Year')
total_bank_kwb = np.zeros(self.T_o)
total_bank_smi = np.zeros(self.T_o)
for x in range(0, self.T_o):
if self.month_o[x] > 9:
year_str = self.year_o[x]
else:
year_str = self.year_o[x] - 1
if self.month_o[x] == 9 and self.day_month_o[x] == 30:
year_str = self.year_o[x]
total_bank_kwb[x] = kern_bank_observations.loc[year_str, 'Ag'] + kern_bank_observations.loc[year_str, 'Mixed Purpose']
deposit_history = semitropic_bank_observations[semitropic_bank_observations.index <= year_str]
total_bank_smi[x] = deposit_history['Metropolitan'].sum() + deposit_history['South Bay'].sum()
self.observations['kwb_accounts'] = pd.Series(total_bank_kwb, index=self.observations.index)
self.observations['smi_accounts'] = pd.Series(total_bank_smi, index=self.observations.index)
def get_results_sensitivity_number(self, results_file, sensitivity_number, start_month, start_year, start_day):
self.values = {}
numdays_index = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
with h5py.File(results_file, 'r') as f:
data = f['s' + sensitivity_number]
names = data.attrs['columns']
names = list(map(lambda x: str(x).split("'")[1], names))
df_data = pd.DataFrame(data[:], columns=names)
for x in df_data:
self.values[x] = df_data[x]
datetime_index = []
monthcount = start_month
yearcount = start_year
daycount = start_day
leapcount = np.remainder(start_year, 4)
for t in range(0, len(self.values[x])):
datetime_index.append(str(yearcount) + '-' + str(monthcount) + '-' + str(daycount))
daycount += 1
if leapcount == 0 and monthcount == 2:
numdays_month = numdays_index[monthcount - 1] + 1
else:
numdays_month = numdays_index[monthcount - 1]
if daycount > numdays_month:
daycount = 1
monthcount += 1
if monthcount == 13:
monthcount = 1
yearcount += 1
leapcount += 1
if leapcount == 4:
leapcount = 0
self.values['Datetime'] = pd.to_datetime(datetime_index)
self.values = pd.DataFrame(self.values)
self.values = self.values.set_index('Datetime')
self.index = self.values.index
self.T = len(self.values.index)
self.day_year = self.index.dayofyear
self.day_month = self.index.day
self.month = self.index.month
self.year = self.index.year
self.starting_year = self.index.year[0]
self.ending_year = self.index.year[-1]
self.number_years = self.ending_year - self.starting_year
total_kwb_sim = np.zeros(len(self.values))
total_smi_sim = np.zeros(len(self.values))
for district_partner in ['DLR', 'KCWA', 'ID4', 'SMI', 'TJC', 'WON', 'WRM']:
total_kwb_sim += self.values['kwb_' + district_partner]
self.values['kwb_total'] = pd.Series(total_kwb_sim, index = self.values.index)
for district_partner in ['SOB', 'MET']:
total_smi_sim += self.values['semitropic_' + district_partner]
self.values['smi_total'] = pd.Series(total_smi_sim, index = self.values.index)
def set_figure_params(self):
self.figure_params = {}
self.figure_params['delta_pumping'] = {}
self.figure_params['delta_pumping']['extended_simulation'] = {}
self.figure_params['delta_pumping']['extended_simulation']['outflow_list'] = ['delta_outflow', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['pump1_list'] = ['delta_HRO_pump', 'HRO_pump']
self.figure_params['delta_pumping']['extended_simulation']['pump2_list'] = ['delta_TRP_pump', 'TRP_pump']
self.figure_params['delta_pumping']['extended_simulation']['scenario_labels'] = ['Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['simulation_labels'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['observation_labels'] = ['HRO_pump', 'TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['agg_list'] = ['AS-OCT', 'AS-OCT', 'D']
self.figure_params['delta_pumping']['extended_simulation']['unit_mult'] = [1.0, 1.0, cfs_tafd]
self.figure_params['delta_pumping']['extended_simulation']['max_value_list'] = [5000, 5000, 15]
self.figure_params['delta_pumping']['extended_simulation']['use_log_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['use_cdf_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['scenario_type_list'] = ['observation', 'validation', 'scenario']
self.figure_params['delta_pumping']['extended_simulation']['x_label_list'] = ['Total Pumping, SWP Delta Pumps (tAF/year)', 'Total Pumping, CVP Delta Pumps (tAF/year)', 'Daily Exceedence Probability', '']
self.figure_params['delta_pumping']['extended_simulation']['y_label_list'] = ['Probability Density', 'Probability Density', 'Daily Delta Outflow (tAF)', 'Relative Frequency of Water-year Types within Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names1'] = ['Historical (1996-2016) Observations', 'Historical (1996-2016) Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names2'] = ['Critical', 'Dry', 'Below Normal', 'Above Normal', 'Wet']
self.figure_params['state_estimation'] = {}
for x in ['publication', 'sacramento', 'sanjoaquin', 'tulare']:
self.figure_params['state_estimation'][x] = {}
self.figure_params['state_estimation'][x]['non_log'] = ['Snowpack (SWE)',]
self.figure_params['state_estimation'][x]['predictor values'] = ['Mean Inflow, Prior 30 Days (tAF/day)','Snowpack (SWE)']
self.figure_params['state_estimation'][x]['colorbar_label_index'] = [0, 30, 60, 90, 120, 150, 180]
self.figure_params['state_estimation'][x]['colorbar_label_list'] = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr']
self.figure_params['state_estimation'][x]['subplot_annotations'] = ['A', 'B', 'C', 'D']
self.figure_params['state_estimation'][x]['forecast_periods'] = [30,'SNOWMELT']
self.figure_params['state_estimation'][x]['all_cols'] = ['DOWY', 'Snowpack', '30MA']
self.figure_params['state_estimation'][x]['forecast_values'] = []
for forecast_days in self.figure_params['state_estimation'][x]['forecast_periods']:
if forecast_days == 'SNOWMELT':
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Snowmelt Season (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append('Snowmelt Flow')
else:
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Next ' + str(forecast_days) + ' Days (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append(str(forecast_days) + ' Day Flow')
self.figure_params['state_estimation']['publication']['watershed_keys'] = ['SHA', 'ORO', 'MIL', 'ISB']
self.figure_params['state_estimation']['publication']['watershed_labels'] = ['Shasta', 'Oroville', 'Millerton', 'Isabella']
self.figure_params['state_estimation']['sacramento']['watershed_keys'] = ['SHA', 'ORO', 'FOL', 'YRS']
self.figure_params['state_estimation']['sacramento']['watershed_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar']
self.figure_params['state_estimation']['sanjoaquin']['watershed_keys'] = ['NML', 'DNP', 'EXC', 'MIL']
self.figure_params['state_estimation']['sanjoaquin']['watershed_labels'] = ['New Melones', '<NAME>', 'Exchequer', 'Millerton']
self.figure_params['state_estimation']['tulare']['watershed_keys'] = ['PFT', 'KWH', 'SUC', 'ISB']
self.figure_params['state_estimation']['tulare']['watershed_labels'] = ['Pine Flat', 'Kaweah', 'Success', 'Isabella']
self.figure_params['model_validation'] = {}
for x in ['delta', 'sierra', 'sanluis', 'bank']:
self.figure_params['model_validation'][x] = {}
self.figure_params['model_validation']['delta']['title_labels'] = ['State Water Project Pumping', 'Central Valley Project Pumping', 'Delta X2 Location']
num_subplots = len(self.figure_params['model_validation']['delta']['title_labels'])
self.figure_params['model_validation']['delta']['label_name_1'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_x2']
self.figure_params['model_validation']['delta']['label_name_2'] = ['HRO_pump', 'TRP_pump', 'DAY_X2']
self.figure_params['model_validation']['delta']['unit_converstion_1'] = [1.0, 1.0, 1.0]
self.figure_params['model_validation']['delta']['unit_converstion_2'] = [cfs_tafd, cfs_tafd, 1.0]
self.figure_params['model_validation']['delta']['y_label_timeseries'] = ['Pumping (tAF/week)', 'Pumping (tAF/week)', 'X2 inland distance (km)']
self.figure_params['model_validation']['delta']['y_label_scatter'] = ['(tAF/yr)', '(tAF/yr)', '(km)']
self.figure_params['model_validation']['delta']['timeseries_timestep'] = ['W', 'W', 'W']
self.figure_params['model_validation']['delta']['scatter_timestep'] = ['AS-OCT', 'AS-OCT', 'M']
self.figure_params['model_validation']['delta']['aggregation_methods'] = ['sum', 'sum', 'mean']
self.figure_params['model_validation']['delta']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['delta']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['sierra']['title_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar', 'New Melones', '<NAME>', 'Exchequer', 'Millerton', 'Pine Flat', 'Kaweah', 'Success', 'Isabella']
num_subplots = len(self.figure_params['model_validation']['sierra']['title_labels'])
self.figure_params['model_validation']['sierra']['label_name_1'] = ['shasta_S', 'oroville_S', 'folsom_S', 'yuba_S', 'newmelones_S', 'donpedro_S', 'exchequer_S', 'millerton_S', 'pineflat_S', 'kaweah_S', 'success_S', 'isabella_S']
self.figure_params['model_validation']['sierra']['label_name_2'] = ['SHA_storage', 'ORO_storage', 'FOL_storage', 'YRS_storage', 'NML_storage', 'DNP_storage', 'EXC_storage', 'MIL_storage', 'PFT_storage', 'KWH_storage', 'SUC_storage', 'ISB_storage']
self.figure_params['model_validation']['sierra']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sierra']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_scatter'] = []
self.figure_params['model_validation']['sierra']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sierra']['scatter_timestep'] = []
self.figure_params['model_validation']['sierra']['aggregation_methods'] = ['mean'] * num_subplots
self.figure_params['model_validation']['sierra']['notation_location'] = ['bottom'] * num_subplots
self.figure_params['model_validation']['sierra']['show_legend'] = [False] * num_subplots
counter_kaweah = self.figure_params['model_validation']['sierra']['title_labels'].index('Kaweah')
counter_success = self.figure_params['model_validation']['sierra']['title_labels'].index('Success')
counter_isabella = self.figure_params['model_validation']['sierra']['title_labels'].index('Isabella')
self.figure_params['model_validation']['sierra']['notation_location'][counter_kaweah] = 'top'
self.figure_params['model_validation']['sierra']['notation_location'][counter_success] = 'topright'
self.figure_params['model_validation']['sierra']['show_legend'][counter_isabella] = True
self.figure_params['model_validation']['sanluis']['title_labels'] = ['State (SWP) Portion, San Luis Reservoir', 'Federal (CVP) Portion, San Luis Reservoir']
num_subplots = len(self.figure_params['model_validation']['sanluis']['title_labels'])
self.figure_params['model_validation']['sanluis']['label_name_1'] = ['sanluisstate_S', 'sanluisfederal_S']
self.figure_params['model_validation']['sanluis']['label_name_2'] = ['SLS_storage', 'SLF_storage']
self.figure_params['model_validation']['sanluis']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sanluis']['scatter_timestep'] = ['M'] * num_subplots
self.figure_params['model_validation']['sanluis']['aggregation_methods'] = ['point'] * num_subplots
self.figure_params['model_validation']['sanluis']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['sanluis']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['bank']['title_labels'] = ['Kern Water Bank Accounts', 'Semitropic Water Bank Accounts']
num_subplots = len(self.figure_params['model_validation']['bank']['title_labels'])
self.figure_params['model_validation']['bank']['label_name_1'] = ['kwb_total', 'smi_total']
self.figure_params['model_validation']['bank']['label_name_2'] = ['kwb_accounts', 'smi_accounts']
self.figure_params['model_validation']['bank']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['bank']['unit_converstion_2'] = [1.0/1000000.0, 1.0/1000.0]
self.figure_params['model_validation']['bank']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['bank']['scatter_timestep'] = ['AS-OCT'] * num_subplots
self.figure_params['model_validation']['bank']['aggregation_methods'] = ['change'] * num_subplots
self.figure_params['model_validation']['bank']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'] = [False] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'][0] = True
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_losthills'] = {}
self.figure_params['state_response']['sanluisstate_losthills']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_losthills']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_losthills']['groundwater_account_names'] = ['LHL','WON']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'steelblue']
self.figure_params['state_response']['sanluisstate_losthills']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_losthills']['subplot_titles'] = ['State Water Project Delta Operations', 'Lost Hills Drought Management', 'San Luis Reservoir Operations', 'Lost Hills Flood Management']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharged from Contract Allocation' 'Recharge of Uncontrolled Flood Spills']
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_wheeler'] = {}
self.figure_params['state_response']['sanluisstate_wheeler']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_wheeler']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_wheeler']['groundwater_account_names'] = ['WRM']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'lightsteelblue']
self.figure_params['state_response']['sanluisstate_wheeler']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_wheeler']['subplot_titles'] = ['State Water Project Delta Operations', 'Wheeler Ridge Drought Management', 'San Luis Reservoir Operations', 'Wheeler Ridge Flood Management']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharge of Uncontrolled Flood Spills', 'Recharged from Contract Allocation']
self.figure_params['district_water_use'] = {}
self.figure_params['district_water_use']['physical'] = {}
self.figure_params['district_water_use']['physical']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors', 'Groundwater Banks']
self.figure_params['district_water_use']['physical']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['physical']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler', 'northkern', 'kerntulare']
self.figure_params['district_water_use']['physical']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['physical']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['physical']['Groundwater Banks'] = ['stockdale', 'kernriverbed', 'poso', 'pioneer', 'kwb', 'b2800', 'irvineranch', 'northkernwb']
self.figure_params['district_water_use']['physical']['subplot columns'] = 2
self.figure_params['district_water_use']['physical']['color map'] = 'YlGbBu_r'
self.figure_params['district_water_use']['physical']['write file'] = True
self.figure_params['district_water_use']['annual'] = {}
self.figure_params['district_water_use']['annual']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors']
self.figure_params['district_water_use']['annual']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['annual']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler']
self.figure_params['district_water_use']['annual']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['annual']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['annual']['subplot columns'] = 2
self.figure_params['district_water_use']['annual']['color map'] = 'BrBG_r'
self.figure_params['district_water_use']['annual']['write file'] = True
self.figure_params['flow_diagram'] = {}
self.figure_params['flow_diagram']['tulare'] = {}
self.figure_params['flow_diagram']['tulare']['column1'] = ['Shasta', 'Folsom', 'Oroville', 'New Bullards', 'Uncontrolled']
self.figure_params['flow_diagram']['tulare']['row1'] = ['Delta Outflow', 'Carryover',]
self.figure_params['flow_diagram']['tulare']['column2'] = ['San Luis (Fed)', 'San Luis (State)', 'Millerton', 'Isabella', 'Pine Flat', 'Kaweah', 'Success']
self.figure_params['flow_diagram']['tulare']['row2'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column3'] = ['Exchange', 'CVP-Delta', 'Cross Valley', 'State Water Project', 'Friant Class 1','Friant Class 2', 'Kern River', 'Kings River', 'Kaweah River', 'Tule River', 'Flood']
self.figure_params['flow_diagram']['tulare']['row3'] = ['Private Pumping', 'GW Banks']
self.figure_params['flow_diagram']['tulare']['column4'] = ['Exchange', 'CVP-Delta', 'Urban', 'KCWA', 'CVP-Friant','Other']
self.figure_params['flow_diagram']['tulare']['row4'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column5'] = ['Irrigation', 'Urban', 'In-Lieu Recharge', 'Direct Recharge']
self.figure_params['flow_diagram']['tulare']['titles'] = ['Sacramento Basin\nSupplies', 'Tulare Basin\nSupplies', 'Surface Water\nContract Allocations', 'Contractor Groups', 'Water Use Type']
def scenario_compare(self, folder_name, figure_name, plot_name, validation_values, show_plot):
outflow_list = self.figure_params[figure_name][plot_name]['outflow_list']
pump1_list = self.figure_params[figure_name][plot_name]['pump1_list']
pump2_list = self.figure_params[figure_name][plot_name]['pump2_list']
scenario_labels = self.figure_params[figure_name][plot_name]['scenario_labels']
simulation_labels = self.figure_params[figure_name][plot_name]['simulation_labels']
observation_labels = self.figure_params[figure_name][plot_name]['observation_labels']
agg_list = self.figure_params[figure_name][plot_name]['agg_list']
unit_mult = self.figure_params[figure_name][plot_name]['unit_mult']
max_value_list = self.figure_params[figure_name][plot_name]['max_value_list']
use_log_list = self.figure_params[figure_name][plot_name]['use_log_list']
use_cdf_list = self.figure_params[figure_name][plot_name]['use_cdf_list']
scenario_type_list = self.figure_params[figure_name][plot_name]['scenario_type_list']
x_label_list = self.figure_params[figure_name][plot_name]['x_label_list']
y_label_list = self.figure_params[figure_name][plot_name]['y_label_list']
legend_label_names1 = self.figure_params[figure_name][plot_name]['legend_label_names1']
legend_label_names2 = self.figure_params[figure_name][plot_name]['legend_label_names2']
color1 = sns.color_palette('spring', n_colors = 3)
color2 = sns.color_palette('summer', n_colors = 3)
color_list = np.array([color1[0], color1[2], color2[0]])
max_y_val = np.zeros(len(simulation_labels))
fig = plt.figure(figsize = (20, 16))
gs = gridspec.GridSpec(3,2, width_ratios=[3,1], figure = fig)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
ax4 = plt.subplot(gs[:, 1])
axes_list = [ax1, ax2, ax3]
counter = 0
for sim_label, obs_label, agg, max_value, use_log, use_cdf, ax_loop in zip(simulation_labels, observation_labels, agg_list, max_value_list, use_log_list, use_cdf_list, axes_list):
data_type_dict = {}
data_type_dict['scenario'] = self.values[sim_label].resample(agg).sum() * unit_mult[0]
data_type_dict['validation'] = validation_values[sim_label].resample(agg).sum() * unit_mult[1]
data_type_dict['observation'] = self.observations[obs_label].resample(agg).sum() * unit_mult[2]
if use_log:
for scen_type in scenario_type_list:
values_int = data_type_dict[scen_type]
data_type_dict[scen_type] = np.log(values_int[values_int > 0])
for scen_type in scenario_type_list:
max_y_val[counter] = max([max(data_type_dict[scen_type]), max_y_val[counter]])
counter += 1
if use_cdf:
for scen_type, color_loop in zip(scenario_type_list, color_list):
cdf_values = np.zeros(100)
values_int = data_type_dict[scen_type]
for x in range(0, 100):
x_val = int(np.ceil(max_value)) * (x/100)
cdf_values[x] = len(values_int[values_int > x_val])/len(values_int)
ax_loop.plot(cdf_values, np.arange(0, int(np.ceil(max_value)), int(np.ceil(max_value))/100), linewidth = 3, color = color_loop)
else:
pos = np.linspace(0, max_value, 101)
for scen_type, color_loop in zip(scenario_type_list, color_list):
kde_est = stats.gaussian_kde(data_type_dict[scen_type])
ax_loop.fill_between(pos, kde_est(pos), edgecolor = 'black', alpha = 0.6, facecolor = color_loop)
sri_dict = {}
sri_dict['validation'] = validation_values['delta_forecastSRI']
sri_dict['scenario'] = self.values['delta_forecastSRI']
sri_cutoffs = {}
sri_cutoffs['W'] = [9.2, 100]
sri_cutoffs['AN'] = [7.8, 9.2]
sri_cutoffs['BN'] = [6.6, 7.8]
sri_cutoffs['D'] = [5.4, 6.6]
sri_cutoffs['C'] = [0.0, 5.4]
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
scenario_type_list = ['validation', 'scenario']
colors = sns.color_palette('RdBu_r', n_colors = 5)
percent_years = {}
for wyt in wyt_list:
percent_years[wyt] = np.zeros(len(scenario_type_list))
for scen_cnt, scen_type in enumerate(scenario_type_list):
ann_sri = []
for x_cnt, x in enumerate(sri_dict[scen_type]):
if sri_dict[scen_type].index.month[x_cnt] == 9 and sri_dict[scen_type].index.day[x_cnt] == 30:
ann_sri.append(x)
ann_sri = np.array(ann_sri)
for x_cnt, wyt in enumerate(wyt_list):
mask_value = (ann_sri >= sri_cutoffs[wyt][0]) & (ann_sri < sri_cutoffs[wyt][1])
percent_years[wyt][scen_cnt] = len(ann_sri[mask_value])/len(ann_sri)
colors = sns.color_palette('RdBu_r', n_colors = 5)
last_type = np.zeros(len(scenario_type_list))
for cnt, x in enumerate(wyt_list):
ax4.bar(['Validated Period\n(1997-2016)', 'Extended Simulation\n(1906-2016)'], percent_years[x], alpha = 1.0, label = wyt, facecolor = colors[cnt], edgecolor = 'black', bottom = last_type)
last_type += percent_years[x]
ax1.set_xlim([0.0, 500.0* np.ceil(max_y_val[0]/500.0)])
ax2.set_xlim([0.0, 500.0* np.ceil(max_y_val[1]/500.0)])
ax3.set_xlim([0.0, 1.0])
ax4.set_ylim([0, 1.15])
ax1.set_yticklabels('')
ax2.set_yticklabels('')
label_list = []
loc_list = []
for value_x in range(0, 120, 20):
label_list.append(str(value_x) + ' %')
loc_list.append(value_x/100.0)
ax4.set_yticklabels(label_list)
ax4.set_yticks(loc_list)
ax3.set_xticklabels(label_list)
ax3.set_xticks(loc_list)
ax3.set_yticklabels(['4', '8', '16', '32', '64', '125', '250', '500', '1000', '2000', '4000'])
ax3.set_yticks([np.log(4), np.log(8), np.log(16), np.log(32), np.log(64), np.log(125), np.log(250), np.log(500), np.log(1000), np.log(2000), np.log(4000)])
ax3.set_ylim([np.log(4), np.log(4000)])
for ax, x_lab, y_lab in zip([ax1, ax2, ax3, ax4], x_label_list, y_label_list):
ax.set_xlabel(x_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.set_ylabel(y_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.grid(False)
for tick in ax.get_xticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
for tick in ax.get_yticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
legend_elements = []
for x_cnt, x in enumerate(legend_label_names1):
legend_elements.append(Patch(facecolor = color_list[x_cnt], edgecolor = 'black', label = x))
ax1.legend(handles = legend_elements, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
legend_elements_2 = []
for x_cnt, x in enumerate(legend_label_names2):
legend_elements_2.append(Patch(facecolor = colors[x_cnt], edgecolor = 'black', label = x))
ax4.legend(handles = legend_elements_2, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
plt.savefig(folder_name + figure_name + '_' + plot_name + '.png', dpi = 150, bbox_inches = 'tight', pad_inches = 0.0)
if show_plot:
plt.show()
plt.close()
def make_deliveries_by_district(self, folder_name, figure_name, plot_name, scenario_name, show_plot):
if plot_name == 'annual':
name_bridge = {}
name_bridge['semitropic'] = 'KER01'
name_bridge['westkern'] = 'KER02'
name_bridge['wheeler'] = 'KER03'
name_bridge['kerndelta'] = 'KER04'
name_bridge['arvin'] = 'KER05'
name_bridge['belridge'] = 'KER06'
name_bridge['losthills'] = 'KER07'
name_bridge['northkern'] = 'KER08'
name_bridge['northkernwb'] = 'KER08'
name_bridge['ID4'] = 'KER09'
name_bridge['sosanjoaquin'] = 'KER10'
name_bridge['berrenda'] = 'KER11'
name_bridge['buenavista'] = 'KER12'
name_bridge['cawelo'] = 'KER13'
name_bridge['rosedale'] = 'KER14'
name_bridge['shaffer'] = 'KER15'
name_bridge['henrymiller'] = 'KER16'
name_bridge['kwb'] = 'KER17'
name_bridge['b2800'] = 'KER17'
name_bridge['pioneer'] = 'KER17'
name_bridge['irvineranch'] = 'KER17'
name_bridge['kernriverbed'] = 'KER17'
name_bridge['poso'] = 'KER17'
name_bridge['stockdale'] = 'KER17'
name_bridge['delano'] = 'KeT01'
name_bridge['kerntulare'] = 'KeT02'
name_bridge['lowertule'] = 'TUL01'
name_bridge['tulare'] = 'TUL02'
name_bridge['lindmore'] = 'TUL03'
name_bridge['saucelito'] = 'TUL04'
name_bridge['porterville'] = 'TUL05'
name_bridge['lindsay'] = 'TUL06'
name_bridge['exeter'] = 'TUL07'
name_bridge['terra'] = 'TUL08'
name_bridge['teapot'] = 'TUL09'
name_bridge['bakersfield'] = 'BAK'
name_bridge['fresno'] = 'FRE'
name_bridge['southbay'] = 'SOB'
name_bridge['socal'] = 'SOC'
name_bridge['tehachapi'] = 'TEH'
name_bridge['tejon'] = 'TEJ'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'PIX'
name_bridge['chowchilla'] = 'CHW'
name_bridge['maderairr'] = 'MAD'
name_bridge['fresnoid'] = 'FSI'
name_bridge['westlands'] = 'WTL'
name_bridge['panoche'] = 'PAN'
name_bridge['sanluiswater'] = 'SLW'
name_bridge['delpuerto'] = 'DEL'
elif plot_name == 'monthly':
name_bridge = {}
name_bridge['semitropic'] = 'Semitropic Water Storage District'
name_bridge['westkern'] = 'West Kern Water District'
name_bridge['wheeler'] = 'Wheeler Ridge-Maricopa Water Storage District'
name_bridge['kerndelta'] = 'Kern Delta Water District'
name_bridge['arvin'] = 'Arvin-Edison Water Storage District'
name_bridge['belridge'] = 'Belridge Water Storage District'
name_bridge['losthills'] = 'Lost Hills Water District'
name_bridge['northkern'] = 'North Kern Water Storage District'
name_bridge['northkernwb'] = 'North Kern Water Storage District'
name_bridge['ID4'] = 'Urban'
name_bridge['sosanjoaquin'] = 'Southern San Joaquin Municipal Utility District'
name_bridge['berrenda'] = 'Berrenda Mesa Water District'
name_bridge['buenavista'] = 'Buena Vista Water Storage District'
name_bridge['cawelo'] = 'Cawelo Water District'
name_bridge['rosedale'] = 'Rosedale-Rio Bravo Water Storage District'
name_bridge['shaffer'] = 'Shafter-Wasco Irrigation District'
name_bridge['henrymiller'] = 'Henry Miller Water District'
name_bridge['kwb'] = 'Kern Water Bank Authority'
name_bridge['b2800'] = 'Kern Water Bank Authority'
name_bridge['pioneer'] = 'Kern Water Bank Authority'
name_bridge['irvineranch'] = 'Kern Water Bank Authority'
name_bridge['kernriverbed'] = 'Kern Water Bank Authority'
name_bridge['poso'] = 'Kern Water Bank Authority'
name_bridge['stockdale'] = 'Kern Water Bank Authority'
name_bridge['delano'] = 'Delano-Earlimart Irrigation District'
name_bridge['kerntulare'] = 'Kern-Tulare Water District'
name_bridge['lowertule'] = 'Lower Tule River Irrigation District'
name_bridge['tulare'] = 'Tulare Irrigation District'
name_bridge['lindmore'] = 'Lindmore Irrigation District'
name_bridge['saucelito'] = 'Saucelito Irrigation District'
name_bridge['porterville'] = 'Porterville Irrigation District'
name_bridge['lindsay'] = 'Lindsay-Strathmore Irrigation District'
name_bridge['exeter'] = 'Exeter Irrigation District'
name_bridge['terra'] = 'Terra Bella Irrigation District'
name_bridge['teapot'] = 'Tea Pot Dome Water District'
name_bridge['bakersfield'] = 'Urban'
name_bridge['fresno'] = 'Urban'
name_bridge['southbay'] = 'Urban'
name_bridge['socal'] = 'Urban'
name_bridge['tehachapi'] = 'Tehachapi - Cummings County Water District'
name_bridge['tejon'] = 'Tejon-Castac Water District'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'Pixley Irrigation District'
name_bridge['chowchilla'] = 'Chowchilla Water District'
name_bridge['maderairr'] = 'Madera Irrigation District'
name_bridge['fresnoid'] = 'Fresno Irrigation District'
name_bridge['westlands'] = 'Westlands Water District'
name_bridge['panoche'] = 'Panoche Water District'
name_bridge['sanluiswater'] = 'San Luis Water District'
name_bridge['delpuerto'] = 'Del Puerto Water District'
name_bridge['alta'] = 'Alta Irrigation District'
name_bridge['consolidated'] = 'Consolidated Irrigation District'
location_type = plot_name
self.total_irrigation = {}
self.total_recharge = {}
self.total_pumping = {}
self.total_flood_purchases = {}
self.total_recovery_rebate = {}
self.total_recharge_sales = {}
self.total_recharge_purchases = {}
self.total_recovery_sales = {}
self.total_recovery_purchases = {}
for bank in self.bank_list:
self.total_irrigation[bank.name] = np.zeros(self.number_years*12)
self.total_recharge[bank.name] = np.zeros(self.number_years*12)
self.total_pumping[bank.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[bank.name] = np.zeros(self.number_years*12)
for district in self.district_list:
self.total_irrigation[district.name] = np.zeros(self.number_years*12)
self.total_recharge[district.name] = np.zeros(self.number_years*12)
self.total_pumping[district.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[district.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[district.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[district.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[district.name] = np.zeros(self.number_years*12)
date_list_labels = []
for year_num in range(self.starting_year, 2017):
start_month = 1
end_month = 13
if year_num == self.starting_year:
start_month = 10
if year_num == 2016:
end_month = 10
for month_num in range(start_month, end_month):
date_string_start = str(year_num) + '-' + str(month_num) + '-01'
date_list_labels.append(date_string_start)
for district in self.district_list:
inleiu_name = district.name + '_inleiu_irrigation'
inleiu_recharge_name = district.name + '_inleiu_recharge'
direct_recover_name = district.name + '_recover_banked'
indirect_surface_name = district.name + '_exchanged_SW'
indirect_ground_name = district.name + '_exchanged_GW'
inleiu_pumping_name = district.name + '_leiupumping'
pumping_name = district.name + '_pumping'
recharge_name = district.name + '_' + district.key + '_recharged'
numdays_month = [31, 28, 31, 30, 31, 30, 31, 31, 29, 31, 30, 31]
for year_num in range(0, self.number_years+1):
year_str = str(year_num + self.starting_year)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year - 1)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
#attibute inleiu deliveries for irrigation to district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
#attibute inleiu deliveries for irrigation to district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
#if classifying by physical location, attribute to district recieving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumnping for inleiu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0]
else:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_pumping_name].values[0]
#if classifying by physical location, to district operating the bank
self.total_pumping[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_sales[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_rebate[district.name][year_num*12 + month_num - 10] += total_leiupumping
#Recharge, in- and out- of district
if recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_name].values[0]
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.bank_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge[bank_name.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.leiu_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
#Contract deliveries
for contract in self.contract_list:
delivery_name = district.name + '_' + contract.name + '_delivery'
recharge_contract_name = district.name + '_' + contract.name + '_recharged'
flood_irr_name = district.name + '_' + contract.name + '_flood_irrigation'
flood_name = district.name + '_' + contract.name + '_flood'
###All deliveries made from a district's contract
if delivery_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), delivery_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
##Deliveries made for recharge are subtracted from the overall contract deliveries
if recharge_contract_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_contract_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] -= total_recharge
#flood water used for irrigation - always attribute as irrigation
if flood_irr_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[ | pd.DatetimeIndex([date_string_current]) | pandas.DatetimeIndex |
import pandas as pd
import numpy as np
import requests
from fake_useragent import UserAgent
import io
import os
import time
import json
import demjson
from datetime import datetime
import ssl
ssl._create_default_https_context = ssl._create_unverified_context  # disable TLS certificate verification for the HTTPS downloads below
# Main Economic Indicators: https://alfred.stlouisfed.org/release?rid=205
url = {
"fred_econ": "https://fred.stlouisfed.org/graph/fredgraph.csv?",
"philfed": "https://www.philadelphiafed.org/surveys-and-data/real-time-data-research/",
"chicagofed": "https://www.chicagofed.org/~/media/publications/",
"OECD": "https://stats.oecd.org/sdmx-json/data/DP_LIVE/"
}
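# Base endpoints used by the download helpers below. The FRED "fredgraph.csv" endpoint returns a
# CSV for a given series id and date range (query parameters id, cosd, coed); the philfed,
# chicagofed and OECD entries are kept for the corresponding data sources.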
def date_transform(df, format_origin, format_after):
    """Convert an iterable of date strings from format_origin to format_after."""
    return [datetime.strptime(value, format_origin).strftime(format_after) for value in df]
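# Example (illustrative): date_transform(["2020-01-01", "2020-02-15"], "%Y-%m-%d", "%Y%m")
# returns ["202001", "202002"].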
def gdp_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
    Full Name: Gross Domestic Product
Description: Billions of Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDP",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "GDP"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["GDP"] = df["GDP"].astype(float)
return df
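# Illustrative usage (assumes fred.stlouisfed.org is reachable):
#   gdp = gdp_quarterly("2000-01-01", "2020-01-01")
#   # -> DataFrame with columns ["Date", "GDP"], GDP in billions of dollars (SAAR)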
def gdpc1_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
Full Name: Real Gross Domestic Product
Description: Billions of Chained 2012 Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDPC1",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def oecd_gdp_monthly(startdate="1947-01-01", enddate="2021-01-01"):
"""
Full Name: Real Gross Domestic Product
Description: Billions of Chained 2012 Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USALORSGPNOSTSAM",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def payems_monthly(startdate="1939-01-01", enddate="2021-01-01"):
"""
Full Name: All Employees, Total Nonfarm
Description: Thousands of Persons,Seasonally Adjusted, Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "PAYEMS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "Payems"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["Payems"] = df["Payems"].astype(float)
return df
def ppi():
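    """
    Producer Price Index: All Commodities (PPIACO) and Total Manufacturing Industries (PCUOMFGOMFG)
    Description: Index, Monthly, pulled from the FRED graph CSV endpoint
    Return: pd.DataFrame with columns ["Date", "PPI_C", "PPI_I"]
    """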
tmp_url = url["fred_econ"] + "bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=968&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=PPIACO,PCUOMFGOMFG&scale=left,left&cosd=1913-01-01,1984-12-01&coed=2021-04-01,2021-04-01&line_color=%234572a7,%23aa4643&link_values=false,false&line_style=solid,solid&mark_type=none,none&mw=3,3&lw=2,2&ost=-99999,-99999&oet=99999,99999&mma=0,0&fml=a,a&fq=Monthly,Monthly&fam=avg,avg&fgst=lin,lin&fgsnd=2020-02-01,2020-02-01&line_index=1,2&transformation=lin,lin&vintage_date=2021-06-10,2021-06-10&revision_date=2021-06-10,2021-06-10&nd=1913-01-01,1984-12-01"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
r = requests.get(tmp_url, headers=request_header)
data_text = r.content
    df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
    # FRED series ids contained in the response and their meanings (kept for reference)
    name_list = {
        "PPIACO": "Producer Price Index by Commodity: All Commodities",
        "PCUOMFGOMFG": "Producer Price Index by Industry: Total Manufacturing Industries"
    }
    df.replace(".", np.nan, inplace=True)
    df.columns = ["Date", "PPI_C", "PPI_I"]
    df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
    df[["PPI_C", "PPI_I"]] = df[["PPI_C", "PPI_I"]].astype(float)
    return df
def pmi():
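    """
    ISM Manufacturing PMI and ISM Non-Manufacturing PMI, scraped from the Jin10 data center
    Return: pd.DataFrame with columns ["Date", "ISM_PMI_I", "ISM_PMI_NI"]
    """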
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["今值"]  # "今值" translates to "actual value"
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
PMI_I = pd.DataFrame()
PMI_I["Date"] = pd.to_datetime(temp_df.index, format = "%Y-%m-%d")
PMI_I["ISM_PMI_I"] = np.array(temp_df).astype(float)
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["今值"]  # "今值" translates to "actual value"
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
PMI_NI = pd.DataFrame()
PMI_NI["Date"] = pd.to_datetime(temp_df.index, format = "%Y-%m-%d")
PMI_NI["ISM_PMI_NI"] = np.array(temp_df).astype(float)
PMI = pd.merge_asof(PMI_I, PMI_NI, on = "Date")
return PMI
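# Usage sketch (network access to jin10.com assumed): pmi() returns one row per ISM
# release with manufacturing (ISM_PMI_I) and non-manufacturing (ISM_PMI_NI) readings.
# pd.merge_asof requires both inputs sorted on "Date", which sort_index() above guarantees.
# pmi_df = pmi()
# pmi_df.tail()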
def unrate(startdate="1948-01-01", enddate="2021-01-01"):
"""
Full Name: Unemployment Rate: Aged 15-64: All Persons for the United States
Description: Percent, Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSM156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSQ156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSA156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "UR_Monthly", "UR_Quarterly", "UR_Annually"]
return df
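# Note on the pattern above: pd.merge_asof(..., direction="backward") attaches to each
# monthly observation the most recent quarterly/annual value dated at or before it, so
# the lower-frequency columns behave like forward-filled step functions.
# Hypothetical call: ur = unrate(startdate="2015-01-01", enddate="2020-12-31")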
def erate(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Employment Rate: Aged 25-54: All Persons for the United States
Description: Percent, Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSM156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSQ156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSA156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "ER_Monthly", "ER_Quarterly", "ER_Annually"]
def pce_monthly(startdate="1959-01-01", enddate="2021-01-01"):
"""
Full Name: Personal Consumption Expenditures (PCE)
Description: Billions of Dollars, Monthly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "PCE",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def cpi(startdate="1960-01-01", enddate="2021-01-01"):
"""
Full Name: Consumer Price Index: Total All Items for the United States
Description: Percent, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USM661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USQ661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USA661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df[["CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]] = df[["CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]].astype(float)
return df
def m1(startdate="1960-01-01", enddate="2021-01-01"):
"""
Full Name: M1 for the United States
Description: Weekly level plus Monthly, Quarterly and Annual growth rates
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "WM1NS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_weekly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_weekly["DATE"] = pd.to_datetime(df_weekly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USM657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USA657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_weekly, df_monthly, on="DATE", direction="backward")
df = pd.merge_asof(df, df_quarterly, on="DATE", direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = [
"Date",
"M1_Weekly",
"M1_Monthly",
"M1_Quarterly",
"M1_Annually"]
return df
def m2(startdate="1960-01-01", enddate="2021-01-01"):
"""
Full Name: M2 for the United States
Description: Billions of Dollars, Weekly and Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "WM2NS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_weekly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_weekly["DATE"] = pd.to_datetime(df_weekly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "M2SL",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_weekly, df_monthly, on="DATE", direction="backward")
df.columns = ["Date", "M2_Weekly", "M2_Monthly"]
return df
def m3(startdate="1960-01-01", enddate="2021-01-01"):
"""
Full Name: M3 for the United States
Description: Growth Rate Previous Period, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USM657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USA657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "M3_Monthly", "M3_Quarterly", "M3_Annually"]
return df
def ltgby_10(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Long-Term Government Bond Yields: 10-year: Main (Including Benchmark) for the United States
Description: Percent, Not Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USM156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = | pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d") | pandas.to_datetime |
#!/usr/bin/python
import sys
from collections import defaultdict
from os import listdir, path
import pandas
import json
PIPELINE_NAME = "neoANT-HILL"
LOG_FILE = "/params.log"
PICARD = "/home/biodocker/picard.jar"
GATK = "/home/biodocker/gatk-4.1.0.0/gatk"
SNPEFF = "/home/biodocker/snpEff/snpEff.jar"
SNPSIFT = "/home/biodocker/snpEff/SnpSift.jar"
QUANTISEQ = "Rscript /home/biodocker/quantiseq/deconvolution/quanTIseq_decon.R "
ABSOLUTE_PATH = "/home/biodocker/neoanthill/"
OUTPUT_PATH = "/home/biodocker/output/"
INPUT_PATH = "/home/biodocker/input/"
#FUNCTIONS
ADD_PROCESS = ["aprediction", "equantification", "linfiltrates"]
APREDICTION_PROCESS = 0
EQUANTIFICATION_PROCESS = 1
LINFILTRATES_PROCESS = 2
# GENERIC OPTIONS
ABSOLUTE_PATH_OPTION = "absolute_path"
ADD_PROCESSING_OPTION = "aprocessing"
INPUT_OPTION = "input"
FASTQ_INPUT_OPTION = "fastq"
OUTPUT_OPTION = "output"
# BP OPTIONS
TYPE_OPTION = "type"
CLASS_OPTION = "class"
METHOD_OPTION = "method"
PARALLEL_OPTION = "parallel"
LENGTH_OPTION = "length"
ALLELE_OPTION = "allele"
# DIRECTORIES
MUTATION_DIRECTORY = "/mutations/"
PREDICTION_FILTERED_DIRECTORY = "predictions/filtered/"
PREDICTION_NOT_FILTERED_DIRECTORY = "predictions/not_filtered/"
PREDICTION_RAW_DIRECTORY = "predictions/raw/"
ALLELE_DIRECTORY = "/allele_prediction/"
VARIANT_CALLING_DIRECTORY = "/variant_calling/"
GENE_EXPRESSION = "/gene_expression/"
IMMUNE_INFILTRATING = "/immune_infiltrating/"
# MUTATIONS
MUTATION_MISSENSE = "missense_variant"
MUTATION_FRAMESHIFT = "frameshift_variant"
MUTATION_INFRAME = ["conservative_inframe_insertion", "disruptive_inframe_insertion", "conservative_inframe_deletion", "disruptive_inframe_deletion"]
# TASKS
TASK_ANALYZE_PARAMETERS = "analysing parameters"
TASK_EXTRACT_VCF_INFO = "extracting vcf info"
TASK_LOAD_PROTEIN_FILE = "loading protein refseqs"
TASK_LOAD_TRANSCRIPT_FILE = "loading transcript refseqs"
TASK_PROCESS_MUTATION = "processing mutations"
TASK_WRITE_REPORT_FILE = "writting report files"
TASK_PREDICT_BINDING = "predicting bindings"
TASK_FILTER_BINDING = "filtering predictions"
TASK_GENE_EXPRESSION = "quantifying transcript expression"
TASK_ANNOTATING_VCF = "annotating vcf"
TASK_VARIANT_CALLING = "calling variants"
TASK_TUMOR_IMMUNE_PROFILE = "profiling tumor-immune cells"
TASK_ALLELE_TYPING = "typing HLA alleles"
# TASK STATUS
TASK_ERROR = "er"
TASK_SUCCESS = "ok"
# DATA
DBSNP = ABSOLUTE_PATH + "data/dbsnp_138.b37.vcf.gz"
MILLS = ABSOLUTE_PATH + "data/Mills_and_1000G_gold_standard.indels.b37.vcf.gz"
KNOWN = ABSOLUTE_PATH + "data/1000G_phase1.indels.b37.vcf.gz"
GENOME_FASTA_FILE = ABSOLUTE_PATH + "data/Homo_sapiens.GRCh37.fa"
REFSEQ_TRANSCRIPTS_FASTA = ABSOLUTE_PATH + "data/transcript_refseq.fasta"
REFSEQ_HUMAN_FASTA = ABSOLUTE_PATH + "data/protein_refseq.fasta"
HUMAN_TRANSCRIPTS_INDEX = ABSOLUTE_PATH +"data/human_transcript_index"
DEFAULT_ALLELE_LIST = [0, ABSOLUTE_PATH + "data/hla_class_i.alleles", ABSOLUTE_PATH + "data/hla_class_ii.alleles"]
# PREDICT METHODS
PREDIC_METHODS = [0,
["IEDB_recommended",
"ann",
"comblib_sidney2008",
"consensus",
"netmhcstabpan",
"netmhcpan",
"smm",
"smmpmbec",
"pickpocket",
"netmhccons"],
["IEDB_recommended",
"comblib",
"consensus3",
"NetMHCIIpan",
"nn_align",
"smm_align",
"sturniolo"]
]
# FILTERS
FILTER_LIST = {"percentile_rank": 7, "ann_ic50": 8, "smm_ic50": 10, "comblib_sidney2008_score": 12, "netmhcpan_ic50": 14, "ic50": 6, }
REPORT = "report: "
GENERAL_USAGE = "general usage: ./main [-opt] [arg] [inputfile]"
AMINO = {'Ala': 'a', 'Arg': 'r', 'Asn': 'n', 'Asp': 'd', 'Cys': 'c', 'Gln': 'q',
'Glu': 'e', 'Gly': 'g', 'His': 'h', 'Ile': 'i', 'Leu': 'l', 'Lys': 'k',
'Met': 'm', 'Phe': 'f', 'Pro': 'p', 'Ser': 's', 'Thr': 't', 'Trp': 'w',
'Tyr': 'y', 'Val': 'v', 'Ter': '*'}
CODON = {'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'ATT': 'I', 'ATC': 'I',
'ATA': 'I', 'ATG': 'M', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V',
'GTG': 'V', 'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S',
'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'GCT': 'A', 'GCC': 'A',
'GCA': 'A', 'GCG': 'A', 'TAT': 'Y', 'TAC': 'Y', 'CAT': 'H',
'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'GAT': 'D', 'GAC': 'D', 'GAA': 'E',
'GAG': 'E', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CGT': 'R',
'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'AGT': 'S', 'AGC': 'S',
'AGA': 'R', 'AGG': 'R', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', 'TAA': 'STOP', 'TAG': 'STOP', 'TGA': 'STOP'}
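# Example (sketch, assuming SnpEff-style HGVS.p strings such as "p.Ala123Thr"):
# AMINO converts three-letter residue codes to one-letter lowercase, e.g.
#   "".join(AMINO.get(tok, tok) for tok in ["Ala", "123", "Thr"]) -> "a123t"
# while CODON translates DNA triplets, e.g. CODON["ATG"] == "M" and CODON["TAA"] == "STOP".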
DEFAULT_METHOD = "IEDB_recommended"
PREDICTION_CLASS_COMMAND = [0, "/home/biodocker/iedb/mhc_i/src/predict_binding.py ", "/home/biodocker/iedb/mhc_ii/mhc_II_binding.py "]
def print_task(task):
repetition = 40 - len(task)
print ("[ -> " + task + " " + ("." * repetition)),
sys.stdout.flush()
def print_status(status):
if status == TASK_SUCCESS:
print (status + " ]")
else:
print (status + " ]")
def save_log(ops):
output = ops[OUTPUT_OPTION]
if CLASS_OPTION in ops.keys():
p_class = str(ops[CLASS_OPTION])
ops[p_class + METHOD_OPTION] = ops[METHOD_OPTION]
ops[p_class + ALLELE_OPTION] = ops[ALLELE_OPTION]
ops[p_class + LENGTH_OPTION] = ops[LENGTH_OPTION]
ops.pop(METHOD_OPTION, None)
ops.pop(ALLELE_OPTION, None)
ops.pop(LENGTH_OPTION, None)
if path.isfile(output + LOG_FILE):
with open(output + LOG_FILE, "r") as f:
dic = json.load(f)
dic.update(ops)
ops = dic
with open(output + LOG_FILE, "w") as f:
json.dump(ops, f)
def read_log(out):
params = None
with open(OUTPUT_PATH + out + LOG_FILE, "r") as f:
params = json.load(f)
return params
def read_list(filepath):
elements = []
with open(filepath, "r") as f:
for row in f:
row = row.strip("\r")
row = row.strip("\n")
elements.append(row)
return elements
def read_predicted_alleles(outpath):
outpath += ALLELE_DIRECTORY
files = [f for f in listdir(outpath) if path.isfile(path.join(outpath, f))]
p_allele = defaultdict(list)
str_result = ""
for file in files:
if file.endswith(".tsv"):
sample = file.split(".tsv")[0]
with open(outpath + file, "r") as f:
f.readline()
for line in f:
p_allele[line.rstrip()].append(sample)
else:
continue
for allele in sorted(p_allele.keys()):
str_result += allele + " ("
for sample in p_allele[allele]:
str_result += sample + ", "
str_result = str_result[0:-2] + ")\n"
return str_result
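# Sketch of the string built above (allele names are illustrative only):
#   HLA-A*02:01 (sample1, sample2)
#   HLA-B*07:02 (sample1)
# i.e. one line per predicted allele listing the samples it was typed in.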
def read_c1_binding_results(out, sample):
result_path = OUTPUT_PATH + out + "/c1_predictions/not_filtered/" + sample + ".txt"
try:
c1_lines = pandas.read_csv(result_path, sep='\t')
except Exception:
c1_lines = pandas.DataFrame()
return c1_lines
def read_c2_binding_results(out, sample):
result_path = OUTPUT_PATH + out + "/c2_predictions/not_filtered/" + sample + ".txt"
try:
c2_lines = pandas.read_csv(result_path, sep='\t')
except Exception:
c2_lines = pandas.DataFrame()
return c2_lines
def read_gene_exp(out, sample):
result_path = OUTPUT_PATH + out + "/gene_expression/" + sample + ".tsv"
try:
gene_exp = | pandas.read_csv(result_path, sep='\t') | pandas.read_csv |
import argparse
from email.mime import image
import os
from tqdm import tqdm
import pandas as pd
import logging
from src.utils.common import read_yaml, create_directories
from src.stage_01_get_data import main as loader_main
from sklearn.metrics import confusion_matrix, f1_score
import numpy as np
import warnings
import torch
STAGE = "STAGE_NAME" ## <<< change stage name
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("[%(asctime)s: %(levelname)s: %(module)s]: %(message)s")
file_handler = logging.FileHandler(os.path.join("logs", "running_logs.log"))
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
warnings.filterwarnings('ignore')
def main(config_path):
## read config files
config = read_yaml(config_path)
train_data_loader, test_data_loader, labels_dict = loader_main(config_path)
pred = np.array([])
target = np.array([])
prediction_data_dir = config['data']['PRED_DATA_DIR']
create_directories([prediction_data_dir])
prediction_data_file_name = config['data']['PRED_DATA_FILE_NAME']
prediction_data_file_path = os.path.join(prediction_data_dir, prediction_data_file_name)
model_dir = config['artifacts']['model_dir']
trained_model_name = config['artifacts']['trained_model_name']
trained_model_path = os.path.join(model_dir, trained_model_name)
model = torch.load(trained_model_path)
logger.info(f"trained model loaded")
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model.to(DEVICE)
logger.info(f"trained model loaded into {DEVICE}")
with torch.no_grad():
for batch, data in enumerate(test_data_loader):
images = data[0].to(DEVICE)
labels = data[1].to(DEVICE)
y_pred = model(images)
pred = np.concatenate((pred, torch.argmax(y_pred, 1).cpu().numpy()))
target = np.concatenate((target, labels.cpu().numpy()))
logger.info("prediction for test data finished")
df = | pd.DataFrame({"Actual":target, "Prediction":pred}) | pandas.DataFrame |
from django.core.management.base import BaseCommand, CommandError
from etldjango.settings import GCP_PROJECT_ID, BUCKET_NAME, BUCKET_ROOT
from .utils.storage import Bucket_handler, GetBucketData
from .utils.extractor import Data_Extractor
from datetime import datetime, timedelta
from .utils.unicodenorm import normalizer_str
from etldata.models import DB_minsa_muertes, DB_positividad_salida, DB_capacidad_hosp, DB_minsa_muertes, DB_rt, DB_epidemiologico, DB_vacunas
from django.contrib.gis.geos import Point
# from django.utils import timezone
from django.db.models import F, Sum, Avg, Count, StdDev, Max, Q
from tqdm import tqdm
import pandas as pd
import numpy as np
import os
import time
# datetime.now(tz=timezone.utc) # you can use this value
#db = DB_epidemiologico.objects.filter(fecha="2021-05-17")
#db.delete()
class Command(BaseCommand):
help = "Epidemiolog: Command for create the resumen using the current date in the DB"
bucket = Bucket_handler(project_id=GCP_PROJECT_ID)
filename = 'poblacion.csv'
def add_arguments(self, parser):
"""
Example:
- for initialize the database using the last three weeks
$python manage.py worker_t_epidem full --w 3
- for append the last three weeks
$python manage.py worker_t_epidem last --w 3
"""
parser.add_argument(
'mode', type=str, help="full/last , full: load the last 5 weeks, last: load the last week")
parser.add_argument(
'--w', type=int, help="reset the database and load the #w last weeks")
def print_shell(self, text):
self.stdout.write(self.style.SUCCESS(text))
def save_table(self, table, db, mode):
if mode == 'full':
records = table.to_dict(orient='records')
records = [db(**record) for record in tqdm(records)]
_ = db.objects.all().delete()
_ = db.objects.bulk_create(records)
elif mode == 'last':
# this is posible because the table is sorter by "-fecha"
last_record = db.objects.all()[:1]
last_record = list(last_record)
if len(last_record) > 0:
last_date = str(last_record[0].fecha.date())
else:
last_date = '2021-01-01'
table = table.loc[table.fecha > last_date]
if len(table):
self.print_shell("Storing new records")
records = table.to_dict(orient='records')
records = [db(**record) for record in tqdm(records)]
_ = db.objects.bulk_create(records)
else:
self.print_shell("No new data was found to store")
def handle(self, *args, **options):
mode = options["mode"]
w = options["w"]
assert mode in ['full', 'last'], "Error in --mode argument"
weeks = self.get_weeks_from_args(mode, w)
self.print_shell("Computing epidemiology score")
# Downloading data from bucket
self.downloading_source_csv()
self.load_poblacion_table_popu()
table_vacc = self.query_vacunados(DB_vacunas, weeks)
#
table_pos = self.query_test_positivos(DB_positividad_salida, weeks)
table_pos = self.normalizer_100k_population(table_pos,
['total', 'total_pos'])
table_uci = self.query_uci_status(DB_capacidad_hosp, weeks)
table_minsa = self.query_deaths_minsa(DB_minsa_muertes, weeks)
table_minsa = self.normalizer_100k_population(table_minsa,
['n_muertes'])
table_rt = self.query_rt_score(DB_rt, weeks)
table = self.merge_tables(
table_pos, table_uci, table_minsa, table_rt, table_vacc)
table = self.aggregate_avg_by_week(table)
table = self.calc_vacc_progress(table)
table = self.scoring_variables(table)
table = self.last_week_comparation(table)
self.save_table(table, DB_epidemiologico, mode)
self.print_shell('Work Done!')
def get_weeks_from_args(self, mode, weeks):
if weeks:
return weeks + 2
elif mode == 'full':
return 6
elif mode == 'last':
return 4
def downloading_source_csv(self):
"""
Function to download the csv file which contain all the url and standar names
for the the data from the goberment, then
read that file and download all the files form source.
"""
self.print_shell('Downloading poblacion.csv ... ')
self.bucket.download_blob(bucket_name=BUCKET_NAME,
source_blob_name="data_source/"+self.filename,
destination_file_name="temp/"+self.filename)
def load_poblacion_table_popu(self):
table = pd.read_csv('temp/'+self.filename)
table.rename(columns={
'Region': 'region',
'total': 'poblacion'
}, inplace=True)
table.region = table.region.apply(lambda x: normalizer_str(x))
print(table)
self.table_popu = table
def get_fecha_max(self, db, fecha='fecha'):
query = db.objects.values(fecha)
query = query.aggregate(Max(fecha))
query = query[fecha+'__max'].date()
print(query)
return query
def query_test_positivos(self, db, weeks):
fecha_max = self.get_fecha_max(db)
fecha_min = fecha_max - timedelta(days=8*weeks)
query = db.objects.values('fecha',
'region',
'total_test',
'total_pos')
query = query.filter(fecha__gt=fecha_min)
query = query.annotate(positividad=(
F('total_pos') / F('total_test')*100))
query = query.order_by('region')
query = pd.DataFrame.from_records(query)
query.rename(columns={
'total_test': 'total'
}, inplace=True)
print(query)
return query
def query_uci_status(self, db, weeks):
fecha_max = self.get_fecha_max(db, 'fecha_corte')
fecha_min = fecha_max - timedelta(days=8*weeks)
query = db.objects.values('fecha_corte',
'region',
)
query = query.filter(fecha_corte__gt=fecha_min)
query = query.exclude(region='PERU')
query = query.annotate(uci_p=F('uci_zc_cama_ocup') /
F('uci_zc_cama_total')*100,
camas_p=F('uci_znc_cama_ocup') /
F('uci_znc_cama_total')*100
)
query = pd.DataFrame.from_records(query)
query.rename(columns={'fecha_corte': 'fecha'}, inplace=True)
print(query)
return query
def query_deaths_minsa(self, db, weeks):
columns = ['fecha',
'region',
'n_muertes', ]
fecha_max = self.get_fecha_max(db,)
fecha_min = fecha_max - timedelta(days=8*weeks)
query = db.objects
query = query.filter(fecha__gt=fecha_min)
query = query.exclude(region='PERU')
query = query.order_by('region')
query = query.values(*columns)
query = pd.DataFrame.from_records(query)
print(query.loc[query.region == 'PUNO'].n_muertes.mean())
print(query.loc[query.region == 'PUNO'])
return query
def query_rt_score(self, db, weeks):
fecha_max = self.get_fecha_max(db, 'date')
fecha_min = fecha_max - timedelta(days=8*weeks)
query = db.objects
query = query.values('date',
'region',
'ml')
query = query.filter(date__gt=fecha_min)
query = query.exclude(region='PERU')
query = query.order_by('region')
query = pd.DataFrame.from_records(query)
query.rename(columns={'date': 'fecha'}, inplace=True)
print(query)
return query
def normalizer_100k_population(self, table, columns):
def change_normal(x, column):
if x['region'] == 'LIMA':
x['region'] = 'LIMA METROPOLITANA'
n_pp = self.table_popu.loc[self.table_popu.region == x['region']]
n_pp = n_pp['poblacion'].tolist()[0]
return x[column]/n_pp*100000
for column in columns:
table[column] = table.apply(change_normal, args=(column,), axis=1)
print(table.isnull().sum())
print(table.info())
return table
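# Worked example of the normalisation above: 50 events in a region with a population
# of 1,250,000 become 50 / 1,250,000 * 100000 = 4 events per 100k inhabitants, making
# regions of very different sizes comparable on one scale.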
@ staticmethod
def rename_total_table_columns(table):
table.rename(columns={
'total': 'avg_test',
'total_pos': 'incid_100',
'n_muertes': 'fall_100',
'ml': 'rt',
'uci_p': 'uci',
'camas_p': 'camas_covid',
'vacc_acum': 'vacc_acum',
}, inplace=True)
return table
def merge_tables(self, posit, uci, minsa, rt, table_vacc):
total = posit.merge(uci,
on=["fecha", "region"],
how="outer")
total = total.merge(minsa,
on=["fecha", "region"],
how="outer")
total = total.merge(rt,
on=["fecha", "region"],
how="outer")
total = total.merge(table_vacc,
on=["fecha", "region"],
how="outer")
total = total.merge(self.table_popu,
on=['region'],
how='outer')
total['n_week'] = total.fecha.apply(lambda x: (x).isocalendar()[1])
# total['n_week'] = total.fecha.apply(
# lambda x: (x+timedelta(days=1)).isocalendar()[1])
# total['n_week'] =
# the current week never process
curr_week = (datetime.now()).isocalendar()[1]
print('current week ', curr_week)
total = total.loc[(total['n_week'] > total['n_week'].min())
& (total['n_week'] < curr_week)
]
total = self.rename_total_table_columns(total)
print(total.isnull().sum())
cols = total.columns.tolist()
cols.remove('fecha')
total[cols] = total[cols].apply(pd.to_numeric, errors='ignore')
# print(total.sort_values(by='fecha').tail(50))
# print(total.loc[total.region == 'LIMA METROPOLITANA'])
# print(posit.loc[posit.region ==
# 'LIMA METROPOLITANA'].sort_values(by='fecha'))
return total
def scoring_variables(self, table):
print(table.info())
print(table.head())
print(table.isnull().sum())
table = table.fillna(0)
cut_fall = [-0.01, 2, 5, 7, 1e7]
cut_uci = [-0.01, 70, 90, 98, 100]
cut_incid = [-0.01, 80, 100, 120, 1e7]
cut_rt = [-0.01, .7, 1.1, 1.6, 1e7]
cut_pos = [-0.01, 11, 15, 20, 1e7]
cut_test = [-1e7, 34*7, 60*7, 100*7, 1e7]
color = [1, 2, 3, 4]
table['fall_score'] = pd.cut(table.fall_100,
cut_fall,
labels=color).astype(int)
table['uci_score'] = pd.cut(table.uci,
cut_uci,
labels=color).astype(int)
table['incid_score'] = pd.cut(table.incid_100,
cut_incid,
labels=color).astype(int)
table['rt_score'] = pd.cut(table.rt,
cut_rt,
labels=color).astype(int)
table['posit_score'] = pd.cut(table.positividad,
cut_pos,
labels=color).astype(int)
table['test_score'] = pd.cut(table.avg_test,
cut_test,
labels=color[::-1]).astype(int)
table['score'], table['val_score'] = self.calculate_score(table)
print(table.describe())
return table
@ staticmethod
def calculate_score(table):
cut_score = [0, 31, 38, 43, 1e7]
color = [1, 2, 3, 4]
w = [4, 3, 2.5, 2, 1.5, 1]
result = table['fall_score']*w[0] + table['uci_score']*w[1] + \
table['incid_score'] * w[2] + table['rt_score']*w[3] + \
table['posit_score'] * w[4] + table['test_score']*w[5]
return pd.cut(result, cut_score, labels=color).astype(int), result
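# Worked example for the weighted score above: with w = [4, 3, 2.5, 2, 1.5, 1]
# (fall, uci, incidence, rt, positivity, testing), all six indicators at their worst
# level (4) give 4 * (4 + 3 + 2.5 + 2 + 1.5 + 1) = 56, which pd.cut maps to the top
# band (> 43 -> score 4); all indicators at 1 give 14, which lands in (0, 31] -> score 1.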
def date_table_factory(self, fechas_orig, region_name):
min_ = fechas_orig.min()
max_ = fechas_orig.max()
totaldatelist = pd.date_range(start=min_, end=max_).tolist()
totaldatelist = pd.DataFrame(data={"fecha": totaldatelist})
totaldatelist['region'] = region_name
return totaldatelist
def aggregate_avg_by_week(self, table):
table = table.groupby(["region", ])
table_acum = pd.DataFrame()
for region in table:
region_name = region[0]
temp = region[1].sort_values(by="fecha")
totaldatelist = self.date_table_factory(temp.fecha, region_name)
temp = totaldatelist.merge(temp,
on=["fecha", 'region'],
how="outer",
)
temp = temp.sort_values(by="fecha")
temp = temp.reset_index(drop=True)
temp = temp.fillna(method="ffill")
temp = temp.fillna(method="bfill")
temp = temp.dropna()
temp = temp.groupby(["n_week", "region"]).agg({
'fecha': 'first',
'avg_test': 'sum',
'incid_100': 'sum',
'positividad': 'mean',
'uci': 'last',
'camas_covid': 'last',
'fall_100': 'sum',
'rt': 'mean',
'vacc_acum': 'max',
'poblacion': 'last',
})
temp = temp.reset_index()
# temp.fecha = temp.fecha.apply(lambda x: x.date())
table_acum = pd.concat([table_acum, temp], ignore_index=True)
# print(table_acum.info())
table_acum['rt'] = table_acum['rt'].astype(float)
# print(table_acum.head())
# print(table_acum.tail(12))
# print(table_acum.head(12))
return table_acum
def calc_vacc_progress(self, table):
table['vacc_prog'] = table.vacc_acum/table.poblacion*100
return table
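# Example of the progress figure above: 1,000,000 accumulated first doses in a region
# of 33,000,000 inhabitants give 1,000,000 / 33,000,000 * 100 ≈ 3.0 % coverage.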
def query_vacunados(self, db, weeks):
fecha_max = self.get_fecha_max(db,)
fecha_min = fecha_max - timedelta(days=8*weeks)
# Records diarios por region
query = db.objects
query = query.filter(dosis=1)
histo = query.values('fecha', 'region')
histo = histo.annotate(vacc_acum=Sum('cantidad'))
histo = histo.order_by('fecha', 'region')
histo = | pd.DataFrame.from_records(histo) | pandas.DataFrame.from_records |
import pandas as pd
import numpy as np
from texttable import Texttable
from cape_privacy.pandas import dtypes
from cape_privacy.pandas.transformations import NumericPerturbation
from cape_privacy.pandas.transformations import DatePerturbation
from cape_privacy.pandas.transformations import NumericRounding
from cape_privacy.pandas.transformations import Tokenizer
from faker import Faker
from anonympy.pandas import utils_pandas as _utils
from sklearn.decomposition import PCA
class dfAnonymizer(object):
"""
Initializes pandas DataFrame as a dfAnonymizer object.
Parameters:
----------
df: pandas DataFrame
Returns:
----------
dfAnonymizer object
Raises
----------
Exception:
* If ``df`` is not a DataFrame
See also
----------
dfAnonymizer.to_df : Return a DataFrame
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
Contructing dfAnonymizer object:
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.to_df()
name age ... email ssn
0 Bruce 33 ... <EMAIL> 343554334
1 Tony 48 ... <EMAIL> 656564664
"""
def __init__(self,
df: pd.DataFrame):
if df.__class__.__name__ != "DataFrame":
raise Exception(f"{df} is not a pandas DataFrame.")
# Private Attributes
self._df = df.copy()
self._df2 = df.copy()
self._methods_applied = {}
self._synthetic_data = 'Synthetic Data'
self._tokenization = 'Tokenization'
self._numeric_perturbation = 'Numeric Perturbation'
self._datetime_perturbation = 'Datetime Perturbation'
self._round = 'Generalization - Rounding'
self._bin = 'Generalization - Binning'
self._drop = 'Column Suppression'
self._sample = 'Resampling'
self._PCA = 'PCA Masking'
self._email = 'Partial Masking'
# Public Attributes
self.anonymized_columns = []
self.columns = self._df.columns.tolist()
self.unanonymized_columns = self.columns.copy()
self.numeric_columns = _utils.get_numeric_columns(self._df)
self.categorical_columns = _utils.get_categorical_columns(self._df)
self.datetime_columns = _utils.get_datetime_columns(self._df)
self._available_methods = _utils.av_methods
self._fake_methods = _utils.faker_methods
def __str__(self):
return self._info().draw()
def __repr__(self):
return self._info().draw()
def _dtype_checker(self, column: str):
'''
Returns the dtype of the column
Parameters
----------
column: str
Returns
----------
dtype: numpy dtype
'''
dtype = self._df[column].dtype
if dtype == np.float32:
return dtypes.Float
elif dtype == np.float64:
return dtypes.Double
elif dtype == np.byte:
return dtypes.Byte
elif dtype == np.short:
return dtypes.Short
elif dtype == np.int32:
return dtypes.Integer
elif dtype == np.int64:
return dtypes.Long
else:
return None
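# Example of the mapping above (sketch): an int64 column such as df['salary'] maps to
# cape_privacy's dtypes.Long, a float32 column to dtypes.Float, while unsupported
# dtypes (e.g. object/str) fall through to None (the caller is expected to handle that case).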
def anonymize(self,
methods=None,
locale=['en_US'],
seed=None,
inplace=True):
'''
Anonymize all columns using different methods for each dtype.
If dictionary is not provided, for numerical columns
``numeric_rounding`` is applied.
``categorical_fake`` and ``categorical_tokenization`` for
categorical columns
and ``datetime_noise`` or ``datetime_fake`` are applied for columns of
datetime type.
Parameters
----------
methods : Optional[Dict[str, str]], default None
{column_name: anonympy_method}. Call ``available_methods`` for list
of all methods.
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
inplace : bool, default True
If True the changes will be applied to `dfAnonymizer` obejct, else
output is returned.
seed : Optional[int], default None
Pass an integer for reproducible output across multiple function
calls.
Returns
----------
If inplace is False, pandas Series or DataFrame is returned
See Also
--------
dfAnonymizer.categorical_fake_auto : Replace values with synthetically
generated ones
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset, \
available_methods
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
If methods None:
>>> anonym.anonymize(inplace = False)
name age ... email ssn
0 <NAME> 30 ... <EMAIL> 718-51-5290
1 <NAME> 50 ... <EMAIL> 684-81-8137
Passing a dict for specifying which methods to apply:
>>> available_methods('numeric')
numeric_noise numeric_binning numeric_masking numeric_rounding
>>> anonym.anonymize({'name':'categorical_fake',
... 'age':'numeric_noise',
... 'email':'categorical_email_masking',
... 'salary': 'numeric_rounding'}, inplace = False)
name age email salary
0 <NAME> 37 <EMAIL> 60000.0
1 <NAME> 52 <EMAIL> 50000.0
'''
if not methods:
if inplace:
# try synthetic data
self.categorical_fake_auto(locale=locale, seed=seed)
# if there are still columns left unanonymized
if self.unanonymized_columns:
for column in self.unanonymized_columns.copy():
if column in self.numeric_columns:
self.numeric_rounding(column)
elif column in self.categorical_columns:
self.categorical_tokenization(column,
key=str(seed))
elif column in self.datetime_columns:
self.datetime_noise(column, seed=seed)
else:
# try synthetic data
temp = self.categorical_fake_auto(locale=locale,
inplace=False,
seed=seed)
unanonymized = self.unanonymized_columns.copy()
if isinstance(temp, pd.DataFrame):
unanonymized = [column for column in unanonymized
if column not in temp.columns.to_list()]
elif isinstance(temp, pd.Series):
unanonymized.remove(temp.name)
temp = | pd.DataFrame(temp) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import talib
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 30)
pd.set_option('precision', 7)
pd.options.display.float_format = '{:,.3f}'.format
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
from sklearn import preprocessing, svm, cross_validation, metrics, pipeline, grid_search
from scipy.stats import sem
from sklearn.decomposition import PCA, KernelPCA
from sklearn.metrics import matthews_corrcoef
'''
读入一支股票指定年份的ohlcv数据
输入:baseDir,stockCode为字符, startYear,yearNum为整数,
输出:dataframe
'''
def readWSDFile(baseDir, stockCode, startYear, yearNum=1):
# 解析日期
dateparse = lambda x: | pd.datetime.strptime(x, '%Y-%m-%d') | pandas.datetime.strptime |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/11/8 17:48
Desc: 同花顺 (THS) - Sector - Industry boards
http://q.10jqka.com.cn/thshy/
"""
import os
from datetime import datetime
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from mssdk.utils import demjson
def _get_js_path_ths(name: str = None, module_file: str = None) -> str:
"""
Get the path of a JS file bundled with this module (resolved relative to the module directory)
:param name: file name
:type name: str
:param module_file: path of the calling module file
:type module_file: str
:return: path to the JS file
:rtype: str
"""
module_folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))
module_json_path = os.path.join(module_folder, "stock_feature", name)
return module_json_path
def _get_file_content_ths(file_name: str = "ase.min.js") -> str:
"""
Read the content of a bundled JS file
:param file_name: JS file name
:type file_name: str
:return: file content
:rtype: str
"""
setting_file_name = file_name
setting_file_path = _get_js_path_ths(setting_file_name, __file__)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def stock_board_industry_name_ths() -> pd.DataFrame:
"""
同花顺 (THS) - Sector - Industry boards
http://q.10jqka.com.cn/thshy/
:return: names and links of all industry boards
:rtype: pandas.DataFrame
"""
code_name_ths_map = {'881101': '种植业与林业',
'881102': '养殖业',
'881103': '农产品加工',
'881104': '农业服务',
'881105': '煤炭开采加工',
'881107': '油气开采及服务',
'881108': '化学原料',
'881109': '化学制品',
'881110': '化工合成材料',
'881112': '钢铁',
'881114': '金属新材料',
'881115': '建筑材料',
'881116': '建筑装饰',
'881117': '通用设备',
'881118': '专用设备',
'881119': '仪器仪表',
'881120': '电力设备',
'881121': '半导体及元件',
'881122': '光学光电子',
'881123': '其他电子',
'881124': '消费电子',
'881125': '汽车整车',
'881126': '汽车零部件',
'881127': '非汽车交运',
'881128': '汽车服务',
'881129': '通信设备',
'881130': '计算机设备',
'881131': '白色家电',
'881132': '黑色家电',
'881133': '饮料制造',
'881134': '食品加工制造',
'881135': '纺织制造',
'881136': '服装家纺',
'881137': '造纸',
'881138': '包装印刷',
'881139': '家用轻工',
'881140': '化学制药',
'881141': '中药',
'881142': '生物制品',
'881143': '医药商业',
'881144': '医疗器械',
'881145': '电力',
'881146': '燃气',
'881148': '港口航运',
'881149': '公路铁路运输',
'881151': '机场航运',
'881152': '物流',
'881153': '房地产开发',
'881155': '银行',
'881156': '保险及其他',
'881157': '证券',
'881158': '零售',
'881159': '贸易',
'881160': '景点及旅游',
'881161': '酒店及餐饮',
'881162': '通信服务',
'881163': '计算机应用',
'881164': '传媒',
'881165': '综合',
'881166': '国防军工',
'881167': '非金属材料',
'881168': '工业金属',
'881169': '贵金属',
'881170': '小金属',
'881171': '自动化设备',
'881172': '电子化学品',
'881173': '小家电',
'881174': '厨卫电器',
'881175': '医疗服务',
'881176': '房地产服务',
'881177': '互联网电商',
'881178': '教育',
'881179': '其他社会服务',
'881180': '石油加工贸易',
'881181': '环保',
'881182': '美容护理',
'884001': '种子生产',
'884002': '粮食种植',
'884003': '其他种植业',
'884004': '林业',
'884005': '海洋捕捞',
'884006': '水产养殖',
'884007': '畜禽养殖',
'884008': '饲料',
'884009': '果蔬加工',
'884010': '粮油加工',
'884011': '其他农产品加工',
'884012': '农业综合',
'884013': '动物保健',
'884014': '煤炭开采',
'884015': '焦炭加工',
'884016': '油气开采',
'884018': '油服工程',
'884020': '石油加工',
'884021': '油品石化贸易',
'884022': '纯碱',
'884023': '氯碱',
'884024': '无机盐',
'884025': '其他化学原料',
'884026': '氮肥',
'884027': '磷肥及磷化工',
'884028': '农药',
'884030': '涂料油墨',
'884031': '钾肥',
'884032': '民爆用品',
'884033': '纺织化学用品',
'884034': '其他化学制品',
'884035': '复合肥',
'884036': '氟化工',
'884039': '聚氨酯',
'884041': '涤纶',
'884043': '粘胶',
'884044': '其他纤维',
'884045': '氨纶',
'884046': '其他塑料制品',
'884048': '改性塑料',
'884050': '其他橡胶制品',
'884051': '炭黑',
'884052': '普钢',
'884053': '铝',
'884054': '铜',
'884055': '铅锌',
'884056': '其他金属新材料',
'884057': '磁性材料',
'884058': '非金属材料Ⅲ',
'884059': '玻璃玻纤',
'884060': '水泥',
'884062': '其他建材',
'884063': '耐火材料',
'884064': '管材',
'884065': '装饰园林',
'884066': '房屋建设',
'884067': '基础建设',
'884068': '专业工程',
'884069': '机床工具',
'884071': '磨具磨料',
'884073': '制冷空调设备',
'884074': '其他通用设备',
'884075': '金属制品',
'884076': '纺织服装设备',
'884077': '工程机械',
'884078': '农用机械',
'884080': '能源及重型设备',
'884081': '印刷包装机械',
'884082': '其他专用设备',
'884083': '楼宇设备',
'884084': '环保设备',
'884085': '电机',
'884086': '电气自控设备',
'884088': '输变电设备',
'884089': '线缆部件及其他',
'884090': '分立器件',
'884091': '半导体材料',
'884092': '印制电路板',
'884093': '被动元件',
'884094': '面板',
'884095': 'LED',
'884096': '光学元件',
'884098': '消费电子零部件及组装',
'884099': '乘用车',
'884100': '商用载货车',
'884101': '商用载客车',
'884105': '轨交设备',
'884106': '其他交运设备',
'884107': '汽车服务Ⅲ',
'884112': '冰洗',
'884113': '空调',
'884115': '小家电Ⅲ',
'884116': '其他白色家电',
'884117': '彩电',
'884118': '其他黑色家电',
'884119': '其他酒类',
'884120': '软饮料',
'884123': '肉制品',
'884124': '调味发酵品',
'884125': '乳品',
'884126': '其他食品',
'884128': '棉纺',
'884130': '印染',
'884131': '辅料',
'884132': '其他纺织',
'884136': '鞋帽及其他',
'884137': '家纺',
'884139': '家具',
'884140': '其他家用轻工',
'884141': '饰品',
'884142': '文娱用品',
'884143': '原料药',
'884144': '化学制剂',
'884145': '医疗设备',
'884146': '火电',
'884147': '水电',
'884149': '热力',
'884150': '新能源发电',
'884152': '燃气Ⅲ',
'884153': '港口',
'884154': '高速公路',
'884155': '铁路运输',
'884156': '机场',
'884157': '航空运输',
'884158': '多元金融',
'884159': '保险',
'884160': '百货零售',
'884161': '专业连锁',
'884162': '商业物业经营',
'884163': '人工景点',
'884164': '自然景点',
'884165': '旅游综合',
'884167': '酒店',
'884168': '餐饮',
'884172': '有线电视网络',
'884173': '通信服务Ⅲ',
'884174': '软件开发',
'884176': '出版',
'884177': '影视院线',
'884178': '广告营销',
'884179': '其他传媒',
'884180': '航天装备',
'884181': '航空装备',
'884182': '地面兵装',
'884183': '航海装备',
'884184': '特钢',
'884185': '贵金属Ⅲ',
'884186': '其他小金属',
'884188': '白酒',
'884189': '啤酒',
'884191': '航运',
'884192': '仪器仪表Ⅲ',
'884193': '其他电子Ⅲ',
'884194': '汽车零部件Ⅲ',
'884195': '造纸Ⅲ',
'884197': '中药Ⅲ',
'884199': '医药商业Ⅲ',
'884200': '公交',
'884201': '物流Ⅲ',
'884202': '住宅开发',
'884203': '产业地产',
'884205': '证券Ⅲ',
'884206': '贸易Ⅲ',
'884207': '计算机设备Ⅲ',
'884208': '综合Ⅲ',
'884209': '钛白粉',
'884210': '食品及饲料添加剂',
'884211': '有机硅',
'884212': '合成树脂',
'884213': '膜材料',
'884214': '冶钢原料',
'884215': '稀土',
'884216': '能源金属',
'884217': '工程咨询服务',
'884218': '机器人',
'884219': '工控设备',
'884220': '激光设备',
'884221': '其他自动化设备',
'884222': '光伏设备',
'884223': '风电设备',
'884224': '电池',
'884225': '其他电源设备',
'884226': '集成电路设计',
'884227': '集成电路制造',
'884228': '集成电路封测',
'884229': '半导体设备',
'884230': '品牌消费电子',
'884231': '电子化学品Ⅲ',
'884232': '厨卫电器Ⅲ',
'884233': '休闲食品',
'884234': '服装',
'884235': '印刷',
'884236': '包装',
'884237': '瓷砖地板',
'884238': '血液制品',
'884239': '疫苗',
'884240': '其他生物制品',
'884242': '医疗耗材',
'884243': '体外诊断',
'884244': '医疗研发外包',
'884245': '其他医疗服务',
'884246': '电能综合服务',
'884247': '商业地产',
'884248': '房地产服务Ⅲ',
'884249': '国有大型银行',
'884250': '股份制银行',
'884251': '城商行',
'884252': '农商行',
'884253': '其他银行',
'884254': '旅游零售',
'884255': '互联网电商Ⅲ',
'884256': '教育Ⅲ',
'884257': '专业服务',
'884258': '体育',
'884259': '其他社会服务Ⅲ',
'884260': '游戏',
'884261': '数字媒体',
'884262': '通信网络设备及器件',
'884263': '通信线缆及配套',
'884264': '通信终端及配件',
'884265': '其他通信设备',
'884266': '军工电子',
'884267': '大气治理',
'884268': '水务及水治理',
'884269': '固废治理',
'884270': '综合环境治理',
'884271': '个护用品',
'884272': '化妆品',
'884273': '医疗美容',
'884274': 'IT服务'}
temp_df = | pd.DataFrame.from_dict(code_name_ths_map, orient="index") | pandas.DataFrame.from_dict |
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("Qt5Agg") # 声明使用QT5
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import mpl_finance as mpf
from matplotlib.pylab import date2num
class Figure_Canvas(FigureCanvas):
def __init__(self, parent=None, width=30, height=4, dpi=100):
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK (Chinese) labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with the CJK font
fig = Figure(figsize=(width, height), dpi=dpi)  # create the Figure backing this canvas
FigureCanvas.__init__(self, fig)  # initialize the Qt canvas parent class
self.setParent(parent)
self.axes = fig.add_subplot(111)  # single Axes used for the K-line chart
def pricePlot(self, date, start, end, high, low):
"""
Draw k-line chart.
:param date: stock date, a pd series type
:param start: open price, a pd series type
:param end: closed price, a pd series type
:param high: the highest price, a pd series type
:param low: the lowest price, a pd series type
:return: None
"""
date = pd.to_datetime(date)
date1 = date.apply(lambda x: date2num(x))
data = | pd.concat([date1, start, end, high, low], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = | DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0]) | pandas.DataFrame |
import datetime as dt
import gc
import json
import logging
import os
import pickle
from glob import glob
from typing import Dict, List, Optional, Tuple, Union
import h5py
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyproj
import rasterio as rio
import simplekml
from cataloging.vi import gliImage, ngrdiImage, osaviImage
from fluidml.common import Task
#from PIL import Image
from pycpd import RigidRegistration
from pykml import parser
from rasterio.enums import Resampling
from rasterio.transform import rowcol, xy
from rasterio.windows import Window
from scipy.ndimage import distance_transform_edt
from scipy.optimize import curve_fit
from scipy.signal import find_peaks
#from skimage.exposure import equalize_adapthist
from skimage.feature import peak_local_max
from skimage.filters import gaussian, threshold_otsu
from skimage.measure import label, regionprops
from skimage.segmentation import watershed
from skimage.transform import hough_line, hough_line_peaks, resize
from sklearn.neighbors import NearestNeighbors
logger = logging.getLogger(__name__)
# suppress pickle 'error' from rasterio
logging.Logger.manager.loggerDict['rasterio'].setLevel(logging.CRITICAL)
logging.Logger.manager.loggerDict['matplotlib'].setLevel(logging.CRITICAL)
import warnings
warnings.filterwarnings("ignore")
mpl.use('Agg')
def read_raster(
image_path: str,
all_channels: np.array,
channels: List[str]
):
ch = [np.argmax(all_channels == c)+1 for c in channels]
raster = rio.open(image_path)
if raster.dtypes[0] == "float32":
data = raster.read(ch, fill_value=np.nan)
data /= np.nanmax(data)
elif raster.dtypes[0] == "uint8":
if "alpha" in all_channels:
data = raster.read(ch).astype(np.float32)
alpha_ch = raster.read(int(np.argmax(all_channels == "alpha")+1))
            # blank out every band where the alpha channel marks fully transparent pixels
            for d in data[:, :]:
                d[alpha_ch == 0] = np.nan
else:
data = raster.read(ch, fill_value=0).astype(np.float32)
else:
raise NotImplementedError()
return np.transpose(data, axes=(1,2,0))
def write_onechannel_raster(
image_path: str,
image: np.array,
meta: Dict, dtype: str
):
if dtype == 'float32':
meta.update({
'dtype': 'float32',
'height': image.shape[0],'count': 1,'nodata': -32767,
'width': image.shape[1]})
elif dtype == 'uint8':
meta.update({
'dtype': 'uint8',
'height': image.shape[0],'count': 1,'nodata': 0,
'width': image.shape[1]})
else:
raise NotImplementedError()
with rio.open(image_path, "w", **meta) as dest:
dest.write(image,1)
def calc_m_per_px(
raster_meta: Dict
) -> float:
# read CRS of rasterio data
proj_crs = pyproj.crs.CRS.from_user_input(raster_meta["crs"])
# GPS coordinates of anchor point
lon0, lat0 = xy(raster_meta["transform"],0,0)
# calculate UTM zone
utm_zone = int(np.floor((lon0/360)*60+31))
utm = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
UTM0_x, UTM0_y = utm(*xy(raster_meta["transform"],0,0))
UTM1_x, UTM1_y = utm(*xy(raster_meta["transform"],0,1))
UTM2_x, UTM2_y = utm(*xy(raster_meta["transform"],1,0))
# calculate unit pixel distances
pxx = abs(UTM1_x - UTM0_x)
pxy = abs(UTM2_y - UTM0_y)
# take mean (assume quadratic pixels)
m_per_px = np.mean([pxx, pxy])
return m_per_px
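# --- Illustrative sketch (added for clarity, not part of the original pipeline): the raster
# metadata below is fabricated (WGS84, 1e-5 degree pixels near 50N / 8E) purely to show which
# keys calc_m_per_px expects; the returned value is on the order of 1 m per pixel.
def _example_calc_m_per_px():
    fake_meta = {
        "crs": "EPSG:4326",
        "transform": rio.transform.from_origin(8.0, 50.0, 1e-5, 1e-5),
    }
    return calc_m_per_px(fake_meta)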
def px_to_utm(
point_cloud: np.ndarray,
raster_meta: Dict
) -> Tuple[np.ndarray, pyproj.proj.Proj]:
# read CRS of rasterio data
proj_crs = pyproj.crs.CRS.from_user_input(raster_meta["crs"])
# GPS coordinates of point cloud
lon, lat = np.asarray(xy(raster_meta["transform"],*point_cloud.T))
# calculate UTM zone
utm_zone = int(np.floor((lon.mean()/360)*60+31))
utm_transform = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
utm = np.asarray(utm_transform(lon, lat)).T
return utm, utm_transform
def readCoordsFromKml(
filename: str
) -> np.ndarray:
with open(filename, "r") as kmlfile:
root = parser.parse(kmlfile).getroot()
lonlat = []
for c in root.Document.iterchildren():
lonlat.append([float(x) for x in c.Point.coordinates.text.split(",")[:2]])
lonlat = np.asarray(lonlat)
return lonlat
def growFunction(
x: float,
g: float,
lg: float,
xg: float,
d: float,
ld: float,
xd: float
) -> float:
if d > 0:
return (g/(1+np.exp(-lg*(x-xg)))) - d/(1+np.exp(-ld*(x-xd)))
else:
return (g/(1+np.exp(-lg*(x-xg))))
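# --- Illustrative sketch (added for clarity): fitting the (double-)logistic grow function to
# synthetic cover-ratio observations with scipy's curve_fit, mirroring what
# FitGrowFunction.run() does on real data. All numbers below are invented.
def _example_grow_function_fit():
    days = np.linspace(0, 90, 12)
    true_params = (0.8, 0.15, 35.0, 0.3, 0.2, 75.0)   # g, lg, xg, d, ld, xd
    observed = growFunction(days, *true_params) + np.random.normal(0, 0.01, days.size)
    fit, _ = curve_fit(growFunction, days, observed,
                       p0=[0.8, 0.1, 30.0, 0.3, 0.1, 60.0], maxfev=100000)
    return fit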
def cumDays(
observation_dates: Union[List[float],np.array]
) -> np.array:
cum_days = np.cumsum([d.days for d in np.diff(np.sort(observation_dates))]).astype(float)
cum_days = np.hstack((0, cum_days))
return cum_days
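# --- Illustrative sketch (added for clarity): cumulative day offsets for three hypothetical
# survey dates; the first date maps to 0, the others to days elapsed since it.
def _example_cum_days():
    dates = [dt.datetime(2021, 5, 1), dt.datetime(2021, 5, 11), dt.datetime(2021, 6, 1)]
    return cumDays(dates)   # -> array([ 0., 10., 31.])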
def growScaling(
cum_days: np.array,
bounds: Tuple,
grow_func_params: np.array
) -> np.array:
earliest, latest = bounds
grow_func = growFunction(cum_days, *grow_func_params)
maxgrow_val = np.max(grow_func)
grow_func = (grow_func - grow_func[0]) / (maxgrow_val - grow_func[0])
scaled = grow_func * (latest - earliest) + earliest
return scaled
def makeDirectory(
directory: str
) -> None:
if not os.path.exists(directory):
os.makedirs(directory)
def group_points(
points: np.array,
layers: np.array,
max_dist: float
) -> Tuple[np.array, np.array]:
nn = NearestNeighbors(n_neighbors=1, n_jobs=-1)
# initialization
# -> all labels to -1
labels = -np.ones_like(layers)
# all given layers
uni_layers = np.unique(layers)
# -> give points of first layer individual group labels
labels[layers == uni_layers[0]] = np.arange(np.sum(layers == uni_layers[0]))
# -> first evaluation point cloud: first layer
centroids = points[layers == uni_layers[0]]
ind = np.arange(len(points))
for i in range(1, len(uni_layers)):
# fit nearest neighbor model
nn.fit(centroids)
# evaluate on next layer
dist, ass_group = nn.kneighbors(points[layers == uni_layers[i]])
dist = dist.flatten()
ass_group = ass_group.flatten()
# exclude points that have more than max_dist distance to a neighbor
# new_member array:
# 1 = valid member candidate for existing group
# 0 = valid member candidate for new group
# -1 = excluded due to multiple candidates for a single group
new_member = (dist <= max_dist).astype(int)
# if multiple (valid!) points are assigned to the same group, take the nearest
valid = np.copy(new_member).astype(bool)
valid_ind = np.arange(len(valid))[valid]
for j, counts in enumerate(np.bincount(ass_group[valid])):
if counts > 1:
ass_group_ind = valid_ind[ass_group[valid] == j]
best_ind = ass_group_ind[np.argsort(dist[ass_group_ind])]
new_member[best_ind[1:]] = -1
# assign the group labels to the new members
layer_ind = ind[layers == uni_layers[i]]
old_layer_ind = layer_ind[new_member == 1]
labels[old_layer_ind] = ass_group[new_member == 1]
# give new group labels to points not registered so far
new_layer_ind = layer_ind[new_member == 0]
labels[new_layer_ind] = np.arange(labels.max()+1, labels.max()+1+len(new_layer_ind))
# new reference cloud are the centroids of the so far accumulated clusters
centroids = np.stack([np.mean(points[labels == label], axis=0) for label in range(labels.max()+1)])
return labels, centroids
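# --- Illustrative sketch (added for clarity): grouping detections of the same two plants
# across two acquisition dates ("layers"). Coordinates and max_dist are invented and use the
# same unit (cm in the real pipeline).
def _example_group_points():
    points = np.array([[0.0, 0.0], [50.0, 0.0],     # layer 0: two plants
                       [1.0, 1.0], [49.0, -1.0]])   # layer 1: the same plants re-detected
    layers = np.array([0, 0, 1, 1])
    labels, centroids = group_points(points, layers, max_dist=5.0)
    # labels -> [0, 1, 0, 1]: later detections join the nearest existing group;
    # centroids holds one mean position per group
    return labels, centroids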
def inverse_transform(
xy_centered_aligned,
xy_center,
transform_coeffs
):
s = transform_coeffs[0]
rot = np.deg2rad(transform_coeffs[1])
t = transform_coeffs[2:]
rot_inv = np.array([[np.cos(rot), np.sin(rot)], [-np.sin(rot), np.cos(rot)]])
return rot_inv@(xy_centered_aligned-t).T/s + xy_center
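# --- Illustrative sketch (added for clarity): applying a scale/rotation/translation to a
# single point and recovering the original with inverse_transform. The forward convention
# (scale, then rotate, then translate) is assumed from the coefficient order.
def _example_inverse_transform_roundtrip():
    xy_point = np.array([10.0, 20.0])
    xy_center = np.array([12.0, 18.0])
    s, rot_deg, t = 1.2, 30.0, np.array([5.0, -3.0])
    rad = np.deg2rad(rot_deg)
    rot = np.array([[np.cos(rad), -np.sin(rad)], [np.sin(rad), np.cos(rad)]])
    aligned = s * rot @ (xy_point - xy_center) + t
    recovered = inverse_transform(aligned, xy_center, np.array([s, rot_deg, *t]))
    return np.allclose(recovered, xy_point)   # True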
def add_non_detected(
df_less: pd.DataFrame,
df_meta: pd.DataFrame
) -> pd.DataFrame:
dates = np.unique(df_meta["date"])
xy_center = df_meta["xy_center"].iloc[0]
df_add = pd.DataFrame()
for g_id in np.unique(df_less["group_id"]):
df_group = df_less[df_less["group_id"] == g_id]
missing_dates = dates[np.isin(dates, df_group["date"], invert=True)]
for d in missing_dates:
xy_centered_aligned = df_group["xy_centered_aligned_cm"].mean(axis=0) # group centroid [cm (UTM)]
cropline_y = df_group["y_cropline_rotated_cm"].iloc[0]
align_transform = df_meta[df_meta["date"] == d]["align_transform"].iloc[0]
gps_transform = df_meta[df_meta["date"] == d]["gps_transform"].iloc[0]
utm_transform = df_meta[df_meta["date"] == d]["utm_transform"].iloc[0]
#cr = df_meta[df_meta["date"] == d]["cover_ratio"].values
#mc = df_meta[df_meta["date"] == d]["align_median_confidence"].values
xy_backtrans = inverse_transform(xy_centered_aligned, xy_center, align_transform)
lonlat_backtrans = utm_transform(*xy_backtrans/100., inverse=True)
df_add = df_add.append(
dict([("field_id" , df_group["field_id"].iloc[0]),
("date" , d),
("group_id" , g_id),
("group_size" , df_group["group_size"].iloc[0]),
("group_cropline_id" , df_group["group_cropline_id"].iloc[0]),
("xy_cm" , xy_backtrans),
("xy_px" , list(rowcol(gps_transform, *lonlat_backtrans))),
("lonlat" , lonlat_backtrans),
("xy_centered_aligned_cm" , xy_centered_aligned),
("xy_centroid_centered_aligned_cm" , xy_centered_aligned),
("y_cropline_rotated_cm" , cropline_y),
("centroid_dist_cm" , 0.),
("detected" , False)]), ignore_index=True)
return df_add
def filterGoodPlantsByPercDet(
plants_df: pd.DataFrame,
meta_df: pd.DataFrame,
filter_coverratio: float,
perc_min_det: float
) -> pd.DataFrame:
plants_meta_df = plants_df.merge(meta_df, on=["date", "field_id"], how="left")
n_dates = len(np.unique(meta_df["date"]))
# good plant group := at least perc_min_det direct detection ratio up to certain given cover ratio
good_idx = []
for f_id in np.unique(meta_df["field_id"]):
n_counts_below_cr_thres = np.sum(np.unique(plants_meta_df[plants_meta_df["field_id"]==f_id]["cover_ratio"]) <= filter_coverratio)
groups, counts = np.unique(plants_meta_df[(plants_meta_df["field_id"]==f_id) & (plants_meta_df["cover_ratio"] <= filter_coverratio) & (plants_meta_df["detected"] == True)]["group_id"], return_counts=True)
interest_groups = groups[counts/float(n_counts_below_cr_thres) >= perc_min_det]
candidates = plants_meta_df[(plants_meta_df["field_id"]==f_id) & (np.isin(plants_meta_df["group_id"], interest_groups))]
for g_id in interest_groups:
cand_group = candidates[candidates["group_id"]==g_id]
if len(cand_group)==n_dates:
good_idx.extend(cand_group.index)
good_df = plants_meta_df.loc[good_idx].sort_values(["field_id", "group_id", "date"])
return good_df
class SegmentSoilPlants(Task):
def __init__(
self,
image_path: str,
image_channels: List[str],
veg_index: str,
use_watershed: bool,
max_coverratio: float,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.image_path = image_path
self.image_channels = np.asarray(image_channels)
self.veg_index = veg_index
self.use_watershed = use_watershed
self.max_coverratio = max_coverratio
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot_raw(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot raw image.")
if len(self.image_channels) < 4:
n_rows, n_cols = 1, len(self.image_channels)
else:
n_rows, n_cols = 2, len(self.image_channels)//2
fig, ax = plt.subplots(n_rows, n_cols, sharex=True, sharey=True, figsize=(self.width/500*n_cols, self.height/800*n_rows))
data = read_raster(self.image_path, self.image_channels, self.image_channels)
for (i, (a, c)) in enumerate(zip(ax.ravel(), self.image_channels)):
im = a.imshow(data[:,:,i], cmap=self.plot_cmap)
try:
fig.colorbar(im, ax=a)
except:
pass
a.set(xlabel='x', ylabel='y', title = c, aspect='equal')
fig.suptitle("raw image data")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_01_channels"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
del data, fig, ax, im
plt.close("all")
gc.collect()
def plot_segmentation(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot segmentation image.")
fig = plt.figure(figsize=(3*self.width/500, self.height/500), tight_layout=True)
gridspec = gs.GridSpec(1,3,width_ratios=[2,1,2], figure=fig)
ax1 = fig.add_subplot(gridspec[0])
ax2 = fig.add_subplot(gridspec[1])
ax3 = fig.add_subplot(gridspec[2])
m = ax1.imshow(self.vi_image.astype(float), cmap=self.plot_cmap, vmin=-1, vmax=1)
cb = fig.colorbar(m, ax=ax1)
cb.set_label("VI")
ax1.set(title=f"{self.veg_index} image", xlabel="px", ylabel="px")
ax2.hist(self.vi_image[np.isfinite(self.vi_image)], bins=256, orientation="horizontal", color="C0")
ax2.set(title=f"{self.veg_index} value distribution", ylim=(-1,1), xlabel="counts", xscale="log")
if self.cover_ratio_est < 0.01:
ax2.axhline(self.thres, c='r', label=f"Threshold (99-percentile): {self.thres:.2f}")
else:
ax2.axhline(self.thres, c='r', label=f"Threshold (Otsu): {self.thres:.2f}")
ax2.legend()
ax3.imshow(self.seg_mask, cmap=self.plot_cmap)
ax3.set(title=f"Segmented plant area (cover ratio: {100.*self.cover_ratio:.2f} %)", xlabel="px", ylabel="px")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_02_segmentation"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax1, ax2, ax3
gc.collect()
def run(
self
):
try:
self.field_id, d = os.path.basename(self.image_path).replace(".tif", "").split("_")[:2]
year = int(d[:4])
month = int(d[4:6])
day = int(d[6:8])
self.date = dt.datetime(year, month, day)
except:
logger.error(f"Wrong image path or no files found: {self.image_path}")
logger.info(f"{self.name}-{self.date.date()} -> Load image.")
raster = rio.open(self.image_path)
raster_meta = raster.meta
self.height, self.width = raster.shape
px_res = calc_m_per_px(raster_meta)*100. # cm/px
logger.info(f"{self.name}-{self.date.date()} -> Calculated resolution: {px_res:.4f} cm/px.")
del raster
gc.collect()
# calculate Vegetation Index which has values in [-1,1]
if self.veg_index == "NGRDI":
channels = read_raster(self.image_path, self.image_channels, ["R", "G"])
self.vi_image = ngrdiImage(R = channels[:,:,0], G = channels[:,:,1])
est_thres = 0
elif self.veg_index == "GLI":
channels = read_raster(self.image_path, self.image_channels, ["R", "G", "B"])
self.vi_image = gliImage(R = channels[:,:,0], G = channels[:,:,1], B = channels[:,:,2])
est_thres = 0.2
elif self.veg_index == "OSAVI":
channels = read_raster(self.image_path, self.image_channels, ["R", "NIR"])
self.vi_image = osaviImage(R = channels[:,:,0], NIR = channels[:,:,1], y_osavi = 0.6)
est_thres = 0.25
del channels
gc.collect()
# cover ratio estimation
self.cover_ratio_est = np.nansum(self.vi_image >= est_thres)/np.sum(np.isfinite(self.vi_image))
logger.info(f"{self.name}-{self.date.date()} -> Use {self.veg_index} Vegetation Index. Cover ratio estimation: {self.cover_ratio_est*100.:.2f} %")
if self.cover_ratio_est <= self.max_coverratio:
# calculate threshold with Otsu's method
if self.cover_ratio_est < 0.01:
self.thres = np.percentile(self.vi_image[np.isfinite(self.vi_image)], 99)
                logger.warning(f"{self.name}-{self.date.date()} -> Estimated cover ratio below 1 % -> Take 99-percentile as threshold: {self.thres:.2f}")
else:
self.thres = threshold_otsu(self.vi_image[np.isfinite(self.vi_image)])
logger.info(f"{self.name}-{self.date.date()} -> Otsu threshold: {self.thres:.2f}")
# segmentation
if self.use_watershed:
logger.info(f"{self.name}-{self.date.date()} -> Segment soil and plants with watershed method.")
markers = np.zeros_like(self.vi_image, dtype=np.uint8)
markers[self.vi_image <= self.thres] = 1 # soil
markers[self.vi_image > self.thres] = 2 # plant
self.seg_mask = (watershed(self.vi_image, markers) - 1).astype(bool) # True -> plant, False -> soil
del markers
else:
logger.info(f"{self.name}-{self.date.date()} -> Segment soil and plants without watershed method.")
self.seg_mask = np.zeros_like(self.vi_image, dtype=bool) # True -> plant, False -> soil
self.seg_mask[self.vi_image > self.thres] = True # plant
self.cover_ratio = np.sum(self.seg_mask)/np.sum(np.isfinite(self.vi_image))
logger.info(f"{self.name}-{self.date.date()} -> Cover ratio recalculated: {self.cover_ratio*100.:.2f} %")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_segmentation()
gc.collect()
else:
            logger.warning(f"{self.name}-{self.date.date()} -> Estimated cover ratio ({self.cover_ratio_est*100.:.2f} %) is too high to extract plants -> Skip plot.")
self.seg_mask = []
self.cover_ratio = self.cover_ratio_est
self.save(obj=self.seg_mask, name="segmentation_mask", type_='pickle')
self.save(obj=self.cover_ratio, name="cover_ratio", type_='json')
self.save(obj=self.field_id, name="field_id", type_='json')
self.save(obj=self.date, name="date", type_='pickle')
self.save(obj=raster_meta, name="raster_meta", type_='pickle')
self.save(obj=px_res, name="px_resolution", type_='json')
        if self.make_orthoimage and len(self.seg_mask) > 0:
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name}-{self.date.date()} -> Save segmentation mask as orthoimage.")
write_onechannel_raster(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_segmentation.tif"),
np.uint8(self.seg_mask*255),
raster_meta,
"uint8")
# plot raw channel information
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_raw()
gc.collect()
class FitGrowFunction(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
def plot(
self
):
logger.info(f"{self.name} -> Plot Grow function.")
g, lg, xg, d, ld, xd = self.fit
cd = np.linspace(0, self.cum_days[-1], 1000)
cal_days = [self.observation_dates[0] + dt.timedelta(days=x) for x in self.cum_days]
fig, ax = plt.subplots()
ax.scatter(self.cum_days, self.cover_ratios, label="observations")
if d > 0:
label = r"grow function fit: $f(x)=\frac{g}{1+e^{-\lambda_g(x-x_g)}}-\frac{d}{1+e^{-\lambda_d(x-x_d)}}$"+f"\n$g$={g:.4g}, $\\lambda_g$={lg:.4g}, $x_g$={xg:.4g}\n$d$={d:.4g}, $\\lambda_d$={ld:.4g}, $x_d$={xd:.4g}"
else:
label = r"grow function fit: $f(x)=\frac{g}{1+e^{-\lambda_g(x-x_g)}}$"+f"\n$g$={g:.4g}, $\\lambda_g$={lg:.4g}, $x_g$={xg:.4g}"
ax.plot(cd, growFunction(cd, *self.fit), c="r", label=label)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.set(xlabel="days", ylabel="cover ratio")
ax.legend()
ax.grid()
ax_dt = ax.twiny()
ax_dt.set_xlim(map(lambda cd: self.observation_dates[0] + dt.timedelta(days=cd), ax.get_xlim()))
ax_dt.set_xlabel("calendar date")
ax_dt.set_xticks(cal_days)
ax_dt.tick_params(axis='x', labelrotation=90)
ax.set(title=f"{self.field_id}: grow function fit")
savename = os.path.join(self.plot_dir, f"{self.field_id}_grow_function"+self.plot_format)
fig.savefig(savename, dpi=self.plot_dpi, bbox_inches='tight')
plt.close("all")
del fig, ax, ax_dt
def run(
self,
reduced_results: List[Dict[str, Dict]]
):
cover_ratios = []
observation_dates = []
for r in reduced_results:
cover_ratios.append(r["result"]["cover_ratio"])
observation_dates.append(r["result"]["date"])
observation_dates = np.asarray(observation_dates)
cover_ratios = np.asarray(cover_ratios)
sort = np.argsort(observation_dates)
self.observation_dates = observation_dates[sort]
self.cover_ratios = cover_ratios[sort]
self.cum_days = cumDays(self.observation_dates)
self.field_id = reduced_results[0]["result"]["field_id"]
try:
self.fit, self.cov = curve_fit(growFunction, self.cum_days, self.cover_ratios,
p0=[0.8, 0.1, self.cum_days[-1]/3, 0.3, 0.1, 2*self.cum_days[-1]/3],
maxfev=1000000)
# calculate corrected cover ratios with grow function
#gf_cover_ratio = growFunction(self.cum_days, *self.fit)
#self.save(obj=gf_cover_ratio, name="grow_function_cover_ratios", type_='pickle')
#self.save(obj=self.observation_dates, name="dates", type_='pickle')
logger.info(f"{self.name} -> Grow function fitted")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
except Exception as e:
self.fit = np.nan
self.cov = np.nan
logger.warning(f"{self.name} -> Grow function could not be fitted. Error: {e}")
self.save(obj=self.fit, name="grow_function_fit_params", type_='pickle')
self.save(obj=self.cov, name="grow_function_cov_matrix", type_='pickle')
class ExtractPlantPositions(Task):
def __init__(
self,
min_peak_distance: float,
peak_threshold: float,
gauss_sigma_bounds: Tuple[float, float],
use_growfunction: bool,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.min_peak_distance = min_peak_distance
self.peak_threshold = peak_threshold
self.gauss_sigma_bounds = gauss_sigma_bounds
self.use_growfunction = use_growfunction
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot_gauss_blur(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot Gaussian blur image.")
fig, ax = plt.subplots(figsize=(self.width/500, self.height/500))
im = ax.imshow(self.blurred, cmap='gray')
        ax.set(title=rf"Gaussian blur ($\sigma$ = {self.sigma:.2f} px)", aspect='equal', xlabel='x [cm]', ylabel='y [cm]')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_03_gauss_blur"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def plot_peaks(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot peak position image.")
fig, ax = plt.subplots(figsize=(self.width/500, self.height/500))
ax.scatter(*self.peaks.T[::-1], color='red', s=2, label=f"{len(self.peaks)} peaks")
ax.imshow(self.blurred, cmap=self.plot_cmap)
ax.set(title=f"Peaks (min. distance = {self.min_peak_distance} cm = {self.min_peak_distance/self.px_res:.2f} px)", aspect='equal', xlabel='x [px]', ylabel='y [px]')
ax.legend()
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_04_peaks"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
segmentation_mask: np.ndarray,
#grow_function_cover_ratios: np.array,
#dates: np.array,
px_resolution: float,
cover_ratio: float,
date: dt.datetime,
field_id: str,
raster_meta: Dict
):
self.date = date
self.field_id = field_id
self.px_res = px_resolution
if len(segmentation_mask) > 0:
# apply gaussian filter with scaled sigma
if self.use_growfunction:
raise NotImplementedError()
#cover_ratio = grow_function_cover_ratios[dates == date]
#logger.info(f"{self.name}-{self.date.date()} -> Use cover ratio from grow function fit. ({100.*cover_ratio:.2f} %)")
else:
logger.info(f"{self.name}-{self.date.date()} -> Use standard cover ratio. ({100.*cover_ratio:.2f} %)")
self.sigma = (self.gauss_sigma_bounds[0] + cover_ratio*np.diff(self.gauss_sigma_bounds)[0]) / self.px_res
logger.info(f"{self.name}-{self.date.date()} -> Blurring with sigma = {self.sigma*px_resolution:.2f} cm = {self.sigma:.2f} px.")
self.blurred = gaussian(segmentation_mask.astype(np.float32), sigma=self.sigma)
# detect peaks
logger.info(f"{self.name}-{self.date.date()} -> Detect peaks with threshold {self.peak_threshold} and min. distance = {self.min_peak_distance} cm = {self.min_peak_distance/self.px_res:.2f} px.")
self.peaks = peak_local_max(self.blurred, min_distance=int(np.round(self.min_peak_distance/self.px_res)), threshold_abs=self.peak_threshold, exclude_border=False)
# convert peak position from pixel to cm coordinates with UTM coordinate transformation
utm_peaks, utm_transform = px_to_utm(point_cloud=self.peaks, raster_meta=raster_meta)
utm_peaks *= 100 # m * 100 = cm
n_peaks = len(self.peaks)
self.height, self.width = self.blurred.shape
logger.info(f"{self.name}-{self.date.date()} -> {n_peaks} peaks detected.")
if (self.make_orthoimage):
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name}-{self.date.date()} -> Save Gauss blurred orthoimage.")
write_onechannel_raster(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_blurred.tif"),
self.blurred,
raster_meta,
"float32")
logger.info(f"{self.name}-{self.date.date()} -> Export found peak positions as KML file.")
kml = simplekml.Kml()
for (lon, lat) in np.asarray(xy(raster_meta["transform"], *self.peaks.T)).T:
kml.newpoint(coords=[(lon, lat)])
kml.save(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_peaks.kml"))
else:
            logger.warning(f"{self.name}-{self.date.date()} -> No segmentation mask due to large cover ratio -> Skip plot.")
utm_peaks = np.array([])
# calculate UTM zone
lon, lat = np.asarray(xy(raster_meta["transform"], raster_meta["height"]//2, raster_meta["width"]//2))
utm_zone = int(np.floor((lon/360)*60+31))
utm_transform = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
self.save(obj=utm_peaks, name="plant_positions", type_="pickle")
self.save(obj=utm_transform, name="utm_transform", type_="pickle")
# plot blurred image and contrast image with peak positions
if (len(segmentation_mask) > 0) and self.plot_result:
makeDirectory(self.plot_dir)
self.plot_gauss_blur()
self.plot_peaks()
gc.collect()
class LoadPeaks(Task):
def __init__(
self,
field_id: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.field_id = field_id
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot(
self
):
logger.info(f"{self.name} -> Plot raw peaks image.")
fig, ax = plt.subplots()
ax.scatter(*self.C.T, s=2, alpha=0.8, c=self.layers, cmap=self.plot_cmap)
ax.set(title=f"{self.field_id}\nraw points", xlabel='x [cm]', ylabel='y [cm]', aspect='equal')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_01_raw"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
reduced_results: List[Dict[str, Dict]]
):
cover_ratios, dates, gps_transforms, px_resolutions, field_ids, peaks, utm_transforms, segmentation_masks = [], [], [], [], [], [], [], []
for r in reduced_results:
try:
if len(r["config"].keys()) == 1:
cover_ratios.append(r["result"]["cover_ratio"])
dates.append(r["result"]["date"])
gps_transforms.append(r["result"]["raster_meta"]["transform"])
px_resolutions.append(r["result"]["px_resolution"])
field_ids.append(r["result"]["field_id"])
segmentation_masks.append(r["result"]["segmentation_mask"])
else:
peaks.append(r["result"]["plant_positions"])
utm_transforms.append(r["result"]["utm_transform"])
except:
logger.error(r)
        assert len(np.unique(field_ids)) == 1, f"{self.name} -> Multiple field IDs!"
        assert np.unique(field_ids)[0] == self.field_id, f"{self.name} -> Wrong field ID!"
cover_ratios = np.asarray(cover_ratios)
px_resolutions = np.asarray(px_resolutions)
dates = | pd.DatetimeIndex(dates) | pandas.DatetimeIndex |
"""Perform classifications using landsat, sentinel-1 or both."""
import os
import rasterio
import rasterio.features
import numpy as np
import pandas as pd
import geopandas as gpd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from sklearn.metrics import (
f1_score, precision_score, recall_score, accuracy_score
)
from tqdm import tqdm
from imblearn.over_sampling import RandomOverSampler
from scipy.ndimage import uniform_filter
from metadata import CASE_STUDIES, DATA_DIR
def list_available_features(data_dir):
"""List all available images in a given directory and its
subdirectories.
Parameters
----------
data_dir : str
Path to the directory where images are stored.
Returns
-------
features : list of tuple
Available features as a list of tuples (label, path).
"""
features = []
for directory, _, files in os.walk(data_dir):
files = [f for f in files if f.endswith('.tif')]
for f in files:
path = os.path.join(directory, f)
label = f.replace('.tif', '')
features.append((label, path))
return features
def ndarray_from_images(images, mask):
"""Construct X ndarray from an iterable of images and according to
a provided binary raster mask.
Parameters
----------
images : iterable of numpy 2d arrays
Images as an iterable of numpy 2d arrays.
mask : binary numpy 2d array
Raster mask ; true pixels will be excluded.
Returns
-------
X : numpy array
Array of shape (n_samples, n_images).
"""
# Initialize the X array of shape (n_samples, n_images)
out_shape = (images[0][~mask].ravel().shape[0], len(images))
X = np.empty(shape=out_shape, dtype=np.float64)
# Populate with image data
for i, img in enumerate(images):
X[:, i] = img[~mask].ravel()
return X
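# --- Illustrative sketch (added for clarity): building the (n_samples, n_images) design
# matrix from two fake 5x5 images while excluding one masked row of pixels.
def _example_ndarray_from_images():
    img_a = np.random.rand(5, 5)
    img_b = np.random.rand(5, 5)
    mask = np.zeros((5, 5), dtype=bool)
    mask[0, :] = True                   # pretend the first row lies outside the study area
    X = ndarray_from_images([img_a, img_b], mask)
    # X.shape == (20, 2): 25 pixels minus the 5 masked ones, one column per image
    return X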
def get_train_test(data_dir, case_study, test_size=0.3, seed=111):
"""Construct train and test rasters from reference land cover shapefiles.
Train and test samples are randomly splitted at the polygon-level.
Parameters
----------
data_dir : str
Path to the directory where reference shapefiles are stored.
case_study : Metadata
Metadata object corresponding to a given case study. Used
to retrieve rasterization parameters.
test_size : float
Size of the test sample (between 0 and 1).
seed : int
Random seed for reproducibility.
Returns
-------
train : 2d array
Training samples as a 2d numpy array.
test : 2d array
Testing samples as a 2d numpy array.
"""
# Get built-up train and test samples
    bu = gpd.read_file(os.path.join(data_dir, 'builtup.shp'))
if bu.crs != case_study.crs:
bu = bu.to_crs(case_study.crs)
bu_train, bu_test = train_test_split(
bu, test_size=test_size, random_state=seed)
train = rasterio.features.rasterize(
shapes=((geom, 1) for geom in bu_train.geometry),
out_shape=(case_study.height, case_study.width),
transform=case_study.affine, dtype=np.uint8)
test = rasterio.features.rasterize(
shapes=((geom, 1) for geom in bu_test.geometry),
out_shape=(case_study.height, case_study.width),
transform=case_study.affine, dtype=np.uint8)
# Get non-built-up train and test samples
# Each land cover is splitted individually at the polygon-level
# Legend: so=2, lv=3, hv=4
NB_LAND_COVERS = ['baresoil', 'lowveg', 'highveg']
for i, land_cover in enumerate(NB_LAND_COVERS):
        lc = gpd.read_file(os.path.join(data_dir, land_cover + '.shp'))
if lc.crs != case_study.crs:
lc = lc.to_crs(case_study.crs)
lc_train, lc_test = train_test_split(
lc, test_size=test_size, random_state=seed)
lc_train_raster = rasterio.features.rasterize(
shapes=((geom, 1) for geom in lc_train.geometry),
out_shape=(case_study.height, case_study.width),
transform=case_study.affine, dtype=np.uint8)
lc_test_raster = rasterio.features.rasterize(
shapes=((geom, 1) for geom in lc_test.geometry),
out_shape=(case_study.height, case_study.width),
transform=case_study.affine, dtype=np.uint8)
train[lc_train_raster == 1] = i + 2
test[lc_test_raster == 1] = i + 2
return train, test
def dim_reduction(X, n_components=6):
"""Perform dimensionality reduction on input data
using PCA.
Parameters
----------
X : array
Input data as an array of shape (n_samples, n_features).
n_components : int
PCA components.
Returns
-------
X_reduced : array
Output reduced data array of shape (n_samples, n_components).
"""
    pca = PCA(n_components=n_components)
pca.fit(X)
return pca.transform(X)
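# --- Illustrative sketch (added for clarity): reducing a random 100x10 feature matrix to
# three principal components. Input values are synthetic.
def _example_dim_reduction():
    X = np.random.rand(100, 10)
    X_reduced = dim_reduction(X, n_components=3)
    # X_reduced.shape == (100, 3)
    return X_reduced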
def random_forest(X, train, mask, seed=111, **kwargs):
"""Classify image data based on a given training dataset with
the Random Forest classifier.
Parameters
----------
X : array
Image data to classify. Array of shape
(n_samples, n_features).
train : 2d array
Training samples as a 2d numpy array.
mask : 2d array
Pixels to exclude from the analysis.
seed : int
Random seed for reproducibility.
**kwargs : args
Parameters to pass to the RF classifier.
Returns
-------
probabilities : 2d array
RF probabilistic output as a 2d image.
importances : array
RF feature importances as an array of shape
(n_features).
"""
# Construct training dataset from X based on `train`
X_train = X[train[~mask].ravel() > 0, :]
y_train = train[~mask].ravel()[train[~mask].ravel() > 0]
y_train[y_train > 1] = 2
# Oversampling to handle class imbalance
ros = RandomOverSampler(random_state=seed)
    X_train, y_train = ros.fit_resample(X_train, y_train)
# Classification with RF
rf = RandomForestClassifier(random_state=seed, **kwargs)
rf.fit(X_train, y_train)
X_pred = rf.predict_proba(X)[:, 0]
# Reconstruct probabilities raster with original shape
probabilities = np.zeros(shape=mask.shape, dtype=np.float64)
probabilities[~mask] = X_pred
return probabilities, rf.feature_importances_
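# --- Illustrative sketch (added for clarity): running the random forest step on a fully
# synthetic 20x20 scene with two features. Labels 1 (built-up) and 3 (a non-built-up class,
# recoded to 2 inside the function) are placed on a few rows; no pixels are masked.
def _example_random_forest():
    rng = np.random.RandomState(0)
    mask = np.zeros((20, 20), dtype=bool)
    X = rng.rand(mask.size, 2)
    train = np.zeros((20, 20), dtype=np.uint8)
    train[:3, :] = 1
    train[-3:, :] = 3
    probabilities, importances = random_forest(X, train, mask, seed=1, n_estimators=10)
    # probabilities has the scene shape (20, 20); importances has one value per feature
    return probabilities, importances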
def assess(probabilities, test, mask, prob_threshold=0.75):
"""Compute assessment metrics based on the provided test samples.
Metrics computed: F1-score, precision, recall, and accuracy
in each land cover.
Parameters
----------
probabilities : 2d array
RF probabilistic output.
test : 2d array
Test samples.
mask : 2d array
Pixels excluded from the analysis.
prob_threshold : float
RF probabilities binary threshold.
Returns
-------
    metrics : pandas Series
Assessment metrics.
"""
metrics = | pd.Series() | pandas.Series |
# plots.py
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def randomWalk():
"""Creates plot of symmetric one-D random lattice walk"""
N = 1000 #length of random walk
s = np.zeros(N)
s[1:] = np.random.binomial(1, .5, size=(N-1,))*2-1 #coin flips
s = pd.Series(s)
s = s.cumsum() #random walk
s.plot()
plt.ylim([-50,50])
plt.savefig("randomWalk.pdf")
#randomWalk()
def biasedRandomWalk():
"""Create plots of biased random walk of different lengths."""
N = 100 #length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N-1,))*2-1 #coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum() #random walk
plt.subplot(211)
s1.plot()
N = 10000 #length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N-1,))*2-1 #coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum() #random walk
plt.subplot(212)
s1.plot()
plt.savefig("biasedRandomWalk.pdf")
#biasedRandomWalk()
def dfPlot():
"""Plot columns of DataFrame against each other."""
xvals = pd.Series(np.sqrt(np.arange(1000)))
yvals = pd.Series(np.random.randn(1000).cumsum())
df = | pd.DataFrame({'xvals':xvals,'yvals':yvals}) | pandas.DataFrame |
#!/usr/bin/env python
"""
BSD 2-Clause License
Copyright (c) 2021 (<EMAIL>)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
import argparse
import itertools
import json
import operator
import os
import re
import sys
import pickle
import math
from distutils.util import strtobool
import numpy as np
import pysam
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.backends.backend_pdf import PdfPages
from polyleven import levenshtein
from Bio import SeqIO
import seaborn as sns
import pandas as pd
from scipy import stats
###### Usage
#python plot_identity_error_alignment_normUnal.py -i basecaller1/norm_unaligned_assembly_polished basecaller2/norm_unaligned_assembly_polished basecaller3/norm_unaligned_assembly_polished -l basecaller1 basecaller2 basecaller3 -o outfolder -p appendix_outputname
#
def safe_div(x, y):
if y == 0:
return None
return x / y
plt.rcParams["patch.force_edgecolor"] = False
def plot_error_identity(df, pdf=None):
sns.set(font_scale=1)
fig = plt.figure(figsize=(13,2))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
ax = fig.add_subplot(1, 2, 1)
sns.barplot(x="basecaller", hue="genome", y="error", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Error")
plt.title("Error rate of aligned reads to reference genome")
ax.get_legend().remove()
ax = fig.add_subplot(1, 2, 2)
sns.barplot(x="basecaller", hue="genome", y="identity", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Identity")
plt.title("Identity rate of aligned reads to reference genome")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=10)
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_match_mismatch_indels(df, pdf=None, stacked=True):
sns.set(font_scale=1)
fig = plt.figure(figsize=(10,5))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
if stacked:
ax = fig.add_subplot(2, 2, 1)
ax2 = ax.twiny()
#sns.barplot(x="basecaller", hue="genome", y="match", data=df, linewidth=0, ax=ax)
#plt.xlabel("Basecallers")
#plt.ylabel("%Matches")
#plt.title("Matches")
df0 = df[['basecaller', 'genome', 'mismatch', 'deletion', 'insertion', 'unaligned']]
cols = df0.columns
u, idx = np.unique(df.basecaller.tolist(), return_index=True)
order = u[np.argsort(idx)] #[u[index] for index in sorted(idx)]
df0['basecaller'] = pd.Categorical(df0.basecaller, categories=order, ordered=True) # ['f', 'a', 'w', 'h'] # prevent sorting
df0.set_index(['basecaller', 'genome'], inplace=True)
colors = plt.cm.Paired.colors
df1 = df0.unstack(level=-1) # unstack the 'Context' column
(df1['mismatch']+df1['deletion']+df1['insertion']+df1['unaligned']).plot(kind='bar', color=[colors[1], colors[0]], rot=0, ax=ax, linewidth=0)
print(df1['mismatch']+df1['deletion']+df1['insertion']+df1['unaligned'])
(df1['deletion']+df1['insertion']+df1['unaligned']).plot(kind='bar', color=[colors[3], colors[2]], rot=0, ax=ax, linewidth=0)
(df1['insertion']+df1['unaligned']).plot(kind='bar', color=[colors[5], colors[4]], rot=0, ax=ax, linewidth=0)
df1['unaligned'].plot(kind='bar', color=[colors[7], colors[6]], rot=0, ax=ax, linewidth=0)
#legend_labels = [f'{val} ({context})' for val, context in df1.columns]
ticks = []
for r in range(df.shape[0]//2):
ticks.append(r - 0.25)
ticks.append(r + 0.05)
ax.set_xticks(ticks)
ax.set_xticklabels(['lambda', 'ecoli'] * (df.shape[0]//2), rotation=45, fontsize=8)
ax.grid(axis="x")
legend_labels = []
labels = ["mismatch", "", "deletion", "", "insertion", "", "unaligned", ""]
#for val in labels:
# if val in legend_labels:
# legend_labels.append("")
# else:
# legend_labels.append(val)
#legend_labels = [f'{val} ({context})' for val, context in df1.columns]3
ax.legend(labels, bbox_to_anchor=(-0.08, 1.2), loc=2, borderaxespad=0., ncol=4, fontsize=10) #(1.05, 1)
ax.set_ylabel("mean error in %")
ax.set_xlabel("species")
ax.set_yscale('log') #,base=20)
#ax.text(0.02, -0.2, ' '.join(order), transform=ax.transAxes, fontsize=11) #horizontalalignment='center', verticalalignment='center'
ax2.set_xlim(ax.get_xlim())
ax2.set_xticks([0.02, 1, 2, 3])
ax2.set_xticklabels(order, fontsize=10)
ax.xaxis.set_ticks_position('none')
#ax2.xaxis.set_ticks_position('none')
ax2.grid(axis="x")
#ax.legend(legend_labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel("Proportion of errors")
else:
ax = fig.add_subplot(2, 2, 1)
sns.barplot(x="basecaller", hue="genome", y="mismatch", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Mismatches")
plt.title("Mismatches")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xticks(fontsize=8)
#ax._legend.remove()
ax = fig.add_subplot(2, 2, 3)
sns.barplot(x="basecaller", hue="genome", y="deletion", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Deletion")
plt.title("Deletion")
ax.get_legend().remove()
plt.xticks(fontsize=8)
#ax._legend.remove()
ax = fig.add_subplot(2, 2, 4)
sns.barplot(x="basecaller", hue="genome", y="insertion", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Insertion")
plt.title("Insertion")
ax.get_legend().remove()
plt.xticks(fontsize=8)
#ax._legend.remove()
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_boxplot(data, labels, pdf=None, title="relative read length", ylabel="read length / reference length in %", reference=None):
sns.set(font_scale=1)
fig = plt.figure(figsize=(6,2))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
ax = fig.add_subplot(1, 1, 1)
box = plt.boxplot(data, patch_artist=True)
ticks = np.arange(1, len(labels)+1)
plt.xticks(ticks, labels, rotation=45, ha="right")
plt.ylabel(ylabel)
plt.xlabel("Basecaller")
plt.title(title)
#plt.yscale('log') #,base=20)
if reference is not None:
plt.axhline(reference, c='r')
colors = len(labels[-3:]) * ['#EAEAF2'] + 3* ["#88888C"]
#colors2 = len(labels[-3:]) * ['#DD8655'] + 3* ["#181819"]
for patch, color in zip(box['boxes'], colors):
patch.set_facecolor(color)
#med.set_facecolor(color2)
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def make_argparser():
parser = argparse.ArgumentParser(description='Prints summary about alignment of basecalled reads.')
parser.add_argument('-i', '--fastq', nargs="*",
help='FASTA/Q files with basecalled reads.')
parser.add_argument('-l', '--labels', nargs="*",
help='list of labels. same order as list with fastq/a files')
parser.add_argument('-o', '--out',
help='out path.')
parser.add_argument('-p', '--prefix', default="basecalled",
help='out path.')
parser.add_argument('--stacked', type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True,
help='stack error rates in plot.')
return parser
def median_abs_dev(x):
return(stats.median_absolute_deviation(x))
def report_errors(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
fastq = args.fastq
basecallers = args.labels
out = args.out
prefix = args.prefix
stacked = args.stacked
with PdfPages(out + "/{}_error_alignment_rates.pdf".format(prefix)) as pdf:
lambd = []
ecoli = []
df = pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned', 'identity', 'error', 'mqual', 'relative read length', 'aligned \% of read'])
df_std = pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned','identity', 'error', 'mqual', 'relative read length', 'aligned \% of read'])
df_all = pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned', 'identity', 'error', 'mqual', 'relative read length', 'aligned \% of read'])
df_all_std = pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned','identity', 'error', 'mqual', 'relative read length', 'aligned \% of read'])
df_median = | pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned', 'identity', 'error', 'mqual', 'relative read length', 'aligned \% of read']) | pandas.DataFrame |
import logging
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster._kmeans import _mini_batch_convergence
from sklearn.utils.validation import check_random_state
from progressivis import ProgressiveError, SlotDescriptor
from progressivis.core.utils import indices_len
from ..table.module import TableModule
from ..table import Table, TableSelectedView
from ..table.dshape import dshape_from_dtype, dshape_from_columns
from ..io import DynVar
from ..utils.psdict import PsDict
from ..core.decorators import process_slot, run_if_any
from ..table.filtermod import FilterMod
from ..stats import Var
logger = logging.getLogger(__name__)
class MBKMeans(TableModule):
"""
Mini-batch k-means using the sklearn implementation.
"""
parameters = [('samples', np.dtype(int), 50)]
inputs = [
SlotDescriptor('table', type=Table, required=True),
SlotDescriptor('var', type=Table, required=True),
SlotDescriptor('moved_center', type=PsDict, required=False)
]
outputs = [
SlotDescriptor('labels', type=Table, required=False),
SlotDescriptor('conv', type=PsDict, required=False)
]
def __init__(self, n_clusters, columns=None, batch_size=100, tol=0.01,
is_input=True, is_greedy=True, random_state=None, **kwds):
super().__init__(**kwds)
self.mbk = MiniBatchKMeans(n_clusters=n_clusters,
batch_size=batch_size,
verbose=True,
tol=tol,
random_state=random_state)
self.columns = columns
self.n_clusters = n_clusters
self.default_step_size = 100
self._labels = None
self._remaining_inits = 10
self._initialization_steps = 0
self._is_input = is_input
self._tol = tol
self._conv_out = PsDict({'convergence': 'unknown'})
self.params.samples = n_clusters
self._is_greedy = is_greedy
self._arrays = None
self.convergence_context = {}
def predict_step_size(self, duration):
p = super().predict_step_size(duration)
return max(p, self.n_clusters)
def reset(self, init='k-means++'):
self.mbk = MiniBatchKMeans(n_clusters=self.mbk.n_clusters,
batch_size=self.mbk.batch_size,
init=init,
# tol=self._rel_tol,
random_state=self.mbk.random_state)
dfslot = self.get_input_slot('table')
dfslot.reset()
self.set_state(self.state_ready)
self.convergence_context = {}
# do not resize result to zero
# it contains 1 row per centroid
if self._labels is not None:
self._labels.truncate()
def starting(self):
super().starting()
opt_slot = self.get_output_slot('labels')
if opt_slot:
logger.debug('Maintaining labels')
self.maintain_labels(True)
else:
logger.debug('Not maintaining labels')
self.maintain_labels(False)
def maintain_labels(self, yes=True):
if yes and self._labels is None:
self._labels = Table(self.generate_table_name('labels'),
dshape="{labels: int64}",
create=True)
elif not yes:
self._labels = None
def labels(self):
return self._labels
def get_data(self, name):
if name == 'labels':
return self.labels()
if name == 'conv':
return self._conv_out
return super().get_data(name)
def is_greedy(self):
return self._is_greedy
def _process_labels(self, locs):
labels = self.mbk.labels_
u_locs = locs & self._labels.index # ids to update
if not u_locs: # shortcut
self._labels.append({'labels': labels},
indices=locs)
return
a_locs = locs - u_locs # ids to append
if not a_locs: # 2nd shortcut
self._labels.loc[locs, 'labels'] = labels
return
df = | pd.DataFrame({'labels': labels}, index=locs) | pandas.DataFrame |
import pathlib
import pandas as pd
import pytest
from pytest import approx
import process_improve.batch.features as features
# General
@pytest.fixture(scope="module")
def batch_data():
"""Returns a small example of a batch data set."""
folder = (
pathlib.Path(__file__).parents[2] / "process_improve" / "datasets" / "batch"
)
return pd.read_csv(
folder / "batch-fake-data.csv",
index_col=1,
header=0,
)
# return
def test_verify_file(batch_data):
df = batch_data
assert df.shape[0] == 501
assert df.shape[1] == 5
def test_corner_cases(batch_data):
"""
Certain corner cases: to ensure coverage in
"""
df = batch_data
data = df.set_index(pd.to_datetime(df["DateTime"])).drop("DateTime", axis=1)
step1 = features.f_mean(data).reset_index()
# Tests removal of internal columns.
_, tags, *_ = features._prepare_data(step1)
assert "__phase_grouper__" not in tags
assert "__batch_grouper__" not in tags
# Test calling a single tag name
assert features.f_mean(data, tags="Temp1").loc[:, "Temp1_mean"].values[0] == approx(
-19.482056, rel=1e-7
)
def test_age_col_specification(batch_data):
"""
Some features, like slopes, need to know a value from the x-axis. Often this is a column
representing the time since start of the batch: age_col
"""
df = batch_data
df = df.drop("DateTime", axis=1)
# Check that the index is currently the time-tag
assert df.index.name == "UCI_minutes"
# This test is for the case when the time_tag is NOT the index. So reset that:
df = df.reset_index()
slopes = features.f_slope(
df, x_axis_tag="UCI_minutes", tags=["Temp1", "Temp2"], age_col="UCI_minutes"
)
assert slopes.shape == (1, 2)
def test_data_preprocessing(batch_data):
"""Simple tests regarding the mean, median, etc. Location-based features."""
df = batch_data
df = df.set_index( | pd.to_datetime(df["DateTime"]) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas
import decimal
class Response(object):
def __init__(self, data=None):
self.__data__ = data
def data(self):
return | pandas.DataFrame(self.__data__) | pandas.DataFrame |
from pymongo import MongoClient
import pandas as pd
pd.set_option("display.max_rows",None,"display.max_columns",None)
pd.options.mode.chained_assignment = None
import datetime
from datetime import datetime
server = MongoClient('mongodb://localhost:27017')
db=server['salesfokuz_lead']
leadsactivity = db['lead_log']
dadb = server['da']
meeting_time_users= dadb['meetingtime_user_id']
meeting_time_organizations= dadb['meetingtime_organization_id']
livepipeline = [
{
"$addFields": {
"punch_status": {
"$toString": "$data.punch_status"
}}
},
{
"$addFields": {
"punch_in": {
"$toString": "$data.punch_in_datetime"
}}
},
{
"$addFields": {
"punch_out": {
"$toString": "$data.punch_out_datetime"
}}
},
{
"$addFields": {
"organization_id": {
"$toString": "$data.organization_id"
}
}
},
{
'$project': {
'_id': 0,
'user_id': 1,
"module": 1,
"action": 1,
"lead_id":1,
"punch_in":1,
"organization_id":1,
"punch_out": 1,
"punch_status": 1,
}
}
]
################## END #######################
################## aggregating data from mongodb using the pipeline #############
leadlogdata = list(leadsactivity.aggregate(livepipeline))
# print(leadlogdata)
################## END #######################
leaddf = pd.DataFrame(leadlogdata)
# print(leaddf)
leaddf.fillna(0, inplace = True)
leaddf=leaddf.replace('', 0)
leaddf=leaddf.drop(leaddf[leaddf.punch_in == 0].index)
leaddf=leaddf.drop(leaddf[leaddf.punch_out == 0].index)
leaddf=leaddf.drop(leaddf[leaddf.action == 'Cancelled the'].index)
leaddf=leaddf[leaddf.module == 'meeting']
# print(leaddf)
################################################
leaddf['punch_in']=pd.to_datetime(leaddf['punch_in'])
leaddf['punch_out']= | pd.to_datetime(leaddf['punch_out']) | pandas.to_datetime |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
import numpy as np
import itertools
import json
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import confusion_matrix
import google.datalab.bigquery as bq
from . import _util
class ConfusionMatrix(object):
"""Represents a confusion matrix."""
def __init__(self, cm, labels):
"""
Args:
cm: a 2-dimensional matrix with row index being target, column index being predicted,
and values being count.
labels: the labels whose order matches the row/column indexes.
"""
self._cm = cm
self._labels = labels
@staticmethod
def from_csv(input_csv, headers=None, schema_file=None):
"""Create a ConfusionMatrix from a csv file.
Args:
input_csv: Path to a Csv file (with no header). Can be local or GCS path.
headers: Csv headers. If present, it must include 'target' and 'predicted'.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
If present, it must include 'target' and 'predicted' columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if both headers and schema_file are None, or it does not include 'target'
or 'predicted' columns.
"""
if headers is not None:
names = headers
elif schema_file is not None:
with _util.open_local_or_gcs(schema_file, mode='r') as f:
schema = json.load(f)
names = [x['name'] for x in schema]
else:
raise ValueError('Either headers or schema_file is needed')
all_files = _util.glob_files(input_csv)
all_df = []
for file_name in all_files:
with _util.open_local_or_gcs(file_name, mode='r') as f:
all_df.append(pd.read_csv(f, names=names))
df = | pd.concat(all_df, ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
from typing import Mapping, List, Tuple
from collections import defaultdict, OrderedDict
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_boston, load_iris, load_wine, load_digits, \
load_breast_cancer, load_diabetes, fetch_mldata
from matplotlib.collections import LineCollection
import time
from pandas.api.types import is_string_dtype, is_object_dtype, is_categorical_dtype, \
is_bool_dtype
from sklearn.ensemble.partial_dependence import partial_dependence, \
plot_partial_dependence
from sklearn import svm
from sklearn.neighbors import KNeighborsRegressor
from pdpbox import pdp
from rfpimp import *
from scipy.integrate import cumtrapz
from stratx.partdep import *
from stratx.ice import *
import inspect
import statsmodels.api as sm
from sklearn.datasets import load_boston
from stratx.partdep import *
def df_string_to_cat(df: pd.DataFrame) -> dict:
catencoders = {}
for colname in df.columns:
if is_string_dtype(df[colname]) or is_object_dtype(df[colname]):
df[colname] = df[colname].astype('category').cat.as_ordered()
catencoders[colname] = df[colname].cat.categories
return catencoders
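# Illustrative use of df_string_to_cat (toy frame below is assumed):
#   df = pd.DataFrame({'state': ['CA', 'CO', 'CA'], 'x': [1, 2, 3]})
#   encoders = df_string_to_cat(df)   # 'state' becomes an ordered categorical
#   list(encoders['state'])           # -> ['CA', 'CO'], the category index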
def toy_weather_data():
def temp(x): return np.sin((x + 365 / 2) * (2 * np.pi) / 365)
def noise(state): return np.random.normal(-5, 5, sum(df['state'] == state))
df = pd.DataFrame()
df['dayofyear'] = range(1, 365 + 1)
df['state'] = np.random.choice(['CA', 'CO', 'AZ', 'WA'], len(df))
df['temperature'] = temp(df['dayofyear'])
df.loc[df['state'] == 'CA', 'temperature'] = 70 + df.loc[
df['state'] == 'CA', 'temperature'] * noise('CA')
df.loc[df['state'] == 'CO', 'temperature'] = 40 + df.loc[
df['state'] == 'CO', 'temperature'] * noise('CO')
df.loc[df['state'] == 'AZ', 'temperature'] = 90 + df.loc[
df['state'] == 'AZ', 'temperature'] * noise('AZ')
df.loc[df['state'] == 'WA', 'temperature'] = 60 + df.loc[
df['state'] == 'WA', 'temperature'] * noise('WA')
return df
def weather():
df_yr1 = toy_weather_data()
df_yr1['year'] = 1980
df_yr2 = toy_weather_data()
df_yr2['year'] = 1981
df_yr3 = toy_weather_data()
df_yr3['year'] = 1982
df_raw = pd.concat([df_yr1, df_yr2, df_yr3], axis=0)
df = df_raw.copy()
catencoders = df_string_to_cat(df_raw.copy())
# states = catencoders['state']
# print(states)
#
# df_cat_to_catcode(df)
names = {'CO': 5, 'CA': 10, 'AZ': 15, 'WA': 20}
df['state'] = df['state'].map(names)
catnames = OrderedDict()
for k,v in names.items():
catnames[v] = k
X = df.drop('temperature', axis=1)
y = df['temperature']
# leaf_xranges, leaf_slopes, slope_counts_at_x, dx, slope_at_x, pdpx, pdpy, ignored_ = \
# partial_dependence(X=X, y=y, colname='dayofyear',
# verbose=True)
# print(pdpx)
# print(pdpy)
plot_catstratpd(X, y, 'state', 'temperature', catnames=catnames,
# min_samples_leaf=30,
n_trials=10,
min_y_shifted_to_zero=True,
show_x_counts=False,
bootstrap=True,
yrange=(-2, 60),
figsize=(2.1,2.5)
)
plt.show()
def bigX_data(n):
x1 = np.random.uniform(-1, 1, size=n)
x2 = np.random.uniform(-1, 1, size=n)
x3 = np.random.uniform(-1, 1, size=n)
y = 0.2 * x1 - 5 * x2 + 10 * x2 * np.where(x3 >= 0, 1, 0) + np.random.normal(0, 1,
size=n)
df = pd.DataFrame()
df['x1'] = x1
df['x2'] = x2
df['x3'] = x3
df['y'] = y
return df
def bigX():
print(f"----------- {inspect.stack()[0][3]} -----------")
n = 1000
df = bigX_data(n=n)
X = df.drop('y', axis=1)
y = df['y']
# plot_stratpd_gridsearch(X, y, 'x2', 'y',
# min_samples_leaf_values=[2,5,10,20,30],
# # nbins_values=[1,3,5,6,10],
# yrange=(-4,4))
#
# plt.tight_layout()
# plt.show()
# return
# Partial deriv is just 0.2 so this is correct. flat deriv curve, net effect line at slope .2
# ICE is way too shallow and not line at n=1000 even
fig, axes = plt.subplots(2, 2, figsize=(4, 4), sharey=True)
# Partial deriv wrt x2 is -5 plus 10 about half the time so about 0
# Should not expect a criss-cross like ICE since deriv of 1_x3>=0 is 0 everywhere
# wrt to any x, even x3. x2 *is* affecting y BUT the net effect at any spot
# is what we care about and that's 0. Just because marginal x2 vs y shows non-
# random plot doesn't mean that x2's net effect is nonzero. We are trying to
# strip away x1/x3's effect upon y. When we do, x2 has no effect on y.
# Ask what is net effect at every x2? 0.
plot_stratpd(X, y, 'x2', 'y', ax=axes[0, 0], yrange=(-4, 4),
show_slope_lines=True,
n_trials=1,
min_samples_leaf=20,
pdp_marker_size=2)
# Partial deriv wrt x3 of 1_x3>=0 is 0 everywhere so result must be 0
plot_stratpd(X, y, 'x3', 'y', ax=axes[1, 0], yrange=(-4, 4),
show_slope_lines=True,
n_trials=1,
min_samples_leaf=20,
pdp_marker_size=2)
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print(f"RF OOB {rf.oob_score_}")
ice = predict_ice(rf, X, 'x2', 'y', numx=100)
plot_ice(ice, 'x2', 'y', ax=axes[0, 1], yrange=(-4, 4))
ice = predict_ice(rf, X, 'x3', 'y', numx=100)
plot_ice(ice, 'x3', 'y', ax=axes[1, 1], yrange=(-4, 4))
axes[0, 1].get_yaxis().set_visible(False)
axes[1, 1].get_yaxis().set_visible(False)
axes[0, 0].set_title("StratPD", fontsize=10)
axes[0, 1].set_title("PD/ICE", fontsize=10)
plt.show()
def boston():
boston = load_boston()
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
X = df.drop('MEDV', axis=1)
y = df['MEDV']
# WORKS ONLY WITH DATAFRAMES AT MOMENT
plt.figure(figsize=(3.5,3.5))
plot_stratpd(X, y, 'LSTAT', 'MEDV', yrange=(-20, 5), n_trials=10)
plt.tight_layout()
plt.savefig("../../images/boston_LSTAT.svg")
plt.show()
def diabetes():
diabetes = load_diabetes()
df = | pd.DataFrame(diabetes.data, columns=diabetes.feature_names) | pandas.DataFrame |
import sys, os, time, datetime, warnings, configparser
import pandas as pd
import numpy as np
import talib
import concurrent.futures
import matplotlib.pyplot as plt
from tqdm import tqdm
cur_path = os.path.dirname(os.path.abspath(__file__))
for _ in range(2):
root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_path))]
cur_path = root_path
sys.path.append(root_path + "/" + 'Source/FetchData/')
sys.path.append(root_path + "/" + 'Source/DataBase/')
from Fetch_Data_Stock_US_StockList import getStocksList_US
from Fetch_Data_Stock_US_Daily import updateStockData_US_Daily
from Fetch_Data_Stock_US_Weekly import updateStockData_US_Weekly
from Fetch_Data_Stock_US_Monthly import updateStockData_US_Monthly
from DB_API import queryStock
def get_single_stock_data_daily(root_path, symbol):
'''
All data is from quandl wiki dataset
Feature set: [Open High Low Close Volume Ex-Dividend Split Ratio Adj. Open Adj. High Adj. Low
Adj. Close Adj. Volume]
'''
df, lastUpdateTime = queryStock(root_path, "DB_STOCK", "SHEET_US", "_DAILY", symbol, "daily_update")
df.index = pd.to_datetime(df.index)
if df.empty:
print("daily empty df", symbol)
return df
if 'adj_close' in df:
        df = df.drop('close', axis=1)
df = df.rename(columns = {'adj_close':'close'})
return df
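# Usage sketch (symbol is hypothetical; requires the local stock DB described above):
#   daily = get_single_stock_data_daily(root_path, 'AAPL')
#   # -> daily OHLCV frame indexed by datetime, with 'adj_close' renamed to 'close'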
def get_single_stock_data_weekly(root_path, symbol):
'''
All data is from quandl wiki dataset
Feature set: [Open High Low Close Volume Ex-Dividend Split Ratio Adj. Open Adj. High Adj. Low
Adj. Close Adj. Volume]
'''
df, lastUpdateTime = queryStock(root_path, "DB_STOCK", "SHEET_US", "_WEEKLY", symbol, "weekly_update")
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
from pathlib import Path
import logging
import numpy as np
import pandas as pd
from pytest import approx, mark
from lenskit.algorithms.user_knn import UserUser
from lenskit.algorithms.item_knn import ItemItem
from lenskit.algorithms.basic import PopScore
from lenskit.algorithms.ranking import PlackettLuce
from lenskit.algorithms import Recommender
from lenskit.util.test import ml_test, demo_recs
from lenskit.metrics.topn import _dcg, precision, recall
from lenskit import topn, batch, crossfold as xf
_log = logging.getLogger(__name__)
def test_split_keys():
rla = topn.RecListAnalysis()
recs, truth = topn._df_keys(['algorithm', 'user', 'item', 'rank', 'score'],
['user', 'item', 'rating'])
assert truth == ['user']
assert recs == ['algorithm', 'user']
def test_split_keys_gcol():
recs, truth = topn._df_keys(['algorithm', 'user', 'item', 'rank', 'score', 'fishtank'],
['user', 'item', 'rating'],
['algorithm', 'fishtank', 'user'])
assert truth == ['user']
assert recs == ['algorithm', 'fishtank', 'user']
def test_run_one():
rla = topn.RecListAnalysis()
rla.add_metric(topn.precision)
rla.add_metric(topn.recall)
recs = | pd.DataFrame({'user': 1, 'item': [2]}) | pandas.DataFrame |
import sys
import glob
import pandas as pd
from flask import Flask
from flask import jsonify
# list of dataframes
dfs = []
# Read the CSV files
for f in glob.glob("Firewall*.csv"):
print("Reading file: [%s]" % f)
local_df = pd.read_csv(f, low_memory=False)
dfs.append(local_df)
full_df = | pd.concat(dfs) | pandas.concat |
import gc as _gc
import pandas as _pd
import numpy as _np
from . import databases as _databases
from . import profiles as _profiles
from . import errors  # assumed package-local module providing ParserError used below
class Columns(_databases.Columns):
"""
Container for the columns names defined in this module.
"""
SPLIT_SUF = '_SPLIT'
REF = 'REF'
QRY = 'QRY'
REF_SPLIT = '{}{}'.format(REF, SPLIT_SUF)
QRY_SPLIT = '{}{}'.format(QRY, SPLIT_SUF)
PROF_Q = _databases.Columns.PROF_Q
PROF_A = _databases.Columns.PROF_A
STR_SEP = '|'
def get_IDs_names(
species,
):
"""
Returns dict of KEGG Organism IDs as keys and biological names as values.
Parameters
-------
species: list of str
List of full biological names to convert into KEGG Organism IDs.
Returns
------
dict
"""
kegg_db = _databases.KEGG('Orthology')
kegg_db.parse_organism_info(
organism=None,
reference_species=species,
IDs=None,
X_ref=None,
KOs=None,
IDs_only=True,
)
return {k.lower(): v for k, v in kegg_db.ID_name.items()}
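# Illustrative call (species names are assumptions):
#   ids = get_IDs_names(['Saccharomyces cerevisiae', 'Escherichia coli'])
#   # -> the KEGG Organism ID / biological name mapping described in the docstring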
def profilize_organism(*args, **kwargs):
"""
Returns pandas.DataFrame with Phylogenetic Profile for each ORF name of an
organism.
Parameters
-------
organism: str
Full biological name of the organism.
reference_species: list of str
List of full biological names to build the Phylogenetic Profile.
IDs: str, path
Filename of the KEGG Organism IDs. Downloaded to a temporary file if
<None>.
X_ref: str, path
Filename of the ORF-KEGG Orthology Group cross-reference.
Downloaded to a temporary file if <None>.
KOs: str, path
Filename of the KEGG Orthology Group-Organism cross-reference.
Downloaded to a temporary file if <None>.
threads: int
Number of threads to utilize when downloading from KEGG. More means
faster but can make KEGG block the download temporarily. Default: <2>
Returns
------
pandas.DataFrame
"""
kegg_db = _databases.KEGG('Orthology')
kegg_db.parse_organism_info(*args, **kwargs)
return kegg_db.organism_info.drop(columns=_databases.Columns.KEGG_ID)
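# Usage sketch (organism and reference species are assumptions):
#   profile_df = profilize_organism(organism='Saccharomyces cerevisiae',
#                                   reference_species=['Escherichia coli',
#                                                      'Homo sapiens'],
#                                   IDs=None, X_ref=None, KOs=None, threads=2)
#   # -> one Phylogenetic Profile row per ORF name, as a pandas.DataFrame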
def read_sga(
filename,
version=2,
):
"""
Returns pandas.DataFrame with Genetic Interaction Network from
the Costanzo's SGA experiment either version 1 or 2.
Parameters
-------
filename: str, path
Filename of the SGA.
version: int
Version number of the Costanzo's SGA experiment. 1 or 2 available.
Returns
-------
pandas.DataFrame
"""
if version == 1:
sga = _databases.SGA1()
elif version == 2:
sga = _databases.SGA2()
else:
raise errors.ParserError("Only versions 1 and 2 of Costanzo's SGA experiment are supported.")
sga.parse(filename=filename)
return sga.sga
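# Usage sketch (file name is hypothetical):
#   gin = read_sga('SGA_ExE.txt', version=2)
#   # -> Genetic Interaction Network frame from Costanzo's SGA v2 release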
def read_profiles(
filename,
**kwargs
):
"""
Returns pandas.Series with prwlr.profiles.Profile objects from CSV file.
Together with prwlr.core.save_profiles provides a convenient way of
saving/reading-in prwlr.profiles.Profile objects to/from a flat text file.
Parameters
-------
filename: str, path
CSV file name.
Returns
------
pandas.Series
"""
ref_qry_df = | _pd.read_csv(filename, **kwargs) | pandas.read_csv |
import _io
import random
import numpy as np
import pandas as pd
import networkx as nx
from pandas.core.indexing import IndexingError
from recommenders.lod_reordering import LODPersonalizedReordering
import evaluation_utils as eval
class PathReordering(LODPersonalizedReordering):
def __init__(self, train_file: str, output_rec_file: str, prop_path: str, prop_cols: list, cols_used: list,
n_reorder: int,
policy: str, p_items: float, hybrid=False, n_sentences=3):
"""
        Path Reordering class: this algorithm reorders the output of another recommendation algorithm based on the
        best path between a historic item and a recommended one. The best paths are extracted based on the value for each
        object of the LOD within the semantic profile
        :param train_file: train file on which the recommendations were computed
:param output_rec_file: output file of the recommendation algorithm
:param prop_path: path to the properties on dbpedia or wikidata
:param prop_cols: columns of the property set
:param cols_used: columns used from the test and train set
:param policy: the policy to get the historic items to get the best paths. Possible values: 'all' for all items
'last' for the last interacted items, 'first' for the first interacted items and 'random' for
the random interacted items
        :param p_items: percentage from 0 to 1 of items to consider in the policy. E.g. with policy 'last' and p_items = 0.1,
                        the paths will consider only the last 0.1 * len(items interacted). If p_items is greater than 1,
                        the policy is applied to that absolute number of items from the user's history
        :param hybrid: whether the reordering of the recommendations should [True] or should not consider the score from the recommender
:param n_sentences: number of paths to generate the sentence of explanation
"""
self.policy = policy
self.p_items = p_items
self.output_name = 'path[policy=' + str(policy) + "_items=" + str(p_items).replace('.', '') + "_reorder=" + str(
n_reorder) + "]"
if self.policy == 'random':
random.seed(42)
if hybrid:
self.output_name = self.output_name[:-1] + "_hybrid]"
super().__init__(train_file, output_rec_file, self.output_name, prop_path, prop_cols, cols_used, n_reorder,
hybrid, n_sentences)
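    # Instantiation sketch (every path and column list below is hypothetical):
    #   pr = PathReordering('train.csv', 'recs.csv', 'props.csv',
    #                       prop_cols=['item_id', 'prop', 'obj'],
    #                       cols_used=['user_id', 'item_id', 'interaction'],
    #                       n_reorder=10, policy='last', p_items=0.1)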
def reorder(self):
"""
        Function that reorders the recommendations made by the recommendation algorithm based on a TF-IDF adapted to
        the LOD, where the words of a document are the values of the properties of the items the user interacted with,
        and the documents are the properties of all items
:return: file with recommendations for every user reordered
"""
reorder = pd.DataFrame({'user_id': pd.Series([], dtype=int),
'item_id': | pd.Series([], dtype=int) | pandas.Series |
import copy
import time
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, IterableDataset
class Column(object):
"""A column. Data is write-once, immutable-after.
Typical usage:
col = Column('myCol').Fill(data).SetDistribution(domain_vals)
"data" and "domain_vals" are NOT copied.
"""
def __init__(self, name, distribution_size=None, pg_name=None):
self.name = name
# Data related fields.
self.data = None
self.all_distinct_values = None
self.distribution_size = distribution_size
# pg_name is the name of the corresponding column in Postgres. This is
# put here since, e.g., PG disallows whitespaces in names.
self.pg_name = pg_name if pg_name else name
self._val_to_bin_cache = {}
def Name(self):
"""Name of this column."""
return self.name
def DistributionSize(self):
"""This column will take on discrete values in [0, N).
Used to dictionary-encode values to this discretized range.
"""
return self.distribution_size
def BinToVal(self, bin_id):
assert bin_id >= 0 and bin_id < self.distribution_size, bin_id
return self.all_distinct_values[bin_id]
def ValToBin(self, val):
if val in self._val_to_bin_cache:
return self._val_to_bin_cache[val]
if isinstance(self.all_distinct_values, list):
return self.all_distinct_values.index(val)
inds = np.where(self.all_distinct_values == val)
if len(inds[0]) <= 0:
raise IndexError("Value not found")
res = inds[0][0]
self._val_to_bin_cache[val] = res
return res
def SetDistribution(self, distinct_values):
"""This is all the values this column will ever see."""
assert self.all_distinct_values is None
# pd.isnull returns true for both np.nan and np.datetime64('NaT').
is_nan = pd.isnull(distinct_values)
contains_nan = np.any(is_nan)
dv_no_nan = distinct_values[~is_nan]
        # IMPORTANT: np.sort puts NaT values at the beginning and NaN values at
        # the end; for our purposes we always add any null value to the beginning.
vs = np.sort(np.unique(dv_no_nan))
if contains_nan and np.issubdtype(distinct_values.dtype, np.datetime64):
vs = np.insert(vs, 0, np.datetime64('NaT'))
elif contains_nan:
vs = np.insert(vs, 0, np.nan)
if self.distribution_size is not None:
assert len(vs) == self.distribution_size
self.all_distinct_values = vs
self.distribution_size = len(vs)
return self
def Fill(self, data_instance, infer_dist=False):
assert self.data is None
self.data = data_instance
# If no distribution is currently specified, then infer distinct values
# from data.
if infer_dist:
self.SetDistribution(self.data)
return self
def __repr__(self):
return 'Column({}, distribution_size={})'.format(
self.name, self.distribution_size)
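# Typical Column lifecycle, mirroring the class docstring (values are assumed):
#   col = Column('myCol').Fill(np.array([3, 1, 3, 2]), infer_dist=True)
#   col.DistributionSize()   # -> 3 distinct values
#   col.ValToBin(3)          # -> 2 (position of 3 in the sorted distinct values)
#   col.BinToVal(0)          # -> 1 (smallest distinct value)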
class Table(object):
"""A collection of Columns."""
def __init__(self, name, columns, pg_name=None, validate_cardinality=True):
"""Creates a Table.
Args:
name: Name of this table object.
columns: List of Column instances to populate this table.
pg_name: name of the corresponding table in Postgres.
"""
self.name = name
if validate_cardinality:
self.cardinality = self._validate_cardinality(columns)
else:
# Used as a wrapper, not a real table.
self.cardinality = None
self.columns = columns
# Bin to val funcs useful for sampling. Takes
# (col 1's bin id, ..., col N's bin id)
# and converts it to
# (col 1's val, ..., col N's val).
self.column_bin_to_val_funcs = [c.BinToVal for c in columns]
self.val_to_bin_funcs = [c.ValToBin for c in columns]
self.name_to_index = {c.Name(): i for i, c in enumerate(self.columns)}
if pg_name:
self.pg_name = pg_name
else:
self.pg_name = name
def __repr__(self):
return '{}({})'.format(self.name, self.columns)
def _validate_cardinality(self, columns):
"""Checks that all the columns have same the number of rows."""
cards = [len(c.data) for c in columns]
c = np.unique(cards)
assert len(c) == 1, c
return c[0]
def to_df(self):
return pd.DataFrame({c.name: c.data for c in self.columns})
def Name(self):
"""Name of this table."""
return self.name
def Columns(self):
"""Return the list of Columns under this table."""
return self.columns
def ColumnIndex(self, name):
"""Returns index of column with the specified name."""
assert name in self.name_to_index
return self.name_to_index[name]
class CsvTable(Table):
def __init__(self,
name,
filename_or_df,
cols,
type_casts,
pg_name=None,
pg_cols=None,
dropna=False,
is_str_col=False,
order_seed=None,
char_limit=200,
tie_cols=False,
**kwargs):
"""Accepts same arguments as pd.read_csv().
Args:
filename_or_df: pass in str to reload; otherwise accepts a loaded
                pd.DataFrame.
"""
self.name = name
self.pg_name = pg_name
self.tie_cols = tie_cols
if isinstance(filename_or_df, str):
self.data = self._load(filename_or_df, cols, **kwargs)
else:
assert (isinstance(filename_or_df, pd.DataFrame))
self.data = filename_or_df
if is_str_col:
self.data = self._separate_characters(self.data, cols, char_limit)
if order_seed is not None:
ordering = self.data.columns.tolist()
rng = np.random.RandomState(order_seed)
rng.shuffle(ordering)
print(
"Rearranging columns from", self.data.columns.tolist(),
"to", ordering, "seed", order_seed)
self.data = self.data[ordering]
self.dropna = dropna
if dropna:
# NOTE: this might make the resulting dataframe much smaller.
self.data = self.data.dropna()
cols = self.data.columns
self.columns = self._build_columns(self.data, cols, type_casts, pg_cols)
super(CsvTable, self).__init__(name, self.columns, pg_name)
def _load(self, filename, cols, **kwargs):
print('Loading csv...', end=' ')
s = time.time()
# Use [cols] here anyway to reorder columns by 'cols'.
df = pd.read_csv(filename, usecols=cols, **kwargs)[cols]
print('done, took {:.1f}s'.format(time.time() - s))
return df
def _build_columns(self, data, cols, type_casts, pg_cols):
"""Example args:
cols = ['Model Year', 'Reg Valid Date', 'Reg Expiration Date']
type_casts = {'Model Year': int}
Returns: a list of Columns.
"""
print('Parsing...', end=' ')
s = time.time()
for col, typ in type_casts.items():
if col not in data:
continue
if typ != np.datetime64:
data[col] = data[col].astype(typ, copy=False)
else:
# Both infer_datetime_format and cache are critical for perf.
data[col] = pd.to_datetime(data[col],
infer_datetime_format=True,
cache=True)
# Discretize & create Columns.
columns = []
if pg_cols is None:
pg_cols = [None] * len(cols)
if self.tie_cols:
vocab = np.concatenate([
data[c].value_counts(dropna=False).index.values for c in cols
])
vocab = np.sort(np.unique(vocab))
else:
vocab = None
for c, p in zip(cols, pg_cols):
col = Column(c, pg_name=p)
col.Fill(data[c])
# dropna=False so that if NA/NaN is present in data,
# all_distinct_values will capture it.
#
# For numeric: np.nan
# For datetime: np.datetime64('NaT')
# For strings: ?? (haven't encountered yet)
#
# To test for former, use np.isnan(...).any()
# To test for latter, use np.isnat(...).any()
if vocab is not None:
col.SetDistribution(vocab)
else:
col.SetDistribution(data[c].value_counts(dropna=False).index.values)
columns.append(col)
print('done, took {:.1f}s'.format(time.time() - s))
return columns
def _separate_characters(self, data, cols, limit):
assert len(cols) == 1, "should only have 1 str col"
str_col = data[cols[0]]
return pd.DataFrame(str_col.apply(lambda x: list(x[:limit])).tolist()).fillna(value="$")
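# Construction sketch (file name, columns and type casts are assumptions):
#   table = CsvTable('Census', 'census.csv', cols=['age', 'workclass'],
#                    type_casts={'age': int})
#   table.cardinality         # number of rows shared by all columns
#   table.ColumnIndex('age')  # position of the 'age' Column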
class TableDataset(Dataset):
"""Wraps a Table and yields each row as a Dataset element."""
def __init__(self, table, input_encoding=None):
"""Wraps a Table.
Args:
table: the Table.
"""
super(TableDataset, self).__init__()
self.table = copy.deepcopy(table)
print('Discretizing table...', end=' ')
s = time.time()
        # [cardinality, num cols].
self.tuples_np = np.stack(
[self.Discretize(c) for c in self.table.Columns()], axis=1)
self.tuples = torch.as_tensor(
self.tuples_np.astype(np.float32, copy=False))
print('done, took {:.1f}s'.format(time.time() - s))
def Discretize(self, col):
"""Discretize values into its Column's bins.
Args:
col: the Column.
Returns:
col_data: discretized version; an np.ndarray of type np.int32.
"""
return Discretize(col)
def size(self):
return len(self.tuples)
def __len__(self):
return len(self.tuples)
def __getitem__(self, idx):
return self.tuples[idx]
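# A TableDataset can be fed straight to a torch DataLoader (batch size assumed):
#   loader = torch.utils.data.DataLoader(TableDataset(table), batch_size=1024,
#                                        shuffle=True)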
def Discretize(col, data=None):
"""Transforms data values into integers using a Column's vocab.
Args:
col: the Column.
data: list-like data to be discretized. If None, defaults to col.data.
Returns:
col_data: discretized version; an np.ndarray of type np.int32.
"""
    # pd.Categorical() does not allow categories to be passed in an array
# containing np.nan. It makes it a special case to return code -1
# for NaN values.
if data is None:
data = col.data
# pd.isnull returns true for both np.nan and np.datetime64('NaT').
isnan = pd.isnull(col.all_distinct_values)
if isnan.any():
# We always add nan or nat to the beginning.
assert isnan.sum() == 1, isnan
assert isnan[0], isnan
dvs = col.all_distinct_values[1:]
bin_ids = | pd.Categorical(data, categories=dvs) | pandas.Categorical |
from flask import Flask, redirect, request, url_for,render_template
from application import app, db
from application.models import Products,Orders,Customers #,SummaryOrder,OrdersSummary,ItemTable,OrdersTable,,CustomersTable
import sqlalchemy as sql
import pandas as pd
from datetime import datetime
@app.route('/')
def home():
return render_template('home.html',title='home')
# create customers
@app.route('/customers/add', methods=['GET','POST'])
def add_customer():
return ('<h1>Add New Customer</h1><br>' + render_template('customerform.html',title='add_customer')
+('<br><br> <a href="/customers" type="button">Return to Customers home</a> </br>')
+ ('<br> <a href="/customers/update2" type="button">Update customer records</a> </br>')
+ ('<br> <a href="/" type="button">Return to home</a> </br>'))
@app.route('/customers/add/customer',methods=['GET','POST'])
def add_customers():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('customers', sql_engine)
if request.method=='POST':
if len(df.loc[(df.first_name == request.form['first_name']) & (df.last_name == request.form['last_name']) & ((df.customer_dob == request.form['customer_dob'])|(df.customer_address == request.form['customer_address']))]) == 0:
new_first_name = request.form['first_name']
new_last_name = request.form['last_name']
new_customer_address = request.form['customer_address']
new_customer_dob = request.form['customer_dob']
new_customer = Customers(first_name=new_first_name,last_name=new_last_name,customer_address=new_customer_address,customer_dob=new_customer_dob)#,prepaid_balance=new_prepaid_balance)
db.session.add(new_customer)
db.session.commit()
return redirect(url_for('read_customers'))
else:
return ("<h4><br>"+"It looks like " + str(request.form['first_name']) + " " + str(request.form['last_name'])+ " already exists in the system." + "</h4>" + '<a href="/customers/add" type="button">Try again?</a> </br>'
+ ('<br><br> <a href="/customers/update2" type="button">Update customer records</a> </br>')+('<br> <a href="/customers" type="button">Return to Customers home</a> </br>'))
# read customers
@app.route('/customers')
def read_customers():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('customers', sql_engine)
df.rename(columns={'id':'Customer ID','first_name':'First Name','last_name':'Surname','customer_address':'Address','customer_dob':'Date of Birth'},inplace=True)
html = df.to_html()
return ('<h1>Customers</h1><br>')+html+('<br> <a href="/customers/add" type="button">Add new customer</a> </br>')+('<br> <a href="/customers/update2" type="button">Edit customer records (Update/Delete)</a> </br>')+('<br><br><br> <a href="/products">Navigate to Products</a><br><br>')+('<a href="/orders">Navigate to Orders</a>')+('<br><br> <a href="/" type="button">Return to Home</a> </br>')
# update customers
@app.route('/customers/update2')
def customer_update_page():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('customers', sql_engine)
df1 = df.copy()
df1['Update'] = 'update'
df1['Delete'] = 'delete'
for n in range(len(df1)):
df1.iloc[n,-1] = "<a href=/customers/delete/"+ str(df1.loc[n,'id']) + ">delete</a>"
df1.iloc[n,-2] = "<a href=/customers/update/"+ str(df1.loc[n,'id']) + ">update</a>"
df1.rename(columns={'id':'Customer ID','first_name':'First Name','last_name':'Surname','customer_address':'Address','customer_dob':'Date of Birth'},inplace=True)
html = df1.to_html(render_links=True,escape=False)
return ('<h1>Update Customers</h1><br>')+ html + ('<br> <a href="/customers">Back to Customers</a> </br>') + ('<br> <a href="/products">Navigate to Products</a> </br>') + ('<br> <a href="/orders">Navigate to Orders</a> </br>')
@app.route('/customers/update', methods = ['GET','POST'])
def update_customer():
update_record = Customers.query.filter_by(id=request.form['entry']).first()
if request.method=='POST':
update_record = Customers.query.filter_by(id=request.form['entry']).first()
update_record.first_name = request.form['first_name']
update_record.last_name = request.form['last_name']
update_record.customer_address = request.form['customer_address']
update_record.customer_dob = request.form['customer_dob']
db.session.commit()
return redirect(url_for('read_customers'))
@app.route('/customers/update/<int:customer_record>',methods=['GET','POST'])
def customer_update1(customer_record):
people = str(customer_record)
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('customers', sql_engine)
df1 = df.loc[df.id==int(customer_record),:]
    df1.rename(columns={'id':'Customer ID','first_name':'First Name','last_name':'Surname','customer_address':'Address','customer_dob':'Date of Birth'},inplace=True)
html = df1.to_html(escape=False)
record_no = customer_record
return ('<h1>Update Customers</h1><br>')+ html + "<br><br>" + render_template('customer_update.html',value=record_no) +('<br> <a href="/customers">Back to Customers</a> </br>')+('<br> <a href="/products">Navigate to Products</a> </br>')+('<br> <a href="/orders">Navigate to Orders</a> </br>')
# delete customers
@app.route('/customers/delete/<int:customer_>')
def delete_customers(customer_):
if Orders.query.filter_by(fk_customer_id=customer_).count() == 0:
customer_to_delete = Customers.query.filter_by(id=customer_).first()
db.session.delete(customer_to_delete)
db.session.commit()
return redirect(url_for('read_customers'))
else:
return "Oops! The customer you tried to delete has already placed an order. Please update the orders records if you need to remove this customer." +('<br> <a href="/customers">Return to Customers?</a> </br>')
# create products
@app.route('/products/add', methods=['GET','POST'])
def add_product():
if request.method == 'POST':
page = ''
return '<h1>Add New Product</h1><br>'+ render_template('stockform.html',title='add_item')+('<br><br> <a href="/products" type="button">Return to Products home</a> </br>')+ ('<br> <a href="/products/update2" type="button">Update product records</a> </br>')
@app.route('/products/add/item',methods=['GET','POST'])
def add_products():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('products', sql_engine)
if request.method=='POST':
if len(df.loc[(df.product_name == request.form['product_name']) & (df.product_brand == request.form['brand'])]) == 0:
new_product_name = request.form['product_name']
new_product_brand = request.form['brand']
new_product_quantity = request.form['quantity']
new_product_itemcost = request.form['itemcost']
new_product_price = request.form['price']
new_product = Products(product_name=new_product_name,product_brand=new_product_brand,quantity_in_stock=new_product_quantity,cost_per_item=new_product_itemcost,price=new_product_price)
db.session.add(new_product)
db.session.commit()
return redirect(url_for('read_products'))
else:
return ("<h4><br>"+"It looks like " + str(request.form['brand']) + " " + str(request.form['product_name'])+ " already exists in the system." + "</h4>" + '<a href="/products/add" type="button">Try again?</a> </br>'
+ ('<br><br> <a href="/products/update2" type="button">Update products records</a> </br>')+('<br> <a href="/products" type="button">Return to Products home</a> </br>')+('<br> <br><a href="/" type="button">Return to Home</a> </br>'))
# read products
@app.route('/products')
def read_products():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('products', sql_engine)
df.price = ('£'+df.price.astype('str')).str.ljust(5,'0')
df.cost_per_item = ('£'+df.cost_per_item.astype('str')).str.ljust(5,'0')
df.rename(columns={'id':'Product ID','product_name':'Product','product_brand':'Brand','quantity_in_stock':'Quantity in stock','cost_per_item':'Individual Cost','price':'Price'},inplace=True)
html = df.to_html()
return ('<h1>Products</h1><br>')+html+('<br> <a href="/products/add">Add new item to stocklist</a> </br>')+('<br> <a href="/products/update2">Edit stocklist (Update/Delete)</a> </br><br>')+('<br><br> <a href="/orders">Navigate to Orders</a> </br>')+('<br> <a href="/customers">Navigate to Customers</a> </br>') +('<br><br> <a href="/" type="button">Return to Home</a> </br>')
# update products
@app.route('/products/update2')
def products_update_page():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('products', sql_engine)
df1 = df.copy()
df1['Update'] = 'update'
df1['Delete'] = 'delete'
for n in range(len(df1)):
df1.iloc[n,-1] = "<a href=/products/delete/"+ str(df1.loc[n,'id']) + ">delete</a>"
df1.iloc[n,-2] = "<a href=/products/update/"+ str(df1.loc[n,'id']) + ">update</a>"
df1.price = ('£' + df1.price.astype('str')).str.ljust(5,'0')
df1.cost_per_item = ('£' + df1.cost_per_item.astype('str')).str.ljust(5,'0')
df1.rename(columns={'id':'Product ID','product_name':'Product','product_brand':'Brand','quantity_in_stock':'Quantity in stock','cost_per_item':'Individual Cost','price':'Price'},inplace=True)
html = df1.to_html(render_links=True,escape=False)
return ('<h1>Update Product List</h1><br>')+ html +('<br> <a href="/products">Back to Products home</a> </br>')+('<br> <br><a href="/customers">Navigate to Customers</a> </br>')+('<br> <a href="/orders">Navigate to Orders</a> </br>')+('<br> <br><a href="/" type="button">Return to Home</a> </br>')
@app.route('/products/update', methods = ['GET','POST'])
def update_product():
if request.method=='POST':
update_record = Products.query.filter_by(id=request.form['entry']).first()
update_record.product_name = request.form['product_name']
update_record.product_brand = request.form['product_brand']
update_record.price = request.form['price']
update_record.quantity_in_stock = request.form['quantity_in_stock']
update_record.cost_per_item = request.form['cost_per_item']
db.session.commit()
return redirect(url_for('products_update_page'))
@app.route('/products/update/<int:product_record>',methods=['GET','POST'])
def product_update1(product_record):
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('products', sql_engine)
df1 = df.loc[df.id==int(product_record),:]
df1.rename(columns={'id':'Product ID','product_name':'Product','product_brand':'Brand','quantity_in_stock':'Quantity in stock','cost_per_item':'Individual Cost','price':'Price'},inplace=True)
html = df1.to_html(escape=False)
record_no = product_record
return ('<h1>Update Products List</h1><br>')+html + "<br><br>" + render_template('product_update.html', value1 = record_no) + ('<br> <a href="/products">Back to Products</a> </br>')+('<br> <a href="/customers">Navigate to Customers</a> </br>')+('<br> <a href="/orders">Navigate to Orders</a> </br>')+('<br> <a href="/products" type="button">Return to Products home</a> </br>')+('<br> <br><a href="/" type="button">Return to Home</a> </br>')
# delete products
@app.route('/products/delete/<int:product_>',methods=['GET','POST'])
def delete_products(product_):
if Orders.query.filter_by(fk_product_id=product_).count() == 0:
product_to_delete = Products.query.filter_by(id=product_).first()
db.session.delete(product_to_delete)
db.session.commit()
return redirect(url_for('read_products'))
else: return "Oops! You tried to delete a product that has already been purchased"+('<br> <br><a href="/products/update2">Try Again?</a> </br>')+('<br> <br><br><a href="/products">Return to Products</a> </br>') +('<br> <a href="/products" type="button">Return to Products home</a> </br>')+('<br> <br><a href="/" type="button">Return to Home</a> </br>')
# create orders
@app.route('/orders/add', methods = ['GET','POST'])
def add_order():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = | pd.read_sql_table('products', sql_engine) | pandas.read_sql_table |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
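# Quick illustration of the semantics exercised above (illustrative, not part of
# the original test): Series.where(cond, other) keeps values where cond is True
# and substitutes `other` elsewhere, e.g.
#   Series([1, 2, 3]).where(Series([True, False, True]), 0) -> Series([1, 0, 3])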
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
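# Quick illustration (not part of the original test): Series.mask(cond, other)
# is the inverse of where(); it replaces values where cond is True, e.g.
#   Series([1, 2, 3]).mask(Series([True, False, True]), 0) -> Series([0, 2, 0])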
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index,
identity.index))
except (AttributeError):
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
self.assertTrue(np.isnan(stuffSeries).all())
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
self.assertEqual(val, self.ts[idx])
# return a copy with the same index here
result = self.ts.reindex()
self.assertFalse((result is self.ts))
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_corner(self):
# (don't forget to fix this) I think it's fixed
self.empty.reindex(self.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest(self):
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_int.dtype, np.float_)
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
self.assertEqual(reindexed_int.dtype, np.int_)
def test_reindex_bool(self):
# A series other than float, int, string, or object
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_bool.dtype, np.object_)
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
self.assertEqual(reindexed_bool.dtype, np.bool_)
def test_reindex_bool_pad(self):
# fail
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
self.assertTrue(isnull(filled_bool[:5]).all())
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
self.assertTrue(issubclass(result.dtype.type, np.integer))
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = | Series([False, True, False], index=[1, 2, 3]) | pandas.Series |
import numpy as np
import pandas as pd
def fetch_students():
''' Fetches the two dataset csv files and merges them '''
student_mat = pd.read_csv("dataset/student-mat.csv")
student_por = pd.read_csv("dataset/student-por.csv")
students = pd.concat([student_mat, student_por])
return students
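# Usage sketch (assuming both csv files exist at the paths above):
#   students = fetch_students()
# pd.concat stacks the two course files row-wise and keeps their original,
# possibly duplicated, indices, so len(students) == len(student_mat) + len(student_por).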
def create_data_for_nn(students_dataframe):
input_data = pd.DataFrame()
output_data = pd.DataFrame()
# Input data
# Numerical
# Age
input_data['age'] = pd.Series(
data=students_dataframe['age'].values, index=students_dataframe.index)
# Absences count
input_data['absences'] = pd.Series(
data=students_dataframe['absences'].values, index=students_dataframe.index)
# Family relationship status [bad to good -> 0-1]
input_data['famrel'] = pd.Series(
data=((students_dataframe['famrel'].values - 1) / 4), index=students_dataframe.index)
# Health status [bad to good -> 0-1]
input_data['health'] = pd.Series(
data=((students_dataframe['health'].values - 1) / 4), index=students_dataframe.index)
# Free time after school [0-1]
input_data['freetime'] = pd.Series(
data=((students_dataframe['freetime'].values - 1) / 4), index=students_dataframe.index)
# Going out with friends [0-1]
input_data['goout'] = pd.Series(
data=((students_dataframe['goout'].values - 1) / 4), index=students_dataframe.index)
# Travel time in minutes [0 to 60+ minutes -> 0 to 1]
input_data['traveltime'] = pd.Series(
data=((students_dataframe['traveltime'].values) / 4), index=students_dataframe.index)
# Weekly study time in hours [0 to 10+ hours -> 0 to 1]
input_data['studytime'] = pd.Series(
data=((students_dataframe['studytime'].values) / 4), index=students_dataframe.index)
# Number of past class failures [0 to 4+ failures -> 0 to 1]
input_data['failures'] = pd.Series(
data=((students_dataframe['failures'].values) / 4), index=students_dataframe.index)
# School success [Bad to good -> 0-1]
# The average of G1, G2 and G3 divided by 20 is used as the school success score from 0 to 1
input_data['success'] = pd.Series(data=(students_dataframe['G1'] +
students_dataframe['G2'] +
students_dataframe['G3']) / 3 / 20,
index=students_dataframe.index)
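# Worked example (illustrative): grades G1=12, G2=14, G3=16 give
# (12 + 14 + 16) / 3 / 20 = 0.7 as the success score.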
# Mother education status
# [None, Primary education (4th grade), 5th to 9th grade, Secondary education, Higher education]
input_data['Medu_none'] = pd.Series(data=0, index=students_dataframe.index)
input_data['Medu_primary'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Medu_fivenine'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Medu_secondary'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Medu_higher'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['Medu'] == 0, 'Medu_none'] = 1
input_data.loc[students_dataframe['Medu'] == 1, 'Medu_primary'] = 1
input_data.loc[students_dataframe['Medu'] == 2, 'Medu_fivenine'] = 1
input_data.loc[students_dataframe['Medu'] == 3, 'Medu_secondary'] = 1
input_data.loc[students_dataframe['Medu'] == 4, 'Medu_higher'] = 1
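# Illustrative one-hot result: a row with Medu == 3 gets Medu_secondary == 1
# while the remaining Medu_* columns keep their default value of 0.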
# Father education status
# [None, Primary education (4th grade), 5th to 9th grade, Secondary education, Higher education]
input_data['Fedu_none'] = pd.Series(data=0, index=students_dataframe.index)
input_data['Fedu_primary'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fedu_fivenine'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fedu_secondary'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fedu_higher'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['Fedu'] == 0, 'Fedu_none'] = 1
input_data.loc[students_dataframe['Fedu'] == 1, 'Fedu_primary'] = 1
input_data.loc[students_dataframe['Fedu'] == 2, 'Fedu_fivenine'] = 1
input_data.loc[students_dataframe['Fedu'] == 3, 'Fedu_secondary'] = 1
input_data.loc[students_dataframe['Fedu'] == 4, 'Fedu_higher'] = 1
# Mother's job
# [Teacher, Health care related, Civil services, At home, Other]
input_data['Mjob_teacher'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Mjob_health'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Mjob_civilser'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Mjob_athome'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Mjob_other'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['Mjob'] == 'teacher', 'Mjob_teacher'] = 1
input_data.loc[students_dataframe['Mjob'] == 'health', 'Mjob_health'] = 1
input_data.loc[students_dataframe['Mjob']
== 'services', 'Mjob_civilser'] = 1
input_data.loc[students_dataframe['Mjob'] == 'at_home', 'Mjob_athome'] = 1
input_data.loc[students_dataframe['Mjob'] == 'other', 'Mjob_other'] = 1
# Father's job
# [Teacher, Health care related, Civil services, At home, Other]
input_data['Fjob_teacher'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fjob_health'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fjob_civilser'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fjob_athome'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fjob_other'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['Fjob'] == 'teacher', 'Fjob_teacher'] = 1
input_data.loc[students_dataframe['Fjob'] == 'health', 'Fjob_health'] = 1
input_data.loc[students_dataframe['Fjob']
== 'services', 'Fjob_civilser'] = 1
input_data.loc[students_dataframe['Fjob'] == 'at_home', 'Fjob_athome'] = 1
input_data.loc[students_dataframe['Fjob'] == 'other', 'Fjob_other'] = 1
# Reason to choose this school
# [ Close to home, School reputation, Course preference, Other ]
input_data['reason_closehome'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['reason_rep'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['reason_pref'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['reason_other'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['reason']
== 'home', 'reason_closehome'] = 1
input_data.loc[students_dataframe['reason']
== 'reputation', 'reason_rep'] = 1
input_data.loc[students_dataframe['reason'] == 'course', 'reason_pref'] = 1
input_data.loc[students_dataframe['reason'] == 'other', 'reason_other'] = 1
# One hot
# Sex [M(Male) = 0, F(Female) = 1]
input_data['sex'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['sex'] == 'F', 'sex'] = 1
# Address [R(Rural) = 0, U(Urban) = 1]
input_data['address'] = | pd.Series(data=0, index=students_dataframe.index) | pandas.Series |
"""
This module includes the guessing advantage implementation.
"""
from math import log, exp, sqrt, inf
from statistics import median
import time
from enum import Enum
from statsmodels.distributions.empirical_distribution import ECDF
import multiprocessing as mp
# import swifter
import numpy as np
import pandas as pd
from itertools import repeat
import os
import glob
import pickle
import gc
from scipy.stats import laplace
# import amun.multiprocessing_helper_functions
# import concurrent.futures
import math
class AggregateType(Enum):
SUM = 1
AVG = 2
MIN = 3
MAX = 4
FREQ = 5
def calculate_epsilon_from_delta(dfg, delta):
# we will eventually have different epsilons for frequency and for time
# we can output two epsilons, or have two different functions
# in any case, it is safe to take the minimum of these epsilons if the DP interface takes only one
# for frequencies, we have r = 1
r = 1
# need to learn how to compute these things from dfg format
# for times, we need to compute r as the maximum possible time between two subsequent events
# we can get even better results if:
# 1) we have an event log instead of a dfg
# 2) we compute a different epsilon for different edges of the dfg
# r = ....
p = (1 - delta) / 2
epsilon = - log(p / (1 - p) * (1 / (delta + p) - 1)) / log(exp(1)) * (1 / r)
return epsilon
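# Worked example (illustrative): with delta = 0.2 and r = 1,
# p = (1 - 0.2) / 2 = 0.4 and epsilon = -ln(0.4 / 0.6 * (1 / 0.6 - 1)) ~= 0.81.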
def calculate_epsilon_freq(dfg, delta):
sens = 1.0
p = (1.0 - delta) / 2.0
R_ij = 1.0 # from the discussion with Alisa
epsilon = - log(p / (1.0 - p) * (1.0 / (delta + p) - 1)) / log(exp(1.0)) * (1.0 / R_ij)
return epsilon, sens
def calculate_epsilon_time(dfg, delta, precision, aggregate_type):
epsilon = {}
sens = 1
n = 1 # length of the database
""" calculating sensitivity based on type of aggregate"""
if aggregate_type == AggregateType.AVG:
sens = 1 / n
elif aggregate_type == AggregateType.MAX or aggregate_type == AggregateType.MIN or aggregate_type == AggregateType.SUM:
sens = 1
else:
assert False, "Wrong aggregate type"
"""************parallel by edge******************"""
p = mp.Pool(mp.cpu_count())
result = p.starmap(calculate_epsilon_per_pair, zip(dfg.values(), repeat(delta), repeat(precision)))
p.close()
p.join()
epsilon = dict(zip(list(dfg.keys()), result))
return epsilon, sens
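# Note (assumption about the expected input, not stated explicitly in the code):
# dfg is assumed to map directly-follows edges to lists of durations, e.g.
#   {('A', 'B'): [4.0, 7.5, 9.0], ('B', 'C'): [1.0, 2.5]}
# so calculate_epsilon_per_pair produces one epsilon per edge.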
def calculate_epsilon_time_parallel(dfg, delta, precision, aggregate_type):
epsilon = {}
sens = 1
n = 1 # length of the database
""" calculating sensitivity based on type of aggregate"""
if aggregate_type == AggregateType.AVG:
sens = 1 / n
elif aggregate_type == AggregateType.MAX or aggregate_type == AggregateType.MIN or aggregate_type == AggregateType.SUM:
sens = 1
else:
assert False, "Wrong aggregate type"
"""************parallel by edge******************"""
p = mp.Pool(mp.cpu_count())
result = p.starmap(calculate_epsilon_per_pair_parallel, zip(dfg.values(), repeat(delta), repeat(precision)))
epsilon = dict(zip(list(dfg.keys()), list(result)))
p.close()
p.join()
return epsilon, sens
def calculate_epsilon_per_pair(values, delta, precision):
# values = [0.0, 0.2, .4, .6, .7, 10, 20, 100, 400, 500, 1000, 2000]
values =list(map(abs, values))
values = sorted(values)
R_ij = max(values)
epsilons = []
r_ij = R_ij * precision
cdf = ECDF(values)
epsilon = inf
flag = 1
prev = values[0]
for i in values:
if i != prev:
flag = 0
prev = i
if not flag:
for t_k in values:
p_k = calculate_cdf(cdf, t_k + r_ij) - calculate_cdf(cdf, t_k - r_ij)
# covering the case where the risk is less than or equal to 1-p_k
if not (round(1 - p_k, 2) <= delta): # the round is for very small differences, like 0.050000000000000044
eps = - log(p_k / (1.0 - p_k) * (1.0 / (delta + p_k) - 1.0)) / log(exp(1.0)) * (1.0 / R_ij)
# eps= -(log( (1-p_k)/p_k * (1/(delta*p_k) -1) )) /(R_ij)
epsilons.append(eps)
else:
epsilons.append(inf)
if len(epsilons) > 0:
epsilon = min(epsilons)
else:
# fix the ECDF when all the values are equal.
# after the discussion, we decided to let the user know about that issue so they can handle it on their own.
# epsilon=-inf
epsilon = inf
return epsilon
def calculate_epsilon_per_pair_parallel(values, delta, precision):
# values = [0.0, 0.2, .4, .6, .7, 10, 20, 100, 400, 500, 1000, 2000]
values =list(map(abs, values))
values = sorted(values)
R_ij = max(values)
epsilons = []
r_ij = R_ij * precision
cdf = ECDF(values)
epsilon = inf
flag = 1
prev = values[0]
for i in values:
if i != prev:
flag = 0
prev = i
if not flag:
for t_k in values:
p_k = calculate_cdf(cdf, t_k + r_ij) - calculate_cdf(cdf, t_k - r_ij)
# covering the case where the risk is less than or equal to 1-p_k
if not (round(1 - p_k, 2) <= delta): # the round is for very small differences, like 0.050000000000000044
eps = - log(p_k / (1.0 - p_k) * (1.0 / (delta + p_k) - 1.0)) / log(exp(1.0)) * (1.0 / R_ij)
epsilons.append(eps)
else:
epsilons.append(inf)
if len(epsilons) > 0:
epsilon = min(epsilons)
else:
# fix the ECDF when all the values are equal.
# after the discussion, we decided to let the user know about that issue so they can handle it on their own.
# epsilon=-inf
epsilon = inf
return epsilon
def calculate_epsilon_from_distance_time(dfg_time, distance, precision, aggregate_type=AggregateType.SUM):
beta = 0.05
# reflect the new equation of delta for the time per time instance. Then take the maximum delta from all the instances.
sens_time = 1
""" calculating sensitivity based on type of aggregate"""
if aggregate_type == AggregateType.AVG:
sens_time = 1 / len(dfg_time[0])
elif aggregate_type == AggregateType.MAX or aggregate_type == AggregateType.MIN or aggregate_type == AggregateType.SUM:
sens_time = 1
else:
assert False, "Wrong aggregate type"
# calculate epsilon
# the equation to be calculated per instance first as p is different from frequency.
epsilon_time = sens_time / distance * log(1 / beta)
# Calculate delta ( the risk) from guessing advantage equation
delta_time = []
delta_dfg = {}
for x in dfg_time.keys():
delta_edge = []
R_ij = max(dfg_time[x])
r_ij = R_ij * precision
# handle the case where the time is fixed
flag = 1
prev = dfg_time[x][0]
current = dfg_time[x]
for t_k in dfg_time[x]:
# handle the case where the time is fixed
if t_k != prev:
flag = 0
prev = t_k
cdf = ECDF(dfg_time[x])
# p_k is calculated for every instance.
cdf1 = calculate_cdf(cdf, t_k + r_ij)
cdf2 = calculate_cdf(cdf, t_k - r_ij)
p_k = cdf1 - cdf2
# current_delta = p_k*( 1/( (1-p_k) * exp(-R_ij * epsilon_time) +p_k) -1)
current_delta = (p_k / ((1 - p_k) * exp(-R_ij * epsilon_time) + p_k)) - p_k
# eps = - log(p_k / (1.0 - p_k) * (1.0 / (current_delta + p_k) - 1.0)) / log(exp(1.0)) * (1.0 / R_ij)
# we append the deltas and take the maximum delta out of them
# if current_delta != float.nan:
delta_edge.append(current_delta)
if current_delta != 0:
delta_time.append(current_delta)
delta_dfg[x] = max(delta_edge)
if len(delta_time) > 0:
delta_time = median(delta_time)
delta_time = median(delta_dfg.values())
return epsilon_time, delta_time, delta_dfg
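# Worked example (illustrative): for p_k = 0.5 and R_ij * epsilon_time = ln(4),
# exp(-R_ij * epsilon_time) = 0.25 and
# current_delta = 0.5 / (0.5 * 0.25 + 0.5) - 0.5 = 0.8 - 0.5 = 0.3.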
def calculate_cdf(cdf, val):
cur_idx = 0
for idx, i in enumerate(cdf.x[:-1]):
if val > i:
cur_idx += 1
if val < cdf.x[idx + 1]:
cur_idx -= 1
break
return cdf.y[cur_idx]
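# Illustrative example: for cdf = ECDF([1, 2, 3]), calculate_cdf(cdf, 2.5)
# walks the step function and returns 2/3, i.e. the empirical CDF value at the
# largest breakpoint (2) that does not exceed the requested value.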
def calculate_epsilon_from_distance_time_percentage_distance(dfg_time, distance, precision, aggregate_type=AggregateType.SUM):
beta = 0.05
# reflect the new equation of delta for the time per time instance. Then take the maximum delta from all the
# instances.
sens_time = 1
""" calculating sensitivity based on type of aggregate"""
if aggregate_type == AggregateType.AVG:
sens_time = 1 / len(dfg_time.keys())
elif aggregate_type == AggregateType.MAX or aggregate_type == AggregateType.MIN or aggregate_type == AggregateType.SUM:
sens_time = 1
else:
assert False, "Wrong aggregate type"
# m is the number of edges
m = len(dfg_time.keys())
# calculating R among all the edges
R = 0
for x in dfg_time.keys():
R = R + max(dfg_time[x])
# Calculate delta ( the risk) from guessing advantage equation
delta_time = []
delta_dfg = {}
epsilon_time = {}
delta_per_event=[]
"""************parallel by edge******************"""
""" Current slurm Bottleneck"""
p = mp.Pool(mp.cpu_count())
result = p.starmap(epsilon_time_from_distance,
zip(dfg_time.values(), repeat(aggregate_type), repeat(beta), repeat(distance), repeat(precision),
repeat(sens_time)))
p.close()
p.join()
delta_dfg = dict(zip(list(dfg_time.keys()), [x[0] for x in result]))
keys = list(dfg_time.keys())
for idx, res in enumerate(result):
delta_edge, delta_per_event_inner, delta_time_inner, epsilon_time_inner = res
key = keys[idx]
epsilon_time[key] = epsilon_time_inner
delta_time = delta_time + delta_time_inner
delta_per_event_inner = list(map(lambda i: [key, i], delta_per_event_inner))
delta_per_event = delta_per_event + delta_per_event_inner
delta_dfg[key] = max(delta_edge)
if len(delta_time) > 0:
delta_time = max(delta_time)
delta_time = median(delta_dfg.values())
return epsilon_time, delta_time, delta_dfg, delta_per_event
def epsilon_time_from_distance(dfg_time_inner, aggregate_type, beta, distance,
precision, sens_time):
delta_time_inner = []
delta_edge = []
delta_per_event = []
R_ij = max(dfg_time_inner)
r_ij = R_ij * precision
accurate_result = 0
# calculating the accurate result
if aggregate_type == AggregateType.AVG:
accurate_result = sum(dfg_time_inner) * 1.0 / len(dfg_time_inner)
elif aggregate_type == AggregateType.SUM:
accurate_result = sum(dfg_time_inner) * 1.0
elif aggregate_type == AggregateType.MIN:
accurate_result = min(dfg_time_inner) * 1.0
elif aggregate_type == AggregateType.MAX:
accurate_result = max(dfg_time_inner) * 1.0
# in case the time is instant, we set epsilon to avoid a division-by-zero error
if accurate_result == 0:
epsilon_time_ij = 1
else:
distance_ij = accurate_result * distance  # distance is a fraction between 0 and 1, so scale it by the accurate result
# calculate epsilon
epsilon_time_ij = sens_time / distance_ij * log(1 / beta)
epsilon_time_inner = epsilon_time_ij
# handle the case where the time is fixed
flag = 1
prev = dfg_time_inner[0]
current = dfg_time_inner
for t_k in dfg_time_inner:
# handle the case where the time is fixed
if t_k != prev:
flag = 0
prev = t_k
cdf = ECDF(dfg_time_inner)
# p_k is calculated for every instance.
cdf1 = calculate_cdf(cdf, t_k + r_ij)
cdf2 = calculate_cdf(cdf, t_k - r_ij)
p_k = cdf1 - cdf2
# current_delta = p_k*( 1/( (1-p_k) * exp(-R_ij * epsilon_time) +p_k) -1)
current_delta = (p_k / ((1 - p_k) * exp(-R_ij * epsilon_time_ij) + p_k)) - p_k
# eps = - log(p_k / (1.0 - p_k) * (1.0 / (current_delta + p_k) - 1.0)) / log(exp(1.0)) * (1.0 / R_ij)
# we append the deltas and take the maximum delta out of them
# if current_delta != float.nan:
delta_edge.append(current_delta)
# delta_per_event.append([x, current_delta])
delta_per_event.append(current_delta) # *****************!!!!!!!!!!!! changed
if current_delta != 0:
delta_time_inner.append(current_delta)
return delta_edge, delta_per_event, delta_time_inner, epsilon_time_inner
def calculate_epsilon_from_distance_freq_percentage_distances(dfg_freq, distance_percentage):
beta = 0.01
sens_freq = 1
# for frequencies, we have r = 1
r_freq = 1
delta_dfg = {}
epsilon_dfg = {}
p = mp.Pool(mp.cpu_count())
result = p.starmap(epsilon_freq_from_distance,
zip(dfg_freq.values(), repeat(beta), repeat(distance_percentage), repeat(sens_freq)))
p.close()
p.join()
delta_dfg = dict(zip(list(dfg_freq.keys()), [x[0] for x in result]))
epsilon_dfg = dict(zip(list(dfg_freq.keys()), [x[1] for x in result]))
delta_freq = max(list(delta_dfg.values()))
return epsilon_dfg, delta_dfg, delta_freq
def epsilon_freq_from_distance(dfg_freq_inner, beta, distance_percentage, sens_freq):
distance = distance_percentage * dfg_freq_inner
epsilon_freq = sens_freq / distance * log(1 / beta)
epsilon_dfg_inner = epsilon_freq
# Calculate delta ( the risk) from guessing advantage equation
# the following equation is validated by calculations
delta_freq = (1 - sqrt(exp(- epsilon_freq))) / (1 + sqrt(exp(- epsilon_freq)))
return delta_freq, epsilon_dfg_inner
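# Worked example (illustrative): if epsilon_freq = ln(4), then
# sqrt(exp(-epsilon_freq)) = 0.5 and delta_freq = (1 - 0.5) / (1 + 0.5) = 1/3.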
def calculate_cdf_vectorized(data):
cdf, val=data.relative_time_ecdf,data.val_plus
cur_idx = 0
for idx, i in enumerate(cdf.x[:-1]):
if val > i:
cur_idx += 1
if val < cdf.x[idx + 1]:
cur_idx -= 1
break
return cdf.y[cur_idx]
def estimate_epsilon_risk_vectorized(data, delta, precision,tmp_dir):
# NOTE: in the current version, there are no fixed time values.
# Because the starting time is now being anonymized.
data_state_max = data.groupby('state').relative_time.max()
data_state_max['state'] = data_state_max.index
# data= pd.merge(data, data_cdf, on=['state'], suffixes=("","_ecdf"))
data = pd.merge(data, data_state_max, on=['state'], suffixes=("", "_max"))
#calculate cdfs in vectorized manner
data['r_ij']=data['relative_time_max']*precision
data['val_plus']=data['relative_time'] + data['r_ij']
data['val_minus'] = data['relative_time'] - data['r_ij']
data.drop(['r_ij'], inplace=True, axis=1)
# data['cdf_plus']=np.vectorize(calculate_cdf)(data.relative_time_ecdf,data.val_plus)
# data['cdf_minus'] = np.vectorize(calculate_cdf)(data.relative_time_ecdf, data.val_minus)
#optimize calculate cdf function
"""
CDF calculation using pandas
https://stackoverflow.com/questions/25577352/plotting-cdf-of-a-pandas-series-in-python
"""
# data['cdf_plus'] = data[['relative_time_ecdf','val_plus']].swifter.apply(lambda x: calculate_cdf(x.relative_time_ecdf,x.val_plus),axis=1)
# data['cdf_minus'] = data[['relative_time_ecdf', 'val_minus']].swifter.apply(
# lambda x: calculate_cdf(x.relative_time_ecdf, x.val_minus), axis=1)
#state, relative_time
stats_df = data.groupby(['state', 'relative_time'])['relative_time'].agg('count').pipe(pd.DataFrame).rename(
columns={'relative_time': 'frequency'})
# PDF
stats_df['pdf'] = stats_df['frequency'] / stats_df.groupby(['state']).frequency.sum()
# CDF
stats_df['cdf'] = stats_df['pdf'].groupby(['state']).cumsum()
stats_df = stats_df.reset_index()
stats_df.drop(['pdf'], inplace=True, axis=1)
#the plus_and_minus works like a value lookup
plus_and_minus=data.groupby(['state', 'relative_time','val_plus','val_minus']).state.agg('count').pipe(pd.DataFrame)\
.drop('state',axis=1)\
.reset_index()
#calculating CDF of the value + r_ij
# temp = stats_df[['state', 'relative_time', 'cdf']].merge(plus_and_minus[['state', 'val_plus']], how='cross',
# suffixes=("", "_right"))
# temp = temp.loc[
# (temp.state == temp.state_right) & (temp.val_plus >= temp.relative_time), ['state','relative_time', 'val_plus', 'cdf']]\
# .groupby(['state', 'val_plus']).cdf.max().reset_index()
stats_df=stats_df[['state', 'relative_time', 'cdf']]
# print("fix memory part")
#fixing memory issues
data.to_pickle('data.p')
del(data)
# stats_df.to_pickle('stats_df.p')
""" ********* Performing chunking join **********"""
# we use chunks to avoid running out of memory for large event logs
# stats_df.to_csv('stats_df.csv',index=False, header=True, float_format='%.15f', compression='gzip', encoding='utf-8')
# stats_df_cols=stats_df.columns
chunk_size=10000 # number of states per chunk
# the problem is that all cases go through the first state.
no_of_chunks, max_large_state=partitioning_df(stats_df,plus_and_minus,tmp_dir,chunk_size)
# print("Partitioning Done")
del(stats_df)
del(plus_and_minus)
gc.collect()
chunck_join(no_of_chunks,max_large_state,tmp_dir)
# del(plus_and_minus)
#loading data back from hard disk
data=pd.read_pickle('data.p')
#appending cdf
cdf=append_cdf(tmp_dir,1)
# add the first cdf values to the dataframe
data = data.merge(cdf, how='left', on=['state', 'relative_time'], suffixes=("", "_right"))
data.drop(['val_plus'], inplace=True, axis=1)
del(cdf)
#appending cdf2
cdf2=append_cdf(tmp_dir,2)
# add the values to the dataframe
data = data.merge(cdf2, how='left', on=['state', 'relative_time'], suffixes=("", "_right"))
del(cdf2)
# the minimum value of each distribution drops due to the condition "temp.val_minus >= temp.relative_time"
# to fix that, we perform left join and replace the nans with zeros which means that the CDF of a value that is lower than
# the minimum is zero
data.cdf_minus=data.cdf_minus.fillna(0)
# and the maximum is 1
data.cdf_plus = data.cdf_plus.fillna(1)
# print("Second CDF done")
# data= data.merge(cdf, how='left', on=['state','relative_time'], suffixes=("","_right"))
# print(cdf[['state','relative_time']])
# print(data.loc[data.cdf_plus.isna(), ['state','relative_time']])
# data['cdf_minus'] = data[['relative_time_ecdf', 'val_minus']].swifter.apply(calculate_cdf_vectorized,axis=1)
#calculate p_k in a vectorized manner
data['p_k'] = data.cdf_plus - data.cdf_minus
#calculate epsilon in a vectorized manner
# data['eps'] = - np.log(data.p_k / (1.0 - data.p_k) * (1.0 / (delta + data.p_k) - 1.0))/ log(exp(1.0))* (1.0 / data.relative_time_max)
data['eps'] = - np.log(data.p_k / (1.0 - data.p_k) * (1.0 / (delta + data.p_k) - 1.0))
data['eps']=data['eps']/ log(exp(1.0))
data['eps'] = data['eps']* (1.0 / data.relative_time_max.replace(0,-inf))
#drop unused columns
data.drop(['p_k','cdf_plus','cdf_minus','val_minus','relative_time_max'], inplace=True, axis=1)
# data.drop('case:concept:name_linker',inplace=True,axis=1)
# data['eps'] = data.swifter.apply(
# lambda x: estimate_epsilon_risk_dataframe(x['relative_time'], x['relative_time_ecdf'], x['relative_time_max'],
# delta, precision), axis=1)
return data
def get_noise_case_variant(delta):
p=(1-delta)/2.0
eps=- np.log(p / (1.0 - p) * (1.0 / (delta + p) - 1.0))
sens_time = 1
noise = laplace.rvs(loc=0, scale=sens_time / eps, size=1)[0]
noise = int(math.ceil(abs(noise)))
print("case variant noise: %s"%(noise))
return noise,eps
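# Note (illustrative): the Laplace scale is sens_time / eps, so a stricter
# guarantee (smaller eps) yields larger noise; e.g. delta = 0.2 gives
# eps ~= 0.81 and an expected absolute noise of about 1 / 0.81 ~= 1.2 before ceil().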
def normalize_relative_time(data):
if data['relative_time_min']==data['relative_time_max']:
#return 1 in case of equal values, that will help to extract the noise value later
return 1
return (data['relative_time']-data['relative_time_min'])/(data['relative_time_max']-data['relative_time_min'])
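# Worked example (illustrative): relative_time = 30 with min = 10 and max = 50
# normalizes to (30 - 10) / (50 - 10) = 0.5; equal min and max return 1 so that
# the added noise can be recovered later.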
def match_vals(row, cumsum):
# cdf=float(cumsum[cumsum.index==row['relative_time']])
#cdf plus
# val_plus= row['relative_time']+precision
# group_cdf=cumsum.loc[(row['prev_state'], row['concept:name'], row['state']),['relative_time','cdf']]
group_cdf = cumsum.loc[(row['prev_state'], row['concept:name'], row['state']),['cdf']]
# t1 = group_cdf.loc[group_cdf.index <= row['val_plus']]
# cdf_plus=float(t1.iloc[-1][0])
if row['val_plus']>=1:
cdf_plus=1.0
else:
cdf_plus=group_cdf.loc[group_cdf.index <= row['val_plus']]
cdf_plus = float(cdf_plus.iloc[-1][0])
# cdf_plus=float(group_cdf[group_cdf.relative_time <= row['val_plus']].cdf.max())
#cdf minus
# val_minus = row['relative_time'] - precision
if row['val_minus'] <= 0:
cdf_minus = 0.0
else:
# query=group_cdf[group_cdf.relative_time <= row['val_minus']]
query=group_cdf.loc[group_cdf.index <= row['val_minus']]
if query.shape[0]==0:
#in case the val_minus is lower than the minimum value but greater than zero
cdf_minus=0
else:
cdf_minus = float(query.iloc[-1][0])
# cdf_minus = float(query.cdf.max())
# if cdf_minus==nan:
# cdf_minus===0
return [ cdf_plus, cdf_minus]
def estimate_epsilon_risk_vectorized_with_normalization(data,mode, delta, tmp_dir):
# NOTE: in the current version, there are no fixed time values.
# Because the starting time is now being anonymized.
# We estimate the min and max values for the normalization
data['relative_time_max'] = data.groupby(['prev_state','concept:name','state'])['relative_time'].transform('max')
data['relative_time_min'] = data.groupby(['prev_state','concept:name','state'])['relative_time'].transform('min')
# perform normalization (scale the values between 0 and 1 using the min-max method)
data['relative_time_original']=data['relative_time']
data['relative_time']= data[['relative_time','relative_time_min', 'relative_time_max']].apply(normalize_relative_time, axis=1)
data=estimate_P_k(data, delta,tmp_dir)
#calculate epsilon in a vectorized manner
# handle p_k+delta >1
if mode=='filtering':
"""**************** Filtering ****************"""
'''delete records with prior knowledge + delta >=1'''
# cases_to_delete = data.loc[data.p_k==1]['case:concept:name'].unique()
cases_to_delete = data.loc[data.p_k+delta >= 1]['case:concept:name'].unique()
#if the resulted event log is empty filter only p_k==1
# this case happens only in the Credit Requirement.
if data['case:concept:name'].unique().shape[0]== cases_to_delete.shape[0]:
cases_to_delete = data.loc[data.p_k == 1]['case:concept:name'].unique()
data = data[~data['case:concept:name'].isin(cases_to_delete)]
data = data.reset_index(drop=True)
data = estimate_P_k(data, delta, tmp_dir)
"""******************************************"""
data['eps'] =data.apply(epsilon_vectorized_internal,delta=delta, axis=1)
#drop unused columns
# we keep the max and min to denormalize the values
data.drop([ 'cdf_plus', 'cdf_minus', 'val_minus','val_plus'], inplace=True, axis=1)
return data
def estimate_P_k(data, delta,tmp_dir):
# calculate cdfs in vectorized manner
"""for the normalized input, the r_ij equals 1"""
# The range became +/- precision as r_ij =1
# data['val_plus']=data['relative_time'] + precision
# data['val_minus'] = data['relative_time'] - precision
"""Estimate precision as one per unit time"""
# precision = 10 * 1 *(data['relative_time_max'] - data['relative_time_min'])/ (
# data['relative_time_max'] - data['relative_time_min'])**2 # within 10 minutes and time unit is in minutes
# #nans happens when max=min
# precision = precision.fillna(0)
data['precision'] = 10 * 1* (data['relative_time_max'] - data['relative_time_min']) / (data['relative_time_max'] - data['relative_time_min'])**2
data.precision = data.precision.fillna(0)
# precision >max becomes 1
data['precision'].loc[data.precision > 1] = 1
# normalize precision
# precision = (precision - data['relative_time_min']) / (data['relative_time_max'] - data['relative_time_min'])
# precision = (precision ) / (data['relative_time_max'] - data['relative_time_min'])
data['val_plus'] = data['relative_time'] + data['precision']
data['val_plus']=data.val_plus.replace(inf,1)
data['val_plus'].loc[data.val_plus > 1] = 1
data['val_minus'] = data['relative_time'] - data['precision']
data['val_minus'] = data.val_minus.replace(-inf, 0)
data['val_minus'].loc[data.val_minus<0]=0
# #no cdf below zero, so we replace -ve values with zeros
# # no cdf greater than 1, so we replace values >1 with 1
# optimize calculate cdf function
# start partitioning here
data=estimate_CDF_paritioned(data, tmp_dir)
# calculate p_k in a vectorized manner
data['p_k'] = 0
# adding a fix for the case of a fixed distribution
data['p_k'] = data.apply(estimate_P_k_vectorized, delta=delta, axis=1)
data.drop('original_index',inplace=True, axis=1)
return data
def estimate_CDF_paritioned(data, tmp_dir, chunk_size = 1000):
""" the first state for large files is very large. We split the first state in a separate file.
Then all the other states are split into several files.
"""
# stats_df.to_csv('stats_df.csv', index=False, header=True, float_format='%.15f', compression='gzip',
# encoding='utf-8')
data=data.reset_index()
data=data.rename(columns={'index': 'original_index'})
data.to_pickle(os.path.join(tmp_dir, 'data_df_all'))
no_of_chunks, max_large_state = partition_eventlog(chunk_size, data, tmp_dir)
del data
# iterate over every partition
cdf_plus, cdf_minus=estimate_CDF_with_partitioning(no_of_chunks, max_large_state, tmp_dir)
# read data back from the pickle file
data=pd.read_pickle(os.path.join(tmp_dir, 'data_df_all'))
data['cdf_plus'] = cdf_plus
data['cdf_minus'] = cdf_minus
    return data  # data with the cdf_plus and cdf_minus columns attached
def partition_eventlog(chunk_size, data, tmp_dir):
large_state_size=1000
data.sort_values(['prev_state', 'concept:name', 'state'], ascending=True, inplace=True)
# unique_states = stats_df.state.unique()
# unique_states = data.groupby(['prev_state', 'concept:name', 'state']).size().reset_index().rename(
# columns={0: 'count'}).drop('count', axis=1)
unique_states = data.groupby(['prev_state', 'concept:name', 'state']).original_index.apply(list)
large_states = data.groupby(['prev_state', 'concept:name', 'state']).relative_time.count()
# separating large states from the others
small_states = list(large_states.loc[large_states <= large_state_size].index)
large_states = large_states.reset_index().rename(columns={'relative_time': 'counts'})
large_states = large_states[large_states.counts > large_state_size].reset_index()
unique_states = unique_states.loc[small_states]
# unique_states = unique_states.reset_index()
# unique_states = unique_states.loc[small_states]
unique_states=unique_states.reset_index()
# ['prev_state', 'concept:name', 'state']
curr_dir = os.getcwd()
idx = 0
"""large state separately"""
for index, row in large_states.iterrows():
res = data.loc[(data.state == row['state']) & (data.prev_state == row['prev_state']) & (
data['concept:name'] == row['concept:name']), :]
res.to_pickle(os.path.join(tmp_dir, 'data_df_%s' % (idx)))
# row_id = unique_states.index[
# (unique_states.state == row['state']) & (unique_states.prev_state == row['prev_state']) & (
# unique_states['concept:name'] == row['concept:name'])].tolist()[0]
# unique_states.drop(row_id, axis=0, inplace=True)
idx += 1
""" splitting other states regularly"""
max_index_of_large_states = idx
# print("partition of large states is %s"%(max_index_of_large_states-1))
for i in range(0, unique_states.shape[0], chunk_size):
# print("Current Chunck is: %s" % (i))
current_states = unique_states.loc[i:i + chunk_size-1,'original_index']
current_states = current_states.apply(pd.Series).stack().reset_index(drop=True)
# res = stats_df.loc[stats_df.state.isin(current_states), :]
# res = data.iloc[current_states.index]
current_states = current_states.astype('int32')
res = data.loc[current_states]
# res = data.iloc[current_states]
res.to_pickle(os.path.join(tmp_dir, 'data_df_%s' % (idx)))
# plus_and_minus.loc[plus_and_minus.state.isin(current_states), :]\
# .to_pickle(os.path.join( tmp_dir,'plus_and_minus_%s'%(idx)))
idx += 1
return idx, max_index_of_large_states
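# ---------------------------------------------------------------------------
# Illustrative sketch (assumption-labelled): the partitioning idea used
# above, on a toy frame keyed by a single 'state' column. Groups larger than
# a threshold get a partition of their own; the remaining groups are emitted
# `chunk_size` groups at a time (returned here as frames instead of pickles).
def _demo_chunked_groups(chunk_size=2, large_state_size=3):
    import pandas as pd
    toy = pd.DataFrame({
        'state': [1, 1, 1, 1, 2, 3, 4],
        'relative_time': [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0],
    })
    sizes = toy.groupby('state')['relative_time'].count()
    large = sizes[sizes > large_state_size].index.tolist()    # e.g. [1]
    small = sizes[sizes <= large_state_size].index.tolist()   # e.g. [2, 3, 4]
    chunks = [toy[toy.state.isin(small[i:i + chunk_size])]
              for i in range(0, len(small), chunk_size)]
    return large, chunks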
def estimate_CDF_with_partitioning(num_of_chunks, max_large_state, tmp_dir):
    cdf_plus_parts = []
    cdf_minus_parts = []
    for i in range(0, num_of_chunks):
        data = pd.read_pickle(os.path.join(tmp_dir, 'data_df_%s' % (i)))
        data = data[['original_index', 'prev_state', 'concept:name', 'state', 'relative_time', 'val_plus', 'val_minus']]
        # the first partitions hold single, very large states, so they are handled separately
        if i < max_large_state:
            # single-state partitions
            cdf_minus, cdf_plus = estimate_CDF_per_partition_single_transition(data)
        else:
            # multi-state partitions
            cdf_minus, cdf_plus = estimate_CDF_per_partition(data)
        # collect the cdf parts; they keep the same index as the original data
        cdf_plus_parts.append(cdf_plus)
        cdf_minus_parts.append(cdf_minus)
    # Series.append is deprecated, so the parts are concatenated once at the end
    cdf_plus_combined = pd.concat(cdf_plus_parts)
    cdf_minus_combined = pd.concat(cdf_minus_parts)
    return cdf_plus_combined, cdf_minus_combined
def estimate_CDF_per_partition(data):
stats_df = data.groupby(['prev_state', 'concept:name', 'state', 'relative_time'])['relative_time'].agg(
'count').pipe(pd.DataFrame).rename(
columns={'relative_time': 'frequency'})
# PDF
stats_df['pdf'] = stats_df['frequency'] / stats_df.groupby(['prev_state', 'concept:name', 'state']).frequency.sum()
""" CDF plus"""
stats_df['cdf'] = stats_df['pdf'].groupby(['prev_state', 'concept:name', 'state']).cumsum()
temp = data[['prev_state', 'concept:name', 'state', 'relative_time', 'val_plus']]
stats_df = stats_df[['cdf']]
stats_df = stats_df.reset_index()
temp = temp.merge(stats_df, how='inner', on=['prev_state', 'concept:name', 'state'],
suffixes=("", "_right"))
temp = temp.loc[temp.val_plus >= temp.relative_time_right]
temp = temp.groupby(['prev_state', 'concept:name', 'state', 'relative_time', 'val_plus']).cdf.max().reset_index()
    # reset the data index so it is preserved through the merge
data=data.reset_index()
t_join = data.merge(temp, on=['prev_state', 'concept:name', 'state', 'relative_time'], how='left')
#reindexing the t_join dataframe with the original data index
t_join=t_join.set_index('index')
cdf_plus = t_join.cdf
"""CDF minus"""
temp = data[['prev_state', 'concept:name', 'state', 'relative_time', 'val_minus']]
temp = temp.merge(stats_df, how='inner', on=['prev_state', 'concept:name', 'state'],
suffixes=("", "_right"))
# negative values
temp.loc[temp.val_minus < 0, 'val_minus'] = 0
temp = temp.loc[temp.val_minus >= temp.relative_time_right]
temp = temp.groupby(['prev_state', 'concept:name', 'state', 'relative_time', 'val_minus']).cdf.max().reset_index()
    # reset the data index so it is preserved through the merge
data=data.reset_index()
t_join = data.merge(temp, on=['prev_state', 'concept:name', 'state', 'relative_time'], how='left')
# reindexing the t_join dataframe with the original data index
t_join = t_join.set_index('index')
cdf_minus = t_join.cdf
cdf_minus = cdf_minus.fillna(0)
return cdf_minus, cdf_plus
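# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): the empirical CDF
# construction used above, reduced to a single group -- value counts,
# normalised to a PDF, then a cumulative sum giving a step-function CDF.
def _demo_empirical_cdf():
    import pandas as pd
    times = pd.Series([1.0, 1.0, 2.0, 3.0, 3.0, 3.0], name='relative_time')
    freq = times.value_counts().sort_index()   # counts per distinct value
    pdf = freq / freq.sum()                    # empirical PDF
    cdf = pdf.cumsum()                         # empirical CDF: 1.0 -> 0.33, 2.0 -> 0.5, 3.0 -> 1.0
    return cdf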
# def estimate_CDF_per_partition_single_transition(data):
# stats_df = data.groupby(['prev_state', 'concept:name', 'state', 'relative_time'])['relative_time'].agg(
# 'count').pipe(pd.DataFrame).rename(
# columns={'relative_time': 'frequency'})
# # stats_df = data.groupby(['relative_time'])['relative_time'].agg(
# # 'count').pipe(pd.DataFrame).rename(
# # columns={'relative_time': 'frequency'})
# # PDF
# stats_df['pdf'] = stats_df['frequency'] / stats_df.groupby(['prev_state', 'concept:name', 'state']).frequency.sum()
# # stats_df['pdf'] = stats_df['frequency'] / stats_df.frequency.sum()
# """ CDF plus"""
# # stats_df['cdf'] = stats_df['pdf'].groupby(['prev_state', 'concept:name', 'state']).cumsum()
# stats_df['cdf'] = stats_df['pdf'].cumsum()
#
# temp = data[['prev_state', 'concept:name', 'state', 'relative_time', 'val_plus']]
# stats_df = stats_df[['cdf']]
# stats_df = stats_df.reset_index()
#
# # fix the below error (memory)
# temp = temp.merge(stats_df, how='inner', on=['prev_state', 'concept:name', 'state'],
# suffixes=("", "_right"))
# temp = temp.loc[temp.val_plus >= temp.relative_time_right]
# temp = temp.groupby(['prev_state', 'concept:name', 'state', 'relative_time', 'val_plus']).cdf.max().reset_index()
#
# #reseting the data index to keep it the same after merge
# data=data.reset_index()
# t_join = data.merge(temp, on=['prev_state', 'concept:name', 'state', 'relative_time'], how='left')
# #reindexing the t_join dataframe with the original data index
# t_join=t_join.set_index('index')
#
# cdf_plus = t_join.cdf
#
# """CDF minus"""
# temp = data[['prev_state', 'concept:name', 'state', 'relative_time', 'val_minus']]
# temp = temp.merge(stats_df, how='inner', on=['prev_state', 'concept:name', 'state'],
# suffixes=("", "_right"))
# # negative values
# temp.loc[temp.val_minus < 0, 'val_minus'] = 0
# temp = temp.loc[temp.val_minus >= temp.relative_time_right]
# temp = temp.groupby(['prev_state', 'concept:name', 'state', 'relative_time', 'val_minus']).cdf.max().reset_index()
#
# #reseting the data index to keep it the same after merge
# data=data.reset_index()
#
# t_join = data.merge(temp, on=['prev_state', 'concept:name', 'state', 'relative_time'], how='left')
#
# # reindexing the t_join dataframe with the original data index
# t_join = t_join.set_index('index')
#
# cdf_minus = t_join.cdf
# cdf_minus = cdf_minus.fillna(0)
#
# return cdf_minus, cdf_plus
def estimate_CDF_per_partition_single_transition(data):
    # a single-transition partition contains only one (prev_state, concept:name, state) group,
    # so the groupby below is effectively a no-op; it is kept for a consistent output shape
stats_df = data.groupby(['prev_state', 'concept:name', 'state', 'relative_time'])['relative_time'].agg(
'count').pipe(pd.DataFrame).rename(
columns={'relative_time': 'frequency'})
# PDF
stats_df['pdf'] = stats_df['frequency'] / stats_df.groupby(['prev_state', 'concept:name', 'state']).frequency.sum()
stats_df['cdf'] = stats_df['pdf'].groupby(['prev_state', 'concept:name', 'state']).cumsum()
stats_df = stats_df[['cdf']]
stats_df = stats_df.reset_index()
cdf_plus = cdf_plus_single_state(data, stats_df)
cdf_minus = cdf_minus_single_state(data, stats_df)
return cdf_minus, cdf_plus
def cdf_minus_single_state(data, stats_df):
"""CDF minus"""
    temp = data[['original_index', 'prev_state', 'concept:name', 'state', 'relative_time', 'val_minus']].copy()
    # clamp negative values to zero; the copy above keeps the clamp off the original frame
    temp.loc[temp.val_minus < 0, 'val_minus'] = 0
#todo: replicate the above.
# temp = temp.merge(stats_df, how='inner', on=['prev_state', 'concept:name', 'state'],
# suffixes=("", "_right"))
# temp = temp.loc[temp.val_minus >= temp.relative_time_right]
stats_df.state = stats_df.state.astype('int32')
stats_df.prev_state = stats_df.prev_state.astype('int32')
temp.sort_values('val_minus', inplace=True)
stats_df.sort_values('relative_time', inplace=True)
temp = pd.merge_asof(temp, stats_df, left_on='val_minus', right_on='relative_time',
# by=['prev_state', 'concept:name', 'state', 'relative_time'],
# direction="backward", tolerance=None,
suffixes=("", "_right"))
temp = temp.groupby(['prev_state', 'concept:name', 'state', 'relative_time', 'val_minus']).cdf.max().reset_index()
    # reset the index so the rows can be realigned with the original data after the merge
data = data.reset_index()
t_join = data.merge(temp, on=['prev_state', 'concept:name', 'state', 'relative_time'], how='left')
# reindexing the t_join dataframe with the original data index
t_join = t_join.set_index('original_index')
cdf_minus = t_join.cdf
cdf_minus = cdf_minus.fillna(0)
return cdf_minus
def cdf_plus_single_state(data, stats_df):
""" CDF plus"""
    temp = data[['original_index', 'prev_state', 'concept:name', 'state', 'relative_time', 'val_plus']].copy()
# TODO: reduce the size used by the following merge
# temp = temp.merge(stats_df, how='inner', on=['prev_state', 'concept:name', 'state'],
# suffixes=("", "_right"))
# temp = temp.loc[temp.val_plus >= temp.relative_time_right]
stats_df.state = stats_df.state.astype('int32')
stats_df.prev_state = stats_df.prev_state.astype('int32')
temp.sort_values('val_plus', inplace=True)
stats_df.sort_values('relative_time', inplace=True)
    temp = pd.merge_asof(temp, stats_df, left_on='val_plus', right_on='relative_time',
                         # by=['prev_state', 'concept:name', 'state', 'relative_time'],
                         # direction="backward", tolerance=None,
                         suffixes=("", "_right"))
temp = temp.groupby(['prev_state', 'concept:name', 'state', 'relative_time', 'val_plus']).cdf.max().reset_index()
    # reset the index so the rows can be realigned with the original data after the merge
data = data.reset_index()
t_join = data.merge(temp, on=['prev_state', 'concept:name', 'state', 'relative_time'], how='left')
# reindexing the t_join dataframe with the original data index
t_join = t_join.set_index('original_index')
cdf_plus = t_join.cdf
return cdf_plus
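# ---------------------------------------------------------------------------
# Illustrative sketch (assumption-labelled): pd.merge_asof as a step-function
# lookup, which is how the single-transition path above evaluates the CDF at
# val_plus / val_minus -- each query picks up the CDF of the largest observed
# relative_time not exceeding it (NaN below the observed support).
def _demo_merge_asof_cdf_lookup():
    import pandas as pd
    cdf_table = pd.DataFrame({'relative_time': [1.0, 2.0, 3.0],
                              'cdf': [0.33, 0.5, 1.0]})
    queries = pd.DataFrame({'val_plus': [0.5, 1.5, 2.5, 3.5]})
    # both frames must be sorted on their merge keys
    return pd.merge_asof(queries.sort_values('val_plus'),
                         cdf_table.sort_values('relative_time'),
                         left_on='val_plus', right_on='relative_time',
                         direction='backward')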
def estimate_P_k_vectorized(data,delta):
if data.relative_time_max==data.relative_time_min:
#in case of fixed distribution, use the worst case scenario
return (1-delta)/2
elif data.prev_state==0: #start time
return 0
return data.cdf_plus - data.cdf_minus
def epsilon_vectorized_internal(data, delta):
    if data.p_k + delta >= 1:
        # when p_k + delta >= 1 the formula below is undefined, so epsilon is fixed at 0.7
        return 0.7
    if data.p_k == 0:
        # when cdf_plus == cdf_minus the formula would yield an infinite epsilon,
        # so epsilon is fixed at 10 in that case.
return 10
# r =1 because of normalization
return (- np.log(data.p_k / (1.0 - data.p_k) * (1.0 / (delta + data.p_k) - 1.0)))
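# ---------------------------------------------------------------------------
# Illustrative sketch: the closed-form epsilon used above,
#   eps = -ln( p_k / (1 - p_k) * (1 / (delta + p_k) - 1) ),
# evaluated for one toy (p_k, delta) pair. With p_k = 0.3 and delta = 0.2 the
# inner factor is (0.3 / 0.7) * (1 / 0.5 - 1) ~= 0.4286, so eps ~= 0.85.
def _demo_epsilon(p_k=0.3, delta=0.2):
    import numpy as np
    return -np.log(p_k / (1.0 - p_k) * (1.0 / (delta + p_k) - 1.0))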
def partitioning_df(stats_df,plus_and_minus,tmp_dir,chunk_size = 1000):
""" the first state for large files is very large. We split the first state in a separate file.
Then all the other states are splitted into several files.
"""
# stats_df.to_csv('stats_df.csv', index=False, header=True, float_format='%.15f', compression='gzip',
# encoding='utf-8')
stats_df.sort_values(['prev_state','concept:name','state'], ascending=True, inplace=True)
plus_and_minus.sort_values(['prev_state','concept:name','state'], ascending=True, inplace=True)
# unique_states = stats_df.state.unique()
unique_states = stats_df.groupby(['prev_state','concept:name','state']).size().reset_index().rename(columns={0:'count'}).drop('count',axis=1)
large_states=stats_df.groupby(['prev_state','concept:name','state']).relative_time.count()
#separating large states from the others
large_states=large_states[large_states>1000].reset_index()
#['prev_state', 'concept:name', 'state']
curr_dir = os.getcwd()
idx=0
"""large state separately"""
for index,row in large_states.iterrows():
res = stats_df.loc[(stats_df.state==row['state']) & (stats_df.prev_state==row['prev_state']) & (stats_df['concept:name']==row['concept:name']), :]
res.to_pickle(os.path.join( tmp_dir, 'stats_df_%s' % (idx)))
plus_and_minus.loc[ (plus_and_minus.state==row['state']) & (plus_and_minus.prev_state==row['prev_state']) & (plus_and_minus['concept:name']==row['concept:name']), :] \
.to_pickle(os.path.join( tmp_dir, 'plus_and_minus_%s' % (idx)))
# unique_states=unique_states[unique_states!=current_state]
row_id=unique_states.index[ (unique_states.state==row['state'] )& (unique_states.prev_state==row['prev_state']) & (unique_states['concept:name']==row['concept:name'])].tolist()[0]
unique_states.drop(row_id, axis=0,inplace=True)
idx += 1
""" splitting other states regularly"""
max_index_of_large_states=idx
# print("partition of large states is %s"%(max_index_of_large_states-1))
for i in range(0, unique_states.shape[0], chunk_size):
# print("Current Chunck is: %s" % (i))
current_states = unique_states[i:i + chunk_size]
# res = stats_df.loc[stats_df.state.isin(current_states), :]
res = stats_df.iloc[current_states.index]
res.to_pickle(os.path.join( tmp_dir,'stats_df_%s'%(idx)))
# plus_and_minus.loc[plus_and_minus.state.isin(current_states), :]\
# .to_pickle(os.path.join( tmp_dir,'plus_and_minus_%s'%(idx)))
plus_and_minus.iloc[current_states.index] \
.to_pickle(os.path.join( tmp_dir, 'plus_and_minus_%s' % (idx)))
idx+=1
# return len(list(range(0, unique_states.shape[0], chunk_size))) #number of chunks
return idx , max_index_of_large_states # number of chunks , max largest state
def append_cdf(tmp_dir,num=1):
cdf_name=0
if num==1:
cdf_name='cdf_*'
else:
cdf_name='cdf2_*'
curr_dir = os.getcwd()
# dir_path=os.path.join( tmp_dir,cdf_name)
list_of_files = glob.glob(os.path.join( tmp_dir,cdf_name))
cdf=[]
for i in list_of_files:
with open(i,'rb') as handle:
cdf.append(pickle.load(handle))
    cdf = pd.concat(cdf)
    return cdf
from wf_core_data_dashboard import core
import wf_core_data
import mefs_utils
import pandas as pd
import inflection
import urllib.parse
import os
def generate_mefs_table_data(
test_events_path,
student_info_path,
student_assignments_path
):
test_events = pd.read_pickle(test_events_path)
student_info = pd.read_pickle(student_info_path)
student_assignments = pd.read_pickle(student_assignments_path)
students = mefs_utils.summarize_by_student(
test_events=test_events,
student_info=student_info,
student_assignments=student_assignments
)
groups = mefs_utils.summarize_by_group(
students=students,
grouping_variables=[
'school_year',
'group_name_mefs'
]
)
return students, groups
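# ---------------------------------------------------------------------------
# Minimal usage sketch (hedged): the three pickle paths below are
# placeholders for illustration only, not files shipped with this module.
def _demo_generate_mefs_table_data():
    students, groups = generate_mefs_table_data(
        test_events_path='data/test_events.pkl',                  # placeholder
        student_info_path='data/student_info.pkl',                # placeholder
        student_assignments_path='data/student_assignments.pkl'   # placeholder
    )
    return students, groups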
def groups_page_html(
groups,
school_year=None,
group_name_mefs=None,
title=None,
subtitle=None,
include_details_link=True
):
if title is None:
title = 'MEFS results'
if subtitle is None:
subtitle = ':'.join(filter(
lambda x: x is not None,
[
school_year,
group_name_mefs
]
)).replace('/', ':')
table_html = groups_table_html(
groups,
school_year=school_year,
group_name_mefs=group_name_mefs,
include_details_link=include_details_link
)
template = core.get_template("groups_table.html")
return template.render(
title=title,
subtitle=subtitle,
table_html=table_html
)
def students_page_html(
students,
school_year=None,
group_name_mefs=None,
title=None,
subtitle=None
):
if title is None:
title = 'MEFS results'
if subtitle is None:
subtitle = ':'.join(filter(
lambda x: x is not None,
[
school_year,
group_name_mefs
]
)).replace('/', ':')
table_html = students_table_html(
students=students,
school_year=school_year,
group_name_mefs=group_name_mefs
)
template = core.get_template("students_table.html")
return template.render(
title=title,
subtitle=subtitle,
table_html=table_html
)
def groups_table_html(
groups,
school_year=None,
group_name_mefs=None,
include_details_link=True
):
groups = groups.copy()
groups['mean_ending_total_score_sem_range'] = groups.apply(
lambda row: '{:.1f} – {:.1f}'.format(
row['mean_ending_total_score'] - row['mean_ending_total_score_sem'],
row['mean_ending_total_score'] + row['mean_ending_total_score_sem'],
) if not pd.isna(row['mean_ending_total_score']) and not pd.isna(row['mean_ending_total_score_sem']) else '',
axis=1
)
groups['mean_total_score_growth_sem_range'] = groups.apply(
lambda row: '{:+.1f} – {:+.1f}'.format(
row['mean_total_score_growth'] - row['mean_total_score_growth_sem'],
row['mean_total_score_growth'] + row['mean_total_score_growth_sem'],
) if not pd.isna(row['mean_total_score_growth']) and not pd.isna(row['mean_total_score_growth_sem']) else '',
axis=1
)
groups['mean_total_score_growth_per_school_year_sem_range'] = groups.apply(
lambda row: '{:+.1f} – {:+.1f}'.format(
row['mean_total_score_growth_per_school_year'] - row['mean_total_score_growth_per_school_year_sem'],
row['mean_total_score_growth_per_school_year'] + row['mean_total_score_growth_per_school_year_sem'],
) if not pd.isna(row['mean_total_score_growth_per_school_year']) and not pd.isna(row['mean_total_score_growth_per_school_year_sem']) else '',
axis=1
)
groups['mean_ending_percentile_sem_range'] = groups.apply(
lambda row: '{:.1f} – {:.1f}'.format(
row['mean_ending_percentile'] - row['mean_ending_percentile_sem'],
row['mean_ending_percentile'] + row['mean_ending_percentile_sem'],
) if not pd.isna(row['mean_ending_percentile']) and not pd.isna(row['mean_ending_percentile_sem']) else '',
axis=1
)
groups['mean_percentile_growth_sem_range'] = groups.apply(
lambda row: '{:+.1f} – {:+.1f}'.format(
row['mean_percentile_growth'] - row['mean_percentile_growth_sem'],
row['mean_percentile_growth'] + row['mean_percentile_growth_sem'],
) if not pd.isna(row['mean_percentile_growth']) and not pd.isna(row['mean_percentile_growth_sem']) else '',
axis=1
)
groups['mean_percentile_growth_per_school_year_sem_range'] = groups.apply(
lambda row: '{:+.1f} – {:+.1f}'.format(
row['mean_percentile_growth_per_school_year'] - row['mean_percentile_growth_per_school_year_sem'],
row['mean_percentile_growth_per_school_year'] + row['mean_percentile_growth_per_school_year_sem'],
) if not pd.isna(row['mean_percentile_growth_per_school_year']) and not pd.isna(row['mean_percentile_growth_per_school_year_sem']) else '',
axis=1
)
groups['mean_ending_total_score'] = groups['mean_ending_total_score'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['ending_total_score_sd'] = groups['ending_total_score_sd'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_ending_total_score_sem'] = groups['mean_ending_total_score_sem'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_ending_percentile'] = groups['mean_ending_percentile'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['ending_percentile_sd'] = groups['ending_percentile_sd'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_ending_percentile_sem'] = groups['mean_ending_percentile_sem'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_total_score_growth'] = groups['mean_total_score_growth'].apply(
lambda x: '{:+.1f}'.format(x) if not pd.isna(x) else ''
)
groups['total_score_growth_sd'] = groups['total_score_growth_sd'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_total_score_growth_sem'] = groups['mean_total_score_growth_sem'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_total_score_growth_per_school_year'] = groups['mean_total_score_growth_per_school_year'].apply(
lambda x: '{:+.1f}'.format(x) if not pd.isna(x) else ''
)
groups['total_score_growth_per_school_year_sd'] = groups['total_score_growth_per_school_year_sd'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_total_score_growth_per_school_year_sem'] = groups['mean_total_score_growth_per_school_year_sem'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_percentile_growth'] = groups['mean_percentile_growth'].apply(
lambda x: '{:+.1f}'.format(x) if not pd.isna(x) else ''
)
groups['percentile_growth_sd'] = groups['percentile_growth_sd'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_percentile_growth_sem'] = groups['mean_percentile_growth_sem'].apply(
lambda x: '{:.1f}'.format(x) if not pd.isna(x) else ''
)
groups['mean_percentile_growth_per_school_year'] = groups['mean_percentile_growth_per_school_year'].apply(
        lambda x: '{:+.1f}'.format(x) if not pd.isna(x) else ''
    )
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
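# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the comparison
# semantics those classes verify, shown on plain objects -- equality against
# NaT is elementwise False, inequality is elementwise True, and ordering a
# tz-naive index against a tz-aware one raises TypeError.
def _demo_dt64_comparison_semantics():
    import pandas as pd
    dti = pd.date_range("2016-01-01", periods=3)
    assert not (dti == pd.NaT).any()   # NaT never compares equal
    assert (dti != pd.NaT).all()       # ... and always compares unequal
    aware = dti.tz_localize("UTC")
    try:
        dti < aware                    # naive vs aware ordering raises
    except TypeError:
        return True
    return False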
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
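# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the arithmetic
# rules those tests exercise -- datetime64 plus a timedelta-like shifts the
# timestamps, datetime64 minus datetime64 yields timedelta64, and adding two
# datetime64 values raises TypeError.
def _demo_dt64_arithmetic_rules():
    import pandas as pd
    dti = pd.date_range("2016-01-01", periods=3)
    shifted = dti + pd.Timedelta(hours=2)   # datetime64 + timedelta -> datetime64
    deltas = shifted - dti                  # datetime64 - datetime64 -> timedelta64
    assert (deltas == pd.Timedelta(hours=2)).all()
    try:
        dti + dti                           # datetime64 + datetime64 is invalid
    except TypeError:
        return shifted, deltas
    return None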
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
# these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
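# Illustrative sketch, not part of the test suite above (names are examples only):
# the vectorized behavior exercised here can be reproduced interactively, e.g.
# dti = pd.date_range("2017-01-01", periods=3, tz="UTC")
# dti + pd.offsets.Hour(5) # Tick offset applied elementwise, tz preserved
# dti - pd.timedelta_range("0 days", periods=3) # elementwise timedelta subtraction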
# -*- coding: utf-8 -*-
"""Generator capacity factor plots .
This module contain methods that are related to the capacity factor
of generators and average output plots
"""
import logging
import numpy as np
import pandas as pd
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""capacity_factor MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The capacity_factor.py module contains methods that are
related to the capacity factor of generators.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.x = mconfig.parser("figure_size","xdimension")
self.y = mconfig.parser("figure_size","ydimension")
def avg_output_when_committed(self,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates barplots of the percentage average generation output when committed by technology type.
Each scenario is plotted by a different colored grouped bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Generation",self.Scenarios),
(True,"generator_Installed_Capacity",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
CF_all_scenarios = pd.DataFrame()
from define_collection_wave import folder
from helpers import create_folder, headers
import requests
from datetime import date
import json
import pandas as pd
path_greggs = create_folder('4_Greggs',folder)
request_url = 'https://production-digital.greggs.co.uk/api/v1.0/articles/masters?ExcludeUnpublished=true&ExcludeDuplicates=true&ExcludeHiddenFromMenu=true'
product_list = requests.get(request_url, headers=headers).json()
with open(path_greggs + '/greggs.json', 'w') as f:
json.dump(product_list, f)
greggs = []
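# Greggs category names live in a separate Storyblok datasource keyed by category id;
# the request below builds an id -> name lookup (the access token in the URL is
# redacted in this capture, so the call is shown as-is).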
category_map = requests.get('https://api.storyblok.com/v2/cdn/datasource_entries/?datasource=category-ids&version=published&token=<KEY>').json().get('datasource_entries')
lookup = {cat.get('value'): cat.get('name')for cat in category_map}
for product in product_list:
cat_id = product.get('articleCategoryId')
product_dict = {
'collection_date':date.today().strftime("%b-%d-%Y"),
'rest_name': 'Greggs',
'menu_id': product.get('articleCode'),
'item_name': product.get('articleName'),
'servingsize': product.get('articleSize').get('size'),
'servingsizeunit': product.get('articleSize').get('unitOfMeasure'),
'item_description': product.get('customerDescription').strip(),
'category': lookup[str(cat_id)],
'allergens': [allergen.get('name') for allergen in product.get('ingredients')]}
nutrient_dict = product.get('nutritionalValues')
for nutrient in nutrient_dict:
product_dict.update({nutrient.get('name'):nutrient.get('value'),
nutrient.get('name')+'_100': nutrient.get('valuePerHundredArticleSizeUnit')})
greggs.append(product_dict)
greggs = pd.DataFrame(greggs)
"""
Main interface module to use pyEPR.
Contains code to connect to Ansys and to analyze HFSS files using the EPR method.
Further contains code for autogenerated reports, analysis, and such.
Copyright <NAME>, <NAME>, and the pyEPR team
2015, 2016, 2017, 2018, 2019, 2020
"""
from __future__ import print_function # Python 2.7 and 3 compatibility
import os
import sys
import time
import pickle
import shutil
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
# Standard imports
from numpy import pi
from numpy.linalg import inv
from stat import S_ISREG, ST_CTIME, ST_MODE
from pandas import Series, DataFrame
from collections import OrderedDict
from pathlib import Path
# pyEPR custom imports
from . import ansys
from . import logger
from . import config
from . import Dict
from .ansys import ureg, CalcObject, ConstantVecCalcObject, set_property
from .toolbox.pythonic import print_NoNewLine, print_color, deprecated, fact, nck, \
divide_diagonal_by_2, print_matrix, DataFrame_col_diff, get_instance_vars,\
sort_df_col, sort_Series_idx
from .calcs.constants import epsilon_0, hbar, Planck, fluxQ
from .calcs.basic import CalcsBasic
from .calcs.back_box_numeric import bbq_hmt, make_dispersive
from .toolbox.plotting import cmap_discrete, legend_translucent
from .reports import plot_convergence_f_vspass, plot_convergence_max_df,\
plot_convergence_solved_elem, plot_convergence_maxdf_vs_sol
class Project_Info(object):
"""
Class containing options and information about the manipulation and analysis in HFSS.
Junction info:
-----------------------
self.junctions : OrderedDict()
A Josephson tunnel junction has to have its parameters specified here for the analysis.
Each junction is given a name and is specified by a dictionary.
It has the following properties:
`Lj_variable`: Name of HFSS variable that specifies junction inductance Lj defined
on the boundary condition in HFSS. DO NOT USE Global names that start
with $.
`rect`: Name of HFSS rectangle on which lumped boundary condition is specified.
`line`: Name of HFSS polyline which spans the length of the rectangle.
Used to define the voltage across the junction.
Used to define the current orientation for each junction.
Used to define sign of ZPF.
`length`: Length in HFSS of the junction rectangle and line
(specified in meters). You can use epr.parse_units('100um')
Example definition:
..code-block python
# Define a single junction
pinfo = Project_Info('')
pinfo.junctions['j1'] = {'Lj_variable' : 'Lj1',
'rect' : 'JJrect1',
'line' : 'JJline1',
'length' : parse_units('50um')} # Length is in meters
# Specify multiple junctions in HFSS model
n_junctions = 5
for i in range(1, 1+n_junctions):
pinfo.junctions[f'j{i}'] = {'Lj_variable' : f'Lj{i}',
'rect' : f'JJrect{i}',
'line' : f'JJline{i}',
'length' : parse_units('50um')}
HFSS app connection settings
-----------------------
project_path : str
Directory path to the hfss project file. Should be the directory, not the file.
default = None: Assumes the project is open, and thus gets the project based
on `project_name`
project_name : str, None
Name of the project within the project_path. "None" will get the current active one.
design_name : str, None
Name of the design within the project. "None" will get the current active one.
setup_name : str, None
Name of the setup within the design. "None" will get the current active one.
Additional init setting:
-----------------------
do_connect : True by default. Connect to HFSS
HFSS design settings
-----------------------
describe junction parameters
junc_rects = None
Name of junction rectangles in HFSS
junc_lines = None
Name of lines in HFSS used to define the current orientation for each junction
junc_LJ_names = None
Name of junction inductance variables in HFSS.
Note, DO NOT USE Global names that start with $.
junc_lens = None
Junction rect. length, measured in meters.
"""
class _Dissipative:
#TODO: remove and turn to dict
def __init__(self):
self.dielectrics_bulk = None
self.dielectric_surfaces = None
self.resistive_surfaces = None
self.seams = None
def __init__(self, project_path=None, project_name=None, design_name=None,
setup_name=None, do_connect=True):
# Path: format path correctly to system convention
self.project_path = str(Path(project_path)) \
if not (project_path is None) else None
self.project_name = project_name
self.design_name = design_name
self.setup_name = setup_name
## HFSS design: describe junction parameters
# TODO: introduce modal labels
self.junctions = Dict() # See above for help
self.ports = Dict()
## Dissipative HFSS volumes and surfaces
self.dissipative = self._Dissipative()
self.options = config.ansys
# Connected-to-HFSS variables
self.app = None
self.desktop = None
self.project = None
self.design = None
self.setup = None
if do_connect:
self.connect()
_Forbidden = ['app', 'design', 'desktop', 'project',
'dissipative', 'setup', '_Forbidden', 'junctions']
def save(self):
'''
Return a dictionary to save
'''
return dict(
pinfo=pd.Series(get_instance_vars(self, self._Forbidden)),
dissip=pd.Series(get_instance_vars(self.dissipative)),
options=pd.Series(get_instance_vars(self.options)),
junctions=pd.DataFrame(self.junctions))
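# Hedged note (illustrative only): save() packs the project settings into pandas
# objects so they can be stored alongside analysis results, e.g.
# saved = pinfo.save(); saved['pinfo']['project_name']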
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
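# Hedged sketch of the conversion exercised above (illustrative, outside the test):
# Period(freq='A', year=2007).asfreq('M', 'S') -> Period('2007-01', 'M')
# Period(freq='A', year=2007).asfreq('M', 'E') -> Period('2007-12', 'M')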
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all columns across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set for this DataFrame.
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_columns: The new columns to set for this DataFrame.
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
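# Hedged usage sketch (mirrors the pandas API; data values are illustrative):
# df = DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]})
# df.groupby('key').sum() # dispatches to DataFrameGroupBy defined in .groupby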
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
        thresh (int): The minimum number of non-NA values required to keep a label.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
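# Illustrative sketch (not part of the original module): dropna follows pandas
# semantics, where `thresh` keeps only labels with at least that many non-NA
# values and `subset` restricts the check to particular labels on the other
# axis. The frame `df` below is hypothetical.
#
#   df = DataFrame({"a": [1, None, 3], "b": [None, None, 6]})
#   df.dropna(axis=0, how="any")        # drops rows containing any NA
#   df.dropna(axis=0, thresh=2)         # keeps rows with >= 2 non-NA values
#   df.dropna(axis=0, subset=["b"])     # only considers column "b"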
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
        other: What to add to this DataFrame.
        axis: The axis to apply addition over. Only applicable to Series
            or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.add,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = self._query_compiler.add(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pandas.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas(pandas.DataFrame.agg, arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas(pandas.DataFrame.agg, func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.align,
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def all(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def any(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pandas.Series, dict)):
if isinstance(other, dict):
            other = pandas.Series(other)
'''
Expression.py - wrap various differential expression tools
===========================================================
:Tags: Python
Purpose
-------
This module provides tools for differential expression analysis
for a variety of methods.
Methods implemented are:
DESeq
EdgeR
ttest
The aim of this module is to run these individual tools and
output a table in a common format.
Usage
-----
Documentation
-------------
Requirements:
* DESeq >= 1.17
* DESeq2 >= 1.5.62
* edgeR >= 3.7.16
* gplots >= 2.14.2
* ggplot2 >= 1.0.0
* reshape >= 0.8.5
* RColorBrewer >= 1.0.5
* grid >= 3.1.1
* limma >= 3.21.18
* samr >= 2.0 (optional)
* siggenes >= 1.39.0 (optional)
Code
----
To do:
--check contrasts against design model
'''
import os
import math
import numpy
import sys
import collections
import itertools
import re
import pandas
import copy
import numpy as np
from scipy.stats import ttest_ind
import matplotlib
import matplotlib.pyplot as plt
import rpy2
from rpy2.robjects import r
from rpy2.robjects import r as R
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
try:
from rpy2.rinterface import RRuntimeError
except ImportError:
from rpy2.rinterface_lib.embedded import RRuntimeError
try:
import cgatcore.experiment as E
import cgatcore.iotools as iotools
import cgat.Stats as Stats
except ImportError:
import experiment as E
import iotools
import Stats
# activate pandas/rpy conversion
# pandas2ri.activate()
# AH: Only do this on demand, module might not be
# be able to be imported if there are any issues.
# grdevices = importr('grDevices')
def runDETest(raw_DataFrame,
design_file,
outfile,
de_caller,
**kwargs):
''' provide higher level API to run tools with default setting '''
if de_caller.lower() == "deseq":
pass
else:
raise ValueError("Unknown caller")
def splitModel(model):
'''returns the terms in the model'''
return [x for x in
re.split("[\.:,~+\s*]", re.sub("~(\s*)?", "", model)) if
len(x) > 0]
def adjustPvalues(p_values):
'''return a list of BH adjusted pvalues'''
# import r stats module to adjust pvalues
stats = importr('stats')
adj_pvalues = list(stats.p_adjust(FloatVector(p_values), method='BH'))
return adj_pvalues
def pvaluesToSignficant(p_values, fdr):
'''return a list of bools for significance'''
return [int(x < fdr) for x in p_values]
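# A minimal, illustrative sketch (not part of the original API) showing how the
# helper functions above combine: splitModel() extracts the model terms, while
# adjustPvalues() and pvaluesToSignficant() turn raw p-values into BH-adjusted
# values and 0/1 significance flags. Requires a working rpy2/R installation;
# the p-values are made up.
def _example_pvalue_helpers(fdr=0.05):
    '''illustrative only; values are made up.'''
    terms = splitModel("~group+pair")          # -> ['group', 'pair']
    raw_p = [0.001, 0.04, 0.2, 0.5]
    adj_p = adjustPvalues(raw_p)               # BH adjustment via R stats
    flags = pvaluesToSignficant(adj_p, fdr)    # 1 if adjusted p < fdr else 0
    return terms, adj_p, flags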
class ExperimentalDesign(object):
"""Objects representing experimental designs.
This class takes an experimental design in tabular
form and exports several convenience functions and
attributes.
`filename_or_table` can be a filename of a tab-separated table
with the following columns.
track
the sample name
include
whether or not this sample should be included
in the design
groups
a label grouping several samples into a group
pair
for paired tests, a numeric identifier linking
samples that are paired.
    An example of an experimental design with two groups and paired
samples is below::
track include group pair
sample1 1 treatment 1
sample2 1 treatment 2
sample3 1 control 1
sample4 1 control 2
When providing `filename_or_table`, the `include` column is used
to directly filter the design to remove any non-included samples.
Additional columns will be added as factors to the design.
Alternatively, `filename_or_table` can be a pandas DataFrame with
sample names as row index and the appropriate columns.
Attributes
-----------
table : pandas DataFrame
dataframe object describing the design
groups : list
list of groups in the design
conditions : list
group for each sample
pairs : list
pair status for each sample
samples: list
sample names
factors: list
factors for each sample
has_replicates : bool
True if at least one group has multiple samples
has_pairs : bool
True if design is a paired design
"""
def __init__(self, filename_or_table):
# read in table in the constructor for ExpDesign
# e.g design = ExpDesign(pd.read_csv(...))
if isinstance(filename_or_table, str):
self.table = pandas.read_csv(filename_or_table, sep="\t",
index_col=0)
elif isinstance(filename_or_table, pandas.core.frame.DataFrame):
self.table = filename_or_table
else:
raise ValueError("Type needs to be string or pandas data frame."
"Type = %s", type(filename_or_table))
        assert self.table.shape[0] > 0, "design table is empty"
# parse the design table. Users probably expect this
# to happen once data is uploaded.
self._update()
def _update(self):
"""parse design file and fill class attributes.
Call this function whenever self.table changes.
"""
# remove all entries that should not be included
self.table = self.table[self.table["include"] != 0]
# define attributes
self.conditions = self.table['group'].tolist()
self.pairs = self.table['pair'].tolist()
# TS - use OrderedDict to retain order in unique
self.groups = (list(collections.OrderedDict.fromkeys(
self.conditions)))
self.samples = self.table.index.tolist()
# Test if replicates exist, i.e at least one group has multiple samples
# TS - does this need to be extended to check whether replicates exist
# for each group?
max_per_group = max([self.conditions.count(x) for x in self.groups])
self.has_replicates = max_per_group >= 2
# Test if pairs exist:
npairs = len(set(self.pairs))
has_pairs = npairs == 2
# ..if so, at least two samples are required per pair
if has_pairs:
min_per_pair = min([self.pairs.count(x) for x in set(self.pairs)])
self.has_pairs = min_per_pair >= 2
else:
self.has_pairs = False
# all columns except "include" may be considered as factors
self.factors = self.table.drop(["include"], axis=1)
# remove "pair" from factor if design does not include pairs
if not self.has_pairs:
self.factors.drop("pair", inplace=True, axis=1)
def validate(self, counts=None, model=None):
if counts is not None:
missing = set(self.samples).difference(set(counts.table.columns))
if len(missing) > 0:
raise ValueError(
"following samples in design table are missing"
" from counts table: %s" % ", ".join(missing))
if model is not None:
# check all model terms exist
model_terms = splitModel(model)
missing = set(model_terms).difference(
set(self.table.columns.tolist()))
if len(missing) > 0:
raise ValueError("following terms in the model are missing"
" from the design table: %s" %
", ".join(missing))
# check there are at least two values for each level
for term in model_terms:
                levels = set(self.table.loc[:, term])
if len(levels) < 2:
raise ValueError("term '%s' in the model has less "
"than two levels (%s) in the "
" design table" %
(term, ", ".join(levels)))
def restrict(self, counts):
''' return design with samples not in counts table removed '''
        self.table = self.table.loc[counts.table.columns, :]
def revalidate(self, counts, model=None):
''' re-validate, i.e post filtering of counts table '''
if len(set(self.samples).symmetric_difference(
set(counts.table.columns))) > 0:
self.restrict(counts)
self._update()
self.validate(counts, model)
else:
pass
def firstPairOnly(self):
'''restrict the design table to the first pair only.
If unpaired will retain whole design table
'''
if not self.pairs:
self.pairs = self.table['pair'].tolist()
        self.table = self.table.loc[self.table['pair'] == min(self.pairs), :]
def getSamplesInGroup(self, group):
"""return list of sample names belonging to group."""
if group not in self.groups:
raise KeyError("group '%s' not present")
return self.table[self.table["group"] == group].index.tolist()
def getGroupForSample(self, sample):
"""return group a sample belongs to"""
return self.table.loc[sample]["group"]
def getGroups2Samples(self):
"""return a dictionary mapping a group to samples within the group.
Returns
-------
dict
with groups as keys and list of samples within a group as values.
"""
groups_to_tracks = {}
for group in self.groups:
match_group = (self.table['group'] == group).tolist()
subset = self.table.iloc[match_group, ]
groups_to_tracks[group] = subset.index.tolist()
return groups_to_tracks
def mapGroupsSuffix(self, shuffle_suffix, keep_suffix):
'''use suffixes supplied to extract groups from the
design table and return dictionaries mapping each group to tracks
for keeping with tracks which should be shuffled
'''
groups_to_keep_tracks = {}
groups_to_spike_tracks = {}
keep_suffix = keep_suffix.split(",")
for group in self.groups:
match_group = (self.table['group'] == group).tolist()
tmp_design = self.table.iloc[match_group, ]
groups_to_spike_tracks[group] = [
x + shuffle_suffix for x in tmp_design.index.tolist()]
groups_to_keep_tracks[group] = copy.copy(
groups_to_spike_tracks[group])
groups_to_keep_tracks[group].extend(
[x + y for x in tmp_design.index.tolist() for y in keep_suffix])
return groups_to_keep_tracks, groups_to_spike_tracks
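# A minimal, illustrative sketch (not part of the original API) of how an
# ExperimentalDesign can be built directly from an in-memory pandas DataFrame
# rather than a file. The sample and group names are made up.
def _example_experimental_design():
    '''illustrative only.'''
    table = pandas.DataFrame(
        {"include": [1, 1, 1, 1],
         "group": ["treatment", "treatment", "control", "control"],
         "pair": [1, 2, 1, 2]},
        index=["sample1", "sample2", "sample3", "sample4"])
    design = ExperimentalDesign(table)
    # design.groups -> ['treatment', 'control']
    # design.getSamplesInGroup('control') -> ['sample3', 'sample4']
    return design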
class DEExperiment(object):
''' base clase for DE experiments '''
def __init__(self):
pass
def run(self):
''' Custom DE functions
return a DEResult object'''
class DEResult(object):
''' base class for DE result '''
def __init__(self, testTable=None):
self.table = testTable
def getResults(self):
''' post-process results into generic output
columns are:
- contrast
- treatment_name
- control_name
- test_id
- control_mean
- treatment_mean
- control_std
- treatment_std
- p_value
- p_value_adj
- significant
- l2fold
- transformed_l2fold
- fold
- status
'''
def calculateIHW(self, alpha=0.1):
''' Use the Independent Hypothesis Weighting method from
IGNATIADIS et al (2016) to perform weighted FDR'''
if not ('control_mean' in self.table.columns and
'treatment_mean' in self.table.columns and
'p_value' in self.table.columns):
E.error("IHW requires control_mean, treatment_mean and p_value "
"columns, have you first run the getResults method?")
runIHW = r('''function(df){
library(IHW)
mean_expression = (df$control_mean + df$treatment_mean)/2
ihw_res = ihw(df$p_value ~ mean_expression, alpha = %(alpha)s)
df$p_value_adj = adj_pvalues(ihw_res)
return(df)
}''' % locals())
self.table = pandas2ri.ri2py(runIHW(pandas2ri.py2ri(self.table)))
self.table["significant"] = pvaluesToSignficant(
self.table["p_value_adj"], alpha)
def summariseDEResults(self):
''' summarise DE results. Counts instances of possible outcomes'''
# TS: the summarising is now split by the comparison being made and a
# dict returned with keys=comparisons, value=E.Counter per comparison
self.Summary = {}
control_names = set(self.table['control_name'])
treatment_names = set(self.table['treatment_name'])
for control, treatment in itertools.product(control_names,
treatment_names):
tmp_table = self.table[self.table['control_name'] == control]
tmp_table = tmp_table[tmp_table['treatment_name'] == treatment]
tmp_table.reset_index(inplace=True)
# check control, treatment combination exists
n_rows = tmp_table.shape[0]
if n_rows > 0:
if control != treatment:
label = control + "_" + treatment
else:
label = control
label = re.sub(":", "_int_", label)
counts = E.Counter()
counts.signficant = sum(tmp_table['significant'])
counts.insignficant = (len(tmp_table['significant']) -
counts.signficant)
counts.all_over = sum([x > 0 for x in tmp_table['l2fold']])
counts.all_under = sum([x < 0 for x in tmp_table['l2fold']])
counts.signficant_over = sum(
[tmp_table['significant'][x] == 1 and
tmp_table['l2fold'][x] > 0 for x in range(0, n_rows)])
counts.signficant_under = sum(
[tmp_table['significant'][x] == 1 and
tmp_table['l2fold'][x] < 0 for x in range(0, n_rows)])
self.Summary[label] = counts
def plotMA(self, contrast=None, outfile_prefix=None,
point_alpha=1, point_size=1, R=None):
''' base function for making a MA plot '''
if not R:
R = rpy2.robjects.r
ro.globalenv['tmp_df'] = pandas2ri.py2ri(self.table)
R('''
suppressMessages(library(ggplot2))
suppressMessages(library(grid))
l_txt = element_text(size=20)
tmp_df = tmp_df[tmp_df$contrast=="%(contrast)s",]
tmp_df = tmp_df[order(-tmp_df$p_value_adj),]
p = ggplot(tmp_df, aes(log((control_mean+treatment_mean)/2,2),
transformed_l2fold,
colour=as.factor(significant))) +
geom_point(size=%(point_size)f, alpha=%(point_alpha)f) +
xlab("log2 mean expression") + ylab("log2 fold change")+
ggtitle("%(contrast)s") +
scale_colour_manual(name="Significant", values=c("black", "red")) +
guides(colour = guide_legend(override.aes = list(size=10)))+
theme_bw() +
theme(axis.text.x = l_txt, axis.text.y = l_txt,
axis.title.x = l_txt, axis.title.y = l_txt,
legend.title = l_txt, legend.text = l_txt,
title=l_txt, legend.key.size=unit(1, "cm"),
aspect.ratio=1)
suppressMessages(
ggsave(file="%(outfile_prefix)s_%(contrast)s_MA_plot.png",
width=10, height=10))''' % locals())
def plotVolcano(self, contrast=None, outfile_prefix=None, R=None):
''' base function for Volcano plotting'''
if not R:
R = rpy2.robjects.r
ro.globalenv['tmp_df'] = pandas2ri.py2ri(self.table)
R('''
suppressMessages(library(ggplot2))
suppressMessages(library(grid))
l_txt = element_text(size=20)
tmp_df = tmp_df[tmp_df$contrast=="%(contrast)s",]
p = ggplot(tmp_df, aes(transformed_l2fold, -log(p_value,10),
colour=as.factor(significant))) +
geom_point() + xlab("log2 fold change") + ylab("p-value (-log10)") +
ggtitle("%(contrast)s") +
scale_colour_manual(name="Significant", values=c("black", "#619CFF")) +
guides(colour = guide_legend(override.aes = list(size=10))) +
theme_bw() +
theme(axis.text.x = l_txt, axis.text.y = l_txt,
axis.title.x = l_txt, axis.title.y = l_txt,
legend.title = l_txt, legend.text = l_txt,
title=l_txt, legend.key.size=unit(1, "cm"))
suppressMessages(
ggsave(file="%(outfile_prefix)s_%(contrast)s_volcano_plot.png",
width=10, height=10))''' % locals())
def plotPvalueHist(self, contrast=None, outfile_prefix=None, R=None):
        ''' base function for plotting a p-value histogram'''
if not R:
R = rpy2.robjects.r
ro.globalenv['tmp_df'] = pandas2ri.py2ri(self.table)
R('''
suppressMessages(library(ggplot2))
suppressMessages(library(grid))
l_txt = element_text(size=20)
tmp_df = tmp_df[tmp_df$contrast=="%(contrast)s",]
p = ggplot(tmp_df, aes(p_value)) +
geom_histogram(fill="dodgerblue4") +
xlab("p-value") + ylab("count") +
ggtitle("p-value histogram - %(contrast)s") +
theme_bw() +
theme(axis.text.x = l_txt, axis.text.y = l_txt,
axis.title.x = l_txt, axis.title.y = l_txt,
title=l_txt)
suppressMessages(
ggsave(file="%(outfile_prefix)s_%(contrast)s_p_value_histogram.png",
width=10, height=10))''' % locals())
def plotPvalueQQ(self, contrast=None, outfile_prefix=None, R=None):
        ''' base function for a p-value QQ plot'''
if not R:
R = rpy2.robjects.r
ro.globalenv['tmp_df'] = pandas2ri.py2ri(self.table)
R('''
log_obs_pvalues = sort(-log10(tmp_df[['p_value']]))
uni_pvalues=runif(length(log_obs_pvalues))
log_uni_pvalues= -log10(uni_pvalues)
log_uni_pvalues = sort(log_uni_pvalues)
png(file="%(outfile_prefix)s_%(contrast)s_p_value_qq_plot.png")
plot(log_uni_pvalues,log_obs_pvalues,
xlab=expression(Theoretical~~-log[10](italic(p))),
ylab=expression(Observed~~-log[10](italic(p))),
main="P-value QQ-plot",
pch=20)
abline(0,1)''' % locals())
class DEExperiment_TTest(DEExperiment):
'''DECaller object to run TTest on counts data'''
# TS: to do: deal with genes/regions with zero counts
def run(self, counts, design, normalise=True,
normalise_method="deseq-size-factors"):
# TS: normalisation performed here rather than earlier as
# the method of normalisation is dependent upon the DE test
if normalise is True:
counts.normalise(method=normalise_method)
df_dict = collections.defaultdict(list)
for combination in itertools.combinations(design.groups, 2):
control, treatment = combination
n_rows = counts.table.shape[0]
df_dict["control_name"].extend((control,)*n_rows)
df_dict["treatment_name"].extend((treatment,)*n_rows)
df_dict["test_id"].extend(counts.table.index.tolist())
# set all status values to "OK"
df_dict["status"].extend(("OK",)*n_rows)
# subset counts table for each combination
c_keep = [x == control for
x in design.conditions]
control_counts = counts.table.iloc[:, c_keep]
t_keep = [x == treatment for
x in design.conditions]
treatment_counts = counts.table.iloc[:, t_keep]
c_mean = control_counts.mean(axis=1)
df_dict["control_mean"].extend(c_mean)
df_dict["control_std"].extend(control_counts.std(axis=1))
t_mean = treatment_counts.mean(axis=1)
df_dict["treatment_mean"].extend(t_mean)
df_dict["treatment_std"].extend(treatment_counts.std(axis=1))
t, prob = ttest_ind(control_counts, treatment_counts, axis=1)
df_dict["p_value"].extend(prob)
result = DEResult_TTest(testTable=pandas.DataFrame(df_dict))
result.table.set_index("test_id", inplace=True)
return result
class DEResult_TTest(DEResult):
def getResults(self, fdr):
''' post-process test results table into generic results output '''
# TS - what about zero values?!
self.table["fold"] = (
self.table["treatment_mean"] / self.table["control_mean"])
self.table["p_value_adj"] = adjustPvalues(self.table["p_value"])
self.table["significant"] = pvaluesToSignficant(
self.table["p_value_adj"], fdr)
self.table["l2fold"] = list(numpy.log2(self.table["fold"]))
# note: the transformed log2 fold change is not transformed for TTest
self.table["transformed_l2fold"] = self.table["l2fold"]
self.table["contrast"] = "_vs_".join((self.table['control_name'],
self.table['treatment_name']))
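# A minimal, illustrative sketch (not part of the original API) of the t-test
# workflow above. It assumes a Counts-like object (e.g. cgat.Counts.Counts)
# exposing a `.table` of counts and a `.normalise()` method, which is not
# defined in this module, and a tab-separated design file.
def _example_ttest_workflow(counts, design_file, fdr=0.05):
    '''illustrative only; `counts` is assumed to be a Counts object.'''
    design = ExperimentalDesign(design_file)
    design.validate(counts=counts)
    experiment = DEExperiment_TTest()
    result = experiment.run(counts, design)
    result.getResults(fdr=fdr)
    result.summariseDEResults()
    return result.table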
class DEExperiment_edgeR(DEExperiment):
'''DEExperiment object to run edgeR on counts data
See page 13 of the EdgeR user guide::
2. Simply pick a reasonable dispersion value, based on your
experience with similar data, and use that. Although
subjective, this is still more defensible than assuming Poisson
variation. Typical values are dispersion=0.4 for human data,
dispersion=0.1 for data on genetically identical model
organisms or dispersion=0.01 for technical replicates.
'''
def run(self,
counts,
design,
model=None,
contrast=None,
outfile_prefix=None,
ref_group=None,
fdr=0.1,
dispersion=None):
if not design.has_replicates and dispersion is None:
raise ValueError("no replicates and no dispersion")
# create r objects
r_counts = pandas2ri.py2ri(counts.table)
r_groups = ro.StrVector(design.conditions)
r_pairs = ro.StrVector(design.pairs)
r_has_pairs = ro.BoolVector([design.has_pairs])
r_has_replicates = ro.BoolVector([design.has_replicates])
if model is not None:
r_factors_df = pandas2ri.py2ri(design.factors)
else:
r_factors_df = ro.default_py2ri(False)
E.info('running edgeR: groups=%s, replicates=%s, pairs=%s, '
'additional_factors:%s' %
(design.groups, design.has_replicates, design.has_pairs,
design.factors))
levels = set(design.table[contrast])
if len(levels) > 2:
E.warn(
"There are more than 2 levels for the "
"contrast specified" "(%s:%s). The log2fold changes in the "
"results table and MA plots will be for the first two "
"levels in the contrast. The p-value will be the p-value "
"for the overall significance of the contrast. Hence, some "
"genes will have a signficant p-value but 0-fold change "
"between the first two levels" % (contrast, levels))
# build DGEList object
buildDGEList = r('''
suppressMessages(library('edgeR'))
function(counts){
countsTable = DGEList(counts)
countsTable = calcNormFactors(countsTable)
return(countsTable)}''' % locals())
r_countsTable = buildDGEList(r_counts)
# build design matrix
buildDesign = r('''
function(factors_df){
for (level in colnames(factors_df)){
factors_df[[level]] <- factor(factors_df[[level]])
}
factors_df$%(contrast)s <- relevel(
factors_df$%(contrast)s, ref="%(ref_group)s")
design <- model.matrix(%(model)s, data=factors_df)
return(design)}''' % locals())
r_design = buildDesign(r_factors_df)
# fit model
fitModel = r('''
function(countsTable, design, has_replicates){
if (has_replicates[1] == TRUE) {
# estimate common dispersion
countsTable = estimateGLMCommonDisp( countsTable, design )
# estimate trended dispersion
countsTable <- estimateGLMTrendedDisp( countsTable, design)
# estimate tagwise dispersion
countsTable = estimateGLMTagwiseDisp( countsTable, design )
# fitting model to each tag
fit = glmFit( countsTable, design ) }
else {
# fitting model to each tag
fit = glmFit(countsTable, design, dispersion=%(dispersion)s) }
return(fit)}''' % locals())
r_fit = fitModel(r_countsTable, r_design, r_has_replicates)
E.info("Conducting likelihood ratio tests")
lrtTest = r('''
function(fit, design, factors_df, countsTable){
suppressMessages(library(reshape2))
lrt = glmLRT(fit)
lrt_table = as.data.frame(lrt$table)
lrt_table$contrast <- "%(contrast)s"
for (level in colnames(factors_df)){
factors_df[[level]] <- factor(factors_df[[level]])
}
factors_df$%(contrast)s <- relevel(
factors_df$%(contrast)s, ref="%(ref_group)s")
contrast_levels = as.vector(levels(factor(factors_df[["%(contrast)s"]])))
lrt_table$control_name <- contrast_levels[1]
lrt_table$treatment_name <- contrast_levels[2]
dt <- decideTestsDGE(lrt, adjust.method="BH", p.value=%(fdr)s)
isDE <- as.logical(dt)
DEnames <- rownames(fit)[isDE]
png(paste0(c("%(outfile_prefix)s", "MA.png"), collapse="_"))
plotSmear(lrt, de.tags=DEnames, cex=0.35, main="%(contrast)s")
abline(h=c(-1,1), col="blue")
dev.off()
return(lrt_table)}''' % locals())
r_lrt_table = lrtTest(r_fit, r_design, r_factors_df, r_countsTable)
result = DEResult_edgeR(testTable=pandas2ri.ri2py(r_lrt_table))
return result
class DEResult_edgeR(DEResult):
def getResults(self, fdr, DEtype="GLM"):
''' post-process test results table into generic results output '''
E.info("Generating output - results table")
df_dict = collections.defaultdict()
n_rows = self.table.shape[0]
df_dict["treatment_name"] = self.table['treatment_name']
df_dict["control_name"] = self.table['control_name']
df_dict["contrast"] = self.table['contrast']
df_dict["test_id"] = self.table.index
df_dict["control_mean"] = self.table['logCPM']
df_dict["treatment_mean"] = self.table['logCPM']
df_dict["control_std"] = (0,)*n_rows
df_dict["treatment_std"] = (0,)*n_rows
df_dict["p_value"] = self.table['PValue']
df_dict["p_value_adj"] = adjustPvalues(self.table['PValue'])
df_dict["significant"] = pvaluesToSignficant(
df_dict["p_value_adj"], fdr)
df_dict["l2fold"] = (self.table['logFC'])
# TS: the transformed log2 fold change is not transformed!
df_dict["transformed_l2fold"] = df_dict["l2fold"]
# TS: check what happens when no fold change is available
# TS: may need an if/else in list comprehension. Raise E.warn too?
df_dict["fold"] = [math.pow(2, float(x)) for
x in self.table['logFC']]
# set all status values to "OK"
# TS: again, may need an if/else to check...
df_dict["status"] = ("OK",)*n_rows
self.table = pandas.DataFrame(df_dict)
class DEExperiment_DESeq2(DEExperiment):
'''DEExperiment object to run DESeq2 on counts data'''
def run(self,
counts,
design,
fdr=0.1,
fit_type="parametric",
model=None,
outfile_prefix=None,
ref_group=None,
contrast=None,
DEtest="Wald",
R=None):
if not R:
R = rpy2.robjects.r
pandas2ri.activate()
# R will replace any "-" with "." in rownames.
# Here, we make sure the design and counts samples are the same
design.table.index = [x.replace("-", ".") for x in design.table.index]
design.factors.index = [x.replace("-", ".") for x in design.factors.index]
counts.table.columns = [x.replace("-", ".") for x in counts.table.columns]
# create r objects
ro.globalenv['counts'] = pandas2ri.py2ri(counts.table)
ro.globalenv['design'] = pandas2ri.py2ri(design.table)
ro.globalenv['factors_df'] = pandas2ri.py2ri(design.factors)
        model_terms = [x for x in re.split(r"[\+~ ]+", model)[1:]
if x != "0"]
E.info('running DESeq2: groups=%s, replicates=%s, pairs=%s, '
'DE test: %s, additional_factors:%s, ' %
(design.groups, design.has_replicates, design.has_pairs,
DEtest, design.factors))
# load DESeq
R('''suppressMessages(library('DESeq2'))''')
# build DESeq2 Datasets (dds)
        assert contrast, ("must supply a contrast for wald or LRT "
                          "(for LRT, contrast is used to derive the reduced model)")
if DEtest == "wald":
assert ref_group, "Must supply a ref_group to perform Wald test"
if ref_group:
R('''
for(column in colnames(factors_df)){
factors_df[[column]] = factor(factors_df[[column]])
}
full_model <- formula("%(model)s")
factors_df$%(contrast)s <- relevel(
factors_df$%(contrast)s, ref="%(ref_group)s")
dds <- suppressMessages(DESeqDataSetFromMatrix(
countData= counts,
colData = factors_df,
design = full_model))
''' % locals())
else:
R('''
for(column in colnames(factors_df)){
factors_df[[column]] = factor(factors_df[[column]])
}
full_model <- formula("%(model)s")
dds <- suppressMessages(DESeqDataSetFromMatrix(
countData= counts,
colData = factors_df,
design = full_model))
''' % locals())
if DEtest == "wald":
levels = set(design.table[contrast])
if len(levels) > 2:
E.warn('''Using Wald test for factor with more than 2
levels (%s:%s), Consider LRT''' % (contrast, levels))
contrast = model_terms[-1]
contrast_levels = set(design.factors[contrast])
# performDifferentialTesting
R('''
dds = suppressMessages(
DESeq(dds, test="Wald", fitType="%(fit_type)s"))
contrast_levels = as.vector(levels(dds@colData$%(contrast)s))
png("%(outfile_prefix)s_dispersion.png")
plotDispEsts(dds)
dev.off()
res = suppressMessages(results(dds))
png(paste0(c("%(outfile_prefix)s", "MA.png"), collapse="_"))
plotMA(res, alpha=%(fdr)s)
dev.off()
res = as.data.frame(res)
c = counts(dds, normalized = TRUE)
res$contrast = "%(contrast)s"
contrast_levels = levels(dds@colData$%(contrast)s)
res$control = contrast_levels[1]
res$treatment = contrast_levels[2]
res['test_id'] = rownames(res)
''' % locals())
results = pandas2ri.ri2py(ro.globalenv['res'])
# liklihood ratio test
# Note that if there are more than 3 levels for the contrast,
# the results table will include a log2-fold change from the
# first two levels only, however, MA plots will be generated
# for each combination of levels
elif DEtest == "lrt":
levels = set(design.table[contrast])
if len(levels) > 2:
E.warn('''There are more than 2 levels for the
contrast specified" "(%s:%s). The log2fold changes in the
results table and MA plots will be for the first two
levels in the contrast. The p-value will be the p-value
for the overall significance of the contrast. Hence, some
            genes may have a significant p-value but ~0-fold change
between the first two levels''' % (contrast, levels))
n = 0
reduced_model = [x for x in model_terms if x != contrast]
if len(reduced_model) > 0:
reduced_model = "~" + "+".join(reduced_model)
else:
reduced_model = "~1"
print('''
ddsLRT <- suppressMessages(
DESeq(dds, test="LRT", reduced=formula("%(reduced_model)s"),
betaPrior=TRUE, fitType="%(fit_type)s"))
png("%(outfile_prefix)s_dispersion.png")
plotDispEsts(ddsLRT)
dev.off()
contrast_levels = as.vector(levels(dds@colData$%(contrast)s))
res = suppressMessages(results(ddsLRT, addMLE=TRUE,
contrast=c("%(contrast)s",
contrast_levels[2], contrast_levels[1])))
png(paste0(c("%(outfile_prefix)s", "MA.png"), collapse="_"))
plotMA(res, alpha=%(fdr)s)
dev.off()
res = as.data.frame(res)
res$contrast = "%(contrast)s"
if(length(contrast_levels)==2){
res$control = contrast_levels[1]
res$treatment = contrast_levels[2]
}
else{
res$control = "%(contrast)s"
res$treatment = "%(contrast)s"
}
res['test_id'] = rownames(res)
''' % locals())
R('''
ddsLRT <- suppressMessages(
DESeq(dds, test="LRT", reduced=formula("%(reduced_model)s"),
betaPrior=TRUE, fitType="%(fit_type)s"))
png("%(outfile_prefix)s_dispersion.png")
plotDispEsts(ddsLRT)
dev.off()
contrast_levels = as.vector(levels(dds@colData$%(contrast)s))
res = suppressMessages(results(ddsLRT, addMLE=TRUE,
contrast=c("%(contrast)s",
contrast_levels[2], contrast_levels[1])))
png(paste0(c("%(outfile_prefix)s", "MA.png"), collapse="_"))
plotMA(res, alpha=%(fdr)s)
dev.off()
res = as.data.frame(res)
res$contrast = "%(contrast)s"
if(length(contrast_levels)==2) {
res$control = contrast_levels[1]
res$treatment = contrast_levels[2]
} else {
res$control = "%(contrast)s"
res$treatment = "%(contrast)s"
}
res['test_id'] = rownames(res)
''' % locals())
results = pandas2ri.ri2py(ro.globalenv['res'])
else:
raise ValueError("DEtest must be 'wald' or 'lrt'")
final_result = DEResult_DESeq2(testTable=results)
return final_result
class DEResult_DESeq2(DEResult):
def getResults(self, fdr):
''' post-process test results table into generic results output '''
E.info("Generating output - results table")
df_dict = collections.defaultdict()
n_rows = self.table.shape[0]
df_dict["treatment_name"] = self.table['treatment']
df_dict["control_name"] = self.table['control']
df_dict["test_id"] = self.table['test_id']
df_dict["contrast"] = self.table['contrast']
df_dict["control_mean"] = self.table['baseMean']
df_dict["treatment_mean"] = self.table['baseMean']
df_dict["control_std"] = (0,)*n_rows
df_dict["treatment_std"] = (0,)*n_rows
df_dict["p_value"] = self.table['pvalue']
df_dict["p_value_adj"] = adjustPvalues(self.table['pvalue'])
df_dict["significant"] = pvaluesToSignficant(
df_dict["p_value_adj"], fdr)
df_dict["l2fold"] = self.table['log2FoldChange']
# Transformed l2fold is the shrunken values
df_dict["transformed_l2fold"] = self.table['log2FoldChange']
# TS: check what happens when no fold change is available
# TS: may need an if/else in list comprehension. Raise E.warn too?
df_dict["fold"] = [math.pow(2, float(x)) for
x in df_dict["l2fold"]]
# set all status values to "OK"
# TS: again, may need an if/else to check...
df_dict["status"] = ("OK",)*n_rows
self.table = pandas.DataFrame(df_dict)
# causes errors if multiple instance of same test_id exist, for example
# if multiple constrasts have been tested
# self.table.set_index("test_id", inplace=True)
class DEExperiment_DEXSeq(DEExperiment):
'''DEExperiment object to run DEXSeq on counts data'''
def run(self,
design,
base_dir,
model=None,
flattenedfile=None,
outfile_prefix=None,
ref_group=None,
contrast=None,
fdr=0.1):
pandas2ri.activate()
# create r objects
        E.info('running DEXSeq: groups=%s, pairs=%s, replicates=%s, has_pairs=%s,'
' additional_factors:' %
(design.groups, design.pairs, design.has_replicates,
design.has_pairs))
# load DEXSeq
R('''suppressMessages(library('DEXSeq'))''')
sampleTable = design.table
        allfiles = os.listdir(base_dir)
countfiles = []
for item in list(design.table.index):
countfiles += [base_dir+"/"+x for x in allfiles if item in x]
E.info("Processing Samples. Sample table:")
E.info("%s" % sampleTable)
buildCountDataSet = R('''
function(countFiles, gff, sampleTable, model){
full_model <- formula("%(model)s")
dxd <- suppressMessages(DEXSeqDataSetFromHTSeq(
countFiles,
sampleData=sampleTable,
flattenedfile=gff,
design=full_model))
contrast_levels = as.vector(levels(dxd@colData$%(contrast)s))
dxd = estimateSizeFactors(dxd)
dxd = estimateDispersions(dxd)
png("%(outfile_prefix)s_dispersion.png")
plotDispEsts(dxd)
dev.off()
dxd = testForDEU(dxd)
dxd = estimateExonFoldChanges( dxd, fitExpToVar="%(contrast)s")
result = DEXSeqResults(dxd)
result = as.data.frame(result)
result$contrast = "%(contrast)s"
result$log2FoldChange = result$log2fold
if(length(contrast_levels)==2) {
result$control = contrast_levels[1]
result$treatment = contrast_levels[2]
} else {
result$control = "%(contrast)s"
result$treatment = "%(contrast)s"
}
return(result)
}''' % locals())
result = pandas2ri.ri2py(
buildCountDataSet(countfiles, flattenedfile, sampleTable, model))
result['test_id'] = result.index
result['contrast'] = contrast
final_result = DEResult_DEXSeq(result)
return final_result
class DEResult_DEXSeq(DEResult):
def getResults(self, fdr):
''' post-process test results table into generic results output '''
E.info("Generating output - results table")
df_dict = collections.defaultdict()
n_rows = self.table.shape[0]
df_dict["treatment_name"] = self.table['treatment']
df_dict["control_name"] = self.table['control']
df_dict["test_id"] = self.table['test_id']
df_dict["contrast"] = self.table['contrast']
df_dict["control_mean"] = self.table['exonBaseMean']
df_dict["treatment_mean"] = self.table['exonBaseMean']
df_dict["control_std"] = (0,)*n_rows
df_dict["treatment_std"] = (0,)*n_rows
df_dict["p_value"] = self.table['pvalue']
df_dict["p_value_adj"] = adjustPvalues(self.table['pvalue'])
df_dict["significant"] = pvaluesToSignficant(
df_dict["p_value_adj"], fdr)
df_dict["l2fold"] = ("NA",)*n_rows
# Transformed l2fold is the shrunken values
df_dict["transformed_l2fold"] = self.table['log2FoldChange']
df_dict["fold"] = ("NA",)*n_rows
df_dict["status"] = ("OK",)*n_rows
self.table = pandas.DataFrame(df_dict)
# causes errors if multiple instance of same test_id exist, for example
# if multiple constrasts have been tested
# self.table.set_index("test_id", inplace=True)
def plotMAplot(self, design, outfile_prefix):
# need to implement DEXSeq specific MA plot
raise ValueError("MA plotting is not yet implemented for DESeq")
class DEExperiment_Sleuth(DEExperiment):
'''DEExperiment object to run sleuth on kallisto bootstrap files
Unlike the other DEExperiment instances, this does not operate on
a Counts.Counts object but instead reads the bootstrap hd5 files
from kallisto into memory in R and then performs the differential
testing
The run method expects all kallisto abundance.h5 files to be under
a single directory with a subdirectory for each sample
Note: LRT does not generate fold change estimates (see DEResult_Sleuth)
use dummy_run = True if you don't want to perform differential
testing but want the counts/tpm outfiles
'''
def run(self,
design,
base_dir,
model=None,
contrast=None,
outfile_prefix=None,
counts=None,
tpm=None,
fdr=0.1,
DE_test="wald",
reduced_model=None,
dummy_run=False,
genewise=False,
gene_biomart=None,
ref_group=None):
if DE_test == "lrt":
E.info("Note: LRT will not generate fold changes")
assert reduced_model is not None, ("need to provide a reduced "
"model to use LRT")
# Design table needs a "sample" column
design.table['sample'] = design.table.index
r_design_df = pandas2ri.py2ri(design.table)
        E.info('running sleuth: groups=%s, pairs=%s, replicates=%s, has_pairs=%s, '
'additional_factors:' %
(design.groups, design.pairs, design.has_replicates,
design.has_pairs))
# load sleuth
r('''suppressMessages(library('sleuth'))''')
# make variates string to ensure all model terms are in the
# design dataframe for sleuth
        model_terms = [x for x in re.split(r"[\+~ ]+", model)[1:]
if x != "0"]
variates = "c(%s)" % ",".join(model_terms)
# need to code in option to not use a reference group (e.g for LRT)
if genewise:
assert gene_biomart, ("for genewise analysis, "
"must provide a 'gene_biomart'")
createSleuthObject = r('''
function(design_df){
library(biomaRt)
sample_id = design_df$sample
kal_dirs <- sapply(sample_id,
function(id) file.path('%(base_dir)s', id))
design_df <- dplyr::select(design_df, sample = sample,
%(variates)s)
design_df <- dplyr::mutate(design_df, path = kal_dirs)
%(contrast)s <- factor(design_df$%(contrast)s)
%(contrast)s <- relevel(%(contrast)s,ref='%(ref_group)s')
md <- model.matrix(%(model)s, design_df)
colnames(md)[grep("%(contrast)s", colnames(md))] <- '%(contrast)s%(ref_group)s'
mart <- biomaRt::useMart(biomart = "ENSEMBL_MART_ENSEMBL",
#dataset = "hsapiens_gene_ensembl",
dataset = "%(gene_biomart)s",
host="www.ensembl.org")
t2g <- biomaRt::getBM(
attributes = c("ensembl_transcript_id","ensembl_gene_id",
"external_gene_name"), mart = mart)
t2g <- dplyr::rename(t2g, target_id = ensembl_transcript_id,
ens_gene = ensembl_gene_id,
ext_gene = external_gene_name)
so <- sleuth_prep(design_df, md,
target_mapping = t2g, aggregation_column = 'ens_gene')
so <- suppressMessages(sleuth_fit(so))
return(so)
}''' % locals())
else:
createSleuthObject = r('''
function(design_df){
sample_id = design_df$sample
kal_dirs <- sapply(sample_id,
function(id) file.path('%(base_dir)s', id))
design_df <- dplyr::select(design_df, sample = sample,
%(variates)s)
design_df <- dplyr::mutate(design_df, path = kal_dirs)
%(contrast)s <- factor(design_df$%(contrast)s)
%(contrast)s <- relevel(%(contrast)s,ref='%(ref_group)s')
md <- model.matrix(%(model)s, design_df)
colnames(md)[grep("%(contrast)s", colnames(md))] <- '%(contrast)s%(ref_group)s'
so <- sleuth_prep(design_df, md)
so <- sleuth_fit(so)
return(so)
}''' % locals())
so = createSleuthObject(r_design_df)
# write out counts and tpm tables if required
if counts:
makeCountsTable = r('''
function(so){
library('reshape')
df = cast(so$obs_raw, target_id~sample, value = "est_counts")
colnames(df)[1] <- "transcript_id"
write.table(df, "%(counts)s", sep="\t", row.names=F, quote=F)
}''' % locals())
makeCountsTable(so)
if tpm:
makeTPMTable = r('''
function(so){
library('reshape')
df = cast(so$obs_raw, target_id~sample, value = "tpm")
colnames(df)[1] <- "transcript_id"
write.table(df, "%(tpm)s", sep="\t", row.names=F, quote=F)
}''' % locals())
makeTPMTable(so)
if dummy_run:
return None
if DE_test == "lrt":
differentialTesting = r('''
function(so){
so <- suppressMessages(sleuth_fit(so, formula = %(reduced_model)s,
fit_name = "reduced"))
so <- suppressMessages(sleuth_lrt(so, "reduced", "full"))
results_table <- sleuth_results(so, test = 'reduced:full',
test_type = 'lrt')
return(results_table)
} ''' % locals())
final_result = DEResult_Sleuth(pandas2ri.ri2py(
differentialTesting(so)))
elif DE_test == "wald":
differentialTesting = r('''
function(so){
so <- sleuth_wt(so, which_beta = '%(contrast)s%(ref_group)s')
p_ma = plot_ma(so, '%(contrast)s%(ref_group)s')
ggsave("%(outfile_prefix)s_%(contrast)s_sleuth_ma.png",
width=15, height=15, units="cm")
p_vars = plot_vars(so, '%(contrast)s%(ref_group)s')
ggsave("%(outfile_prefix)s_%(contrast)s_sleuth_vars.png",
width=15, height=15, units="cm")
p_mean_var = plot_mean_var(so)
ggsave("%(outfile_prefix)s_%(contrast)s_sleuth_mean_var.png",
width=15, height=15, units="cm")
results_table <- sleuth_results(so, test = '%(contrast)s%(ref_group)s')
return(results_table)
} ''' % locals())
results = pandas2ri.ri2py(differentialTesting(so))
results['contrast'] = contrast
else:
raise ValueError("unknown DE test type, use 'wald' or 'lrt'")
final_result = DEResult_Sleuth(results)
return final_result
class DEResult_Sleuth(DEResult):
def getResults(self, fdr):
''' post-process test results table into generic results output
expression and fold changes from Sleuth are natural logs'''
E.info("Generating output - results table")
df_dict = collections.defaultdict()
n_rows = self.table.shape[0]
df_dict["treatment_name"] = ("NA",)*n_rows
df_dict["control_name"] = ("NA",)*n_rows
df_dict["test_id"] = self.table['target_id']
df_dict["contrast"] = self.table['contrast']
df_dict["control_mean"] = [math.exp(float(x)) for
x in self.table['mean_obs']]
df_dict["treatment_mean"] = df_dict["control_mean"]
df_dict["control_std"] = (0,)*n_rows
df_dict["treatment_std"] = (0,)*n_rows
df_dict["p_value"] = self.table['pval']
df_dict["p_value_adj"] = adjustPvalues(self.table['pval'])
df_dict["significant"] = pvaluesToSignficant(df_dict["p_value_adj"],
fdr)
df_dict["fold"] = [math.exp(float(x)) for
x in self.table['b']]
df_dict["l2fold"] = [math.log(float(x), 2) for x in df_dict['fold']]
df_dict["transformed_l2fold"] = df_dict["l2fold"]
# set all status values to "OK"
# TS: again, may need an if/else to check...
df_dict["status"] = ("OK",)*n_rows
self.table = pandas.DataFrame(df_dict)
# causes errors if multiple instance of same test_id exist, for example
# if multiple constrasts have been tested
# self.table.set_index("test_id", inplace=True)
###############################################################################
def buildProbeset2Gene(infile,
outfile,
database="hgu133plus2.db",
mapping="hgu133plus2ENSEMBL"):
'''build map relating a probeset to an ENSEMBL gene_id'''
R.library(database)
# map is a Bimap object
m = r(mapping)
result = R.toTable(m)
outf = open(outfile, "w")
outf.write("probe_id\tgene_id\n")
for probeset_id, gene_id in zip(result["probe_id"],
result["ensembl_id"]):
outf.write("%s\t%s\n" % (probeset_id, gene_id))
outf.close()
E.info("written %i mappings to %s: probes=%i, genes=%i" %
(len(result),
outfile,
len(set(result["probe_id"])),
len(set(result["ensembl_id"]))))
GeneExpressionResult = collections.namedtuple(
"GeneExpressionResult",
"test_id treatment_name treatment_mean treatment_std "
"control_name control_mean control_std "
"pvalue qvalue l2fold fold transformed_l2fold "
"significant status")
def writeExpressionResults(outfile, result):
'''output expression results table.'''
if outfile == sys.stdout:
outf = outfile
else:
outf = iotools.open_file(outfile, "w")
outf.write("%s\n" % "\t".join(GeneExpressionResult._fields))
for x in sorted(result):
outf.write("%s\n" % "\t".join(map(str, x)))
if outf != sys.stdout:
outf.close()
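# A minimal, illustrative sketch (not part of the original API) showing the
# shape of rows expected by writeExpressionResults(): one GeneExpressionResult
# namedtuple per gene, with l2fold/fold kept consistent. All values are made up
# and the output file name is hypothetical.
def _example_write_results(outfile="results.tsv"):
    '''illustrative only.'''
    row = GeneExpressionResult._make((
        "gene1",                   # test_id
        "treatment", 12.0, 1.5,    # treatment name, mean, std
        "control", 6.0, 1.2,       # control name, mean, std
        0.001,                     # pvalue
        0.01,                      # qvalue
        1.0,                       # l2fold
        2.0,                       # fold
        1.0,                       # transformed_l2fold
        1,                         # significant
        "OK"))                     # status
    writeExpressionResults(outfile, [row])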
class WelchsTTest(object):
'''base class for computing expression differences.
'''
def __call__(self,
probesets,
treatments,
controls):
assert len(probesets) == len(treatments[0])
assert len(probesets) == len(controls[0])
nskipped = 0
results = []
for probeset, treatment, control in zip(
probesets, zip(*treatments), zip(*controls)):
nval1, nval2 = len(treatment), len(control)
mean1, mean2 = numpy.mean(treatment), numpy.mean(control)
stddev1, stddev2 = numpy.std(treatment), numpy.std(control)
try:
s = Stats.doWelchsTTest(nval1, mean1, stddev1,
nval2, mean2, stddev2,
alpha=0.05)
except ValueError:
E.warn(
"expressionDifferences: standard deviations are 0 for "
"probeset %s - skipped" % probeset)
nskipped += 1
continue
s.mProbeset = probeset
results.append(s)
qvalues = Stats.doFDR([x.mPValue for x in results]).mQValues
for s, qvalue in zip(results, qvalues):
s.mQValue = qvalue
return results, nskipped
class SAMR(object):
'''SAM analysis of microarray data.
Use the Two-Class Unpaired Case Assuming Unequal Variances.
This uses the samr library.
Significant genes are either called at *fdr* or the
top *ngenes* are returned.
*treatments* and *control* are arrays of
arrays of expression values.
See
https://stat.ethz.ch/pipermail/bioconductor/2008-July/023251.html
for an explanation of the differences between siggens SAM
and Excel SAM. This version is parameterised to reproduce Excel SAM
by setting::
var.equal = TRUE
med = TRUE
.. note::
SAM requires log2 scaled expression levels.
'''
def __call__(self, probesets,
treatments,
controls,
pattern=None,
fdr=0.10,
ngenes=None,
npermutations=1000,
ndelta=10,
method="ttest"):
if ngenes and fdr:
raise ValueError("either supply ngenes or fdr, but not both.")
R.library("samr")
m = numpy.matrix(treatments + controls)
m = numpy.transpose(m)
labels = numpy.array([1] * len(treatments) + [2] * len(controls))
R.assign("x", numpy.array(m))
R.assign("y", labels)
R.assign("probesets", probesets)
data = r(
'''data=list( x=x, y=y, geneid=1:length(probesets), genenames=probesets, logged2=TRUE)''')
result = r(
'''samr.obj<-samr(data, resp.type="Two class unpaired", nperms=100)''')
r('''plot(samr.obj, delta=.4)''')
class SAM(object):
'''SAM analysis of microarray data.
Use the Two-Class Unpaired Case Assuming Unequal Variances.
This uses the siggenes library. Note that there is also
an rsam package at:
http://rss.acs.unt.edu/Rdoc/library/samr/html/samr.html
Significant genes are either called at *fdr* or the
top *ngenes* are returned.
*treatments* and *control* are arrays of
arrays of expression values.
See
https://stat.ethz.ch/pipermail/bioconductor/2008-July/023251.html
for an explanation of the differences between siggens SAM
and Excel SAM. To parameterize the FDR to excel sam, set the
flag *use_excel_sam*.
.. note::
SAM requires log2 scaled expression levels.
I ran into trouble using this library. I was not able to
reproduce the same results from the original SAM study getting
differences in d and in the fdr.
fold change is treatment / control.
'''
def __call__(self, probesets,
treatments,
controls,
pattern=None,
fdr=0.10,
ngenes=None,
npermutations=1000,
ndelta=10,
method="ttest",
use_excel_sam=False,
treatment_label="treatment",
control_label="control"):
if ngenes and fdr:
raise ValueError("either supply ngenes or fdr, but not both.")
R.library("siggenes")
m = numpy.matrix(treatments + controls)
m = numpy.transpose(m)
E.debug("build expression matrix: %i x %i" % m.shape)
labels = numpy.array([1] * len(treatments) + [0] * len(controls))
# 1000 permutations for P-Values of down to 0.0001. Setting this
# to a high value improved reproducibility of results.
kwargs = {}
# kwargs set to replicate excel SAM
if use_excel_sam:
kwargs.update(
{"control":
r('''samControl( lambda = 0.5, n.delta = %(ndelta)s) ''' %
locals()),
"med": True,
"var.equal": True})
else:
kwargs.update({"control":
r('''samControl( n.delta = %(ndelta)s ) ''' %
locals())},)
# the option B needs to be not set if wilc.stat is chosen
if method == "ttest":
kwargs["method"] = r('''d.stat''')
kwargs["B"] = npermutations
elif method == "wilc":
kwargs["method"] = r('''wilc.stat''')
elif method == "cat":
kwargs["method"] = r('''cat.stat''')
else:
raise ValueError("unknown statistic `%s`" % method)
E.info("running sam with the following options: %s" % str(kwargs))
a = R.sam(numpy.array(m),
labels,
gene_names=numpy.array(probesets),
**kwargs)
# E.debug("%s" % str(a))
R.assign("a", a)
fdr_data = collections.namedtuple("sam_fdr", (
"delta", "p0", "false", "significant", "fdr", "cutlow",
"cutup", "j2", "j1"))
cutoff_data = collections.namedtuple(
"sam_cutoff", ("delta", "significant", "fdr"))
gene_data = collections.namedtuple(
"sam_fdr", ("row", "dvalue", "stddev", "rawp", "qvalue", "rfold"))
def _totable(robj):
'''convert robj to a row-wise table.'''
s = numpy.matrix(robj)
t = [numpy.array(x).reshape(-1,) for x in s]
return t
# extract the fdr values
# returns R matrix
t = _totable(a.do_slot('mat.fdr'))
assert len(t[0]) == len(fdr_data._fields)
# for x in t: E.debug( "x=%s" % str(x))
fdr_values = [fdr_data(*x) for x in t]
# find d cutoff
if fdr is not None and fdr > 0:
s = numpy.matrix(R.findDelta(a, fdr))
try:
cutoffs = [cutoff_data(*numpy.array(x).reshape(-1,))
for x in s]
E.debug("sam cutoffs for fdr %f: %s" % (fdr, str(cutoffs)))
cutoff = cutoffs[-1]
except TypeError:
E.debug("could not get cutoff")
cutoff = None
elif ngenes:
s = numpy.matrix(R.findDelta(a, ngenes))
try:
cutoffs = [cutoff_data(*numpy.array(x).reshape(-1,))
for x in s]
E.debug("sam cutoffs for fdr %f: %s" % (fdr, str(cutoffs)))
cutoff = cutoffs[-1]
except TypeError:
E.debug("could not get cutoff")
cutoff = None
else:
raise ValueError("either supply ngenes or fdr")
# collect (unadjusted) p-values and qvalues for all probesets
        pvalues = dict(zip(probesets, r('''a@p.value''')))
        qvalues = dict(zip(probesets, r('''a@q.value''')))
if pattern:
outfile = pattern % "sam.pdf"
R.pdf(outfile)
if cutoff:
R.plot(a, cutoff.delta)
else:
R.plot(a)
r['dev.off']()
siggenes = {}
significant_genes = set()
if cutoff is not None:
E.debug("using cutoff %s" % str(cutoff))
summary = r('''summary( a, %f )''' % cutoff.delta)
# summary = R.summary( a, cutoff.delta )
R.assign("summary", summary)
significant_genes = set(
[probesets[int(x) - 1] for x
                 in r('''summary@row.sig.genes''')])
# E.debug( "significant genes=%s" % str(significant_genes))
            r_result = list(zip(*_totable(summary.do_slot('mat.sig'))))
if len(r_result) > 0:
assert len(r_result[0]) == 6, \
"expected six columns from siggenes module, got: %s" % \
len(r_result[0])
for x in r_result:
if x[4] > fdr:
E.warn("%s has qvalue (%f) larger than cutoff, but "
"is significant significant." % (str(x), x[4]))
# except TypeError:
# only a single value
# x = [r_result[y] for y in ("Row", "d.value", "stdev", "rawp", "q.value", "R.fold") ]
# if x[4] > fdr:
# E.warn( "%s has qvalue (%f) larger than cutoff, but is called significant." % (str(x), x[4]))
siggenes[probesets[int(x[0]) - 1]] = gene_data(*x)
else:
E.debug("no cutoff found - no significant genes.")
genes = []
for probeset, treatment, control in zip(
probesets, zip(*treatments), zip(*controls)):
mean1, mean2 = numpy.mean(treatment), numpy.mean(control)
if probeset in siggenes:
s = siggenes[probeset]
pvalue = s.rawp
qvalue = s.qvalue
else:
pvalue = pvalues[probeset]
qvalue = qvalues[probeset]
significant = (0, 1)[probeset in significant_genes]
genes.append(GeneExpressionResult._make((probeset,
treatment_label,
mean1,
numpy.std(treatment),
control_label,
mean2,
numpy.std(control),
pvalue,
qvalue,
mean1 - mean2,
math.pow(
2, mean1 - mean2),
math.pow(
2, mean1 - mean2),
significant,
"OK")))
return genes, cutoff, fdr_values
#########################################################################
#########################################################################
#########################################################################
def loadTagData(tags_filename, design_filename):
'''load tag data for deseq/edger analysis.
*tags_file* is a tab-separated file with counts.
*design_file* is a tab-separated file with the
experimental design with a minimum of four columns::
track include group pair
CW-CD14-R1 0 CD14 1
CW-CD14-R2 0 CD14 1
CW-CD14-R3 1 CD14 1
CW-CD4-R1 1 CD4 1
FM-CD14-R1 1 CD14 2
FM-CD4-R2 0 CD4 2
FM-CD4-R3 0 CD4 2
FM-CD4-R4 0 CD4 2
track
name of track - should correspond to column header in *infile*
include
flag to indicate whether or not to include this data
group
group indicator - experimental group
pair
pair that sample belongs to (for paired tests)
Additional columns in design file are taken to contain levels for
additional factors and may be included for tests that allow multi-factor
model designs.
This method creates various R objects:
countsTable : data frame with counts.
groups : vector with groups
pairs : vector with pairs
factors : df of additional factors for more complex model designs
'''
# Load counts table
E.info("loading tag data from %s" % tags_filename)
r('''counts_table = read.table('%(tags_filename)s',
header=TRUE,
row.names=1,
stringsAsFactors=TRUE,
comment.char='#')''' % locals())
E.info("read data: %i observations for %i samples" %
tuple(r('''dim(counts_table)''')))
E.debug("sample names: %s" % r('''colnames(counts_table)'''))
# Load comparisons from file
r('''pheno = read.delim(
'%(design_filename)s',
header=TRUE,
stringsAsFactors=TRUE,
comment.char='#')''' % locals())
# Make sample names R-like - substitute - for .
r('''pheno[,1] = gsub('-', '.', pheno[,1]) ''')
E.debug("design names: %s" % r('''pheno[,1]'''))
# Ensure pheno rows match count columns
pheno = r(
'''pheno2 = pheno[match(colnames(counts_table),pheno[,1]),,drop=FALSE]''')
missing = r('''colnames(counts_table)[is.na(pheno2)][1]''')
if missing:
E.warn("missing samples from design file are ignored: %s" %
missing)
# subset data & set conditions
r('''includedSamples <- !(is.na(pheno2$include) | pheno2$include == '0') ''')
E.debug("included samples: %s" %
r('''colnames(counts_table)[includedSamples]'''))
r('''countsTable <- counts_table[ , includedSamples ]''')
r('''groups <- factor(pheno2$group[ includedSamples ])''')
r('''conds <- pheno2$group[ includedSamples ]''')
r('''pairs <- factor(pheno2$pair[ includedSamples ])''')
# if additional columns present, pass to 'factors'
r('''if (length(names(pheno2)) > 4) {
factors <- data.frame(pheno2[includedSamples,5:length(names(pheno2))])
} else {
factors <- NA
}''')
E.info("filtered data: %i observations for %i samples" %
tuple(r('''dim(countsTable)''')))
def filterTagData(filter_min_counts_per_row=1,
filter_min_counts_per_sample=10,
filter_percentile_rowsums=0):
'''filter tag data.
* remove rows with fewer than x counts in most highly expressed sample
* remove samples with fewer than x counts in most highly expressed row
* remove the lowest percentile of rows in the table, sorted
by total tags per row
'''
# Remove windows with no data
r('''max_counts = apply(countsTable,1,max)''')
r('''countsTable = countsTable[max_counts>=%i,]''' %
filter_min_counts_per_row)
E.info("removed %i empty rows" %
tuple(r('''sum(max_counts == 0)''')))
observations, samples = tuple(r('''dim(countsTable)'''))
E.info("trimmed data: %i observations for %i samples" %
(observations, samples))
# remove samples without data
r('''max_counts = apply(countsTable,2,max)''')
empty_samples = tuple(
r('''max_counts < %i''' % filter_min_counts_per_sample))
sample_names = r('''colnames(countsTable)''')
nempty_samples = sum(empty_samples)
if nempty_samples:
E.warn("%i empty samples are being removed: %s" %
(nempty_samples,
",".join([sample_names[x]
for x, y in enumerate(empty_samples) if y])))
r('''countsTable <- countsTable[, max_counts >= %i]''' %
filter_min_counts_per_sample)
r('''groups <- groups[max_counts >= %i]''' %
filter_min_counts_per_sample)
r('''pairs <- pairs[max_counts >= %i]''' %
filter_min_counts_per_sample)
r('''if (!is.na(factors)) {factors <- factors[max_counts >= %i,]}''' %
filter_min_counts_per_sample)
observations, samples = tuple(r('''dim(countsTable)'''))
# percentile filtering
if filter_percentile_rowsums > 0:
percentile = float(filter_percentile_rowsums) / 100.0
r('''sum_counts = rowSums( countsTable )''')
r('''take = (sum_counts > quantile(sum_counts, probs = %(percentile)f))''' %
locals())
discard, keep = r('''table( take )''')
E.info("percentile filtering at level %f: keep=%i, discard=%i" %
(filter_percentile_rowsums,
keep, discard))
r('''countsTable = countsTable[take,]''')
observations, samples = tuple(r('''dim(countsTable)'''))
return observations, samples
def groupTagData(ref_group=None, ref_regex=None):
'''compute groups and pairs from tag data table.'''
if ref_regex is not None and ref_group is None:
groups = r('''levels(groups)''')
for g in groups:
rx = re.compile(ref_regex)
if rx.search(g):
ref_group = g
# Relevel the groups so that the reference comes first
if ref_group is not None:
E.info("reference group (control) is '%s'" % ref_group)
r('''groups <- relevel(groups, ref="%s")''' % ref_group)
groups = r('''levels(groups)''')
pairs = r('''levels(pairs)''')
factors = r('''factors''')
# JJ - check whether there are additional factors in design file...
# warning... isintance(df, rpy.robjects.vectors.Vector) returns True
if isinstance(factors, rpy2.robjects.vectors.DataFrame):
E.warn("There are additional factors in design file that are ignored"
" by groupTagData: %s" % factors.r_repr())
# AH: uncommented as it causes an issue with rpy2 2.7.7
# else:
# # Hack... must be a better way to evaluate r NA instance in python?
# import pdb; pdb.set_trace()
# assert len(list(factors)) == 1 and bool(list(factors)[0]) is False, \
# "factors must either be DataFrame or NA in R global namespace"
# Test if replicates exist - at least one group must have multiple samples
max_per_group = r('''max(table(groups)) ''')[0]
has_replicates = max_per_group >= 2
# Test if pairs exist:
npairs = r('''length(table(pairs)) ''')[0]
has_pairs = npairs == 2
# at least two samples per pair
if has_pairs:
min_per_pair = r('''min(table(pairs)) ''')[0]
has_pairs = min_per_pair >= 2
return groups, pairs, has_replicates, has_pairs
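# Example (added): with groups CD14 and CD4 loaded into the R namespace,
# groupTagData(ref_regex="CD14") would pick "CD14" as the reference group and
# relevel the R factor ``groups`` so that it becomes the first level. The
# group names are taken from the example design table shown in loadTagData.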
def plotCorrelationHeatmap(method="correlation"):
'''plot a heatmap of correlations derived from
countsTable.
'''
if method == "correlation":
r('''dists <- dist( (1 - cor(countsTable)) / 2 )''')
else:
r('''dists <- dist( t(as.matrix(countsTable)), method = '%s' )''' %
method)
r('''heatmap( as.matrix( dists ), symm=TRUE )''')
def plotPairs():
'''requires counts table'''
# Plot pairs
r('''panel.pearson <- function(x, y, digits=2, prefix="", cex.cor, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- abs(cor(x, y))
txt <- format(c(r, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) cex <- 0.6/strwidth(txt)
x = 0.5;
y = 0.5;
if (par("xlog")) { x = 10^x };
if (par("ylog")) { y = 10^y };
text(x, y, txt, cex = cex);
}
''')
try:
r('''pairs(countsTable,
lower.panel = panel.pearson,
pch=".",
labels=colnames(countsTable),
log="xy")''')
except RRuntimeError:
E.warn("can not plot pairwise scatter plot")
def plotPCA(groups=True):
'''plot a PCA plot from countsTable using ggplot.
If groups is *True*, the variable ``groups`` is
used for colouring. If *False*, the groups are
determined by sample labels.
'''
r('''suppressMessages(library(ggplot2))''')
r('''pca = prcomp(t(countsTable))''')
# Build factor groups by splitting labels at "."
r('''colour=groups''')
r('''shape=0''')
r('''size=1''')
if groups is False:
r('''mm = matrix(
unlist(sapply(colnames(countsTable),strsplit,'[.]')),
nrow=length(colnames(countsTable)),
byrow=T)''')
nrows, nlevels = r('''dim(mm)''')
if nlevels > 1:
r('''colour=mm[,1]''')
if nlevels > 2:
r('''shape=mm[,2]''')
try:
r('''p1 = ggplot(
as.data.frame(pca$x),
aes(x=PC1, y=PC2,
colour=colour,
shape=shape,
label=rownames(pca$x))) \
+ geom_text(size=4, vjust=1) \
+ geom_point()''')
r('''p2 = qplot(x=PC1, y=PC3,
data = as.data.frame(pca$x),
label=rownames(pca$x),
shape=shape,
colour=colour)''')
r('''p3 = qplot(x=PC2, y=PC3,
data = as.data.frame(pca$x),
label=rownames(pca$x),
shape=shape,
colour=colour)''')
# TODO: plot all in a multi-plot with proper scale
# the following squishes the plots
# r('''source('%s')''' %
# os.path.join(os.path.dirname(E.__file__),
# "../R",
# "multiplot.R"))
# r('''multiplot(p1, p2, p3, cols=2)''')
r('''plot(p1)''')
except RRuntimeError as msg:
E.warn("could not plot in plotPCA(): %s" % msg)
def runEdgeR(outfile,
outfile_prefix="edger.",
fdr=0.1,
prefix="",
dispersion=None,
ref_group=None,
ref_regex=None,
):
'''run EdgeR on countsTable.
Results are stored in *outfile* and files prefixed by *outfile_prefix*.
The dispersion is usually measured from replicates. If there are no
replicates, you need to set the *dispersion* explicitly.
See page 13 of the EdgeR user guide::
2. Simply pick a reasonable dispersion value, based on your
experience with similar data, and use that. Although
subjective, this is still more defensible than assuming Poisson
variation. Typical values are dispersion=0.4 for human data,
dispersion=0.1 for data on genetically identical model
organisms or dispersion=0.01 for technical replicates.
'''
# load library
r('''suppressMessages(library('edgeR'))''')
groups, pairs, has_replicates, has_pairs = groupTagData(ref_group,
ref_regex)
# output heatmap plot
fn = '%(outfile_prefix)sheatmap.png' % locals()
E.info("outputing heatmap to {}".format(fn))
R.png(fn)
plotCorrelationHeatmap()
r['dev.off']()
E.info('running EdgeR: groups=%s, pairs=%s, replicates=%s, has_pairs=%s' %
(groups, pairs, has_replicates, has_pairs))
if has_pairs:
# output difference between groups
R.png('''%(outfile_prefix)sbalance_groups.png''' % locals())
first = True
for g1, g2 in itertools.combinations(groups, 2):
r('''a = rowSums( countsTable[groups == '%s'] ) ''' % g1)
r('''b = rowSums( countsTable[groups == '%s'] ) ''' % g2)
if first:
r('''plot(cumsum(sort(a - b)), type = 'l') ''')
first = False
else:
r('''lines(cumsum(sort(a - b))) ''')
r['dev.off']()
r('''suppressMessages(library('ggplot2'))''')
r('''suppressMessages(library('reshape'))''')
# output difference between pairs within groups
first = True
legend = []
for pair in pairs:
for g1, g2 in itertools.combinations(groups, 2):
key = re.sub("-", "_", "pair_%s_%s_vs_%s" % (pair, g1, g2))
legend.append(key)
# print r('''colnames( countsTable) ''')
# print r(''' pairs=='%s' ''' % pair)
# print r(''' groups=='%s' ''' % g1)
r('''a = rowSums( countsTable[pairs == '%s' & groups == '%s'])''' % (
pair, g1))
r('''b = rowSums( countsTable[pairs == '%s' & groups == '%s'])''' % (
pair, g2))
r('''c = cumsum( sort(a - b) )''')
r('''c = c - min(c)''')
if first:
data = r('''d = data.frame( %s = c)''' % key)
first = False
else:
r('''d$%s = c''' % key)
# remove row names (gene identifiers)
r('''row.names(d) = NULL''')
# add numbers of genes (x-axis)
r('''d$genes=1:nrow(d)''')
# merge data for ggplot
r('''d = melt( d, 'genes', variable_name = 'comparison' )''')
# plot
r('''gp = ggplot(d)''')
r('''pp = gp + \
geom_line(aes(x=genes,y=value,group=comparison,color=comparison))''')
try:
R.ggsave('''%(outfile_prefix)sbalance_pairs.png''' % locals())
r['dev.off']()
except RRuntimeError:
E.warn("could not plot")
# build DGEList object
# ignore message: "Calculating library sizes from column totals"
r('''countsTable = suppressMessages(DGEList(countsTable, group=groups))''')
# Relevel groups to make the results predictable - IMS
if ref_group is not None:
r('''countsTable$samples$group <- relevel(countsTable$samples$group,
ref = "%s")''' % ref_group)
else:
# if no ref_group provided use first group in groups
r('''countsTable$samples$group <- relevel(countsTable$samples$group,
ref = "%s")''' % groups[0])
# calculate normalisation factors
E.info("calculating normalization factors")
r('''countsTable = calcNormFactors( countsTable )''')
E.info("output")
# output MDS plot
R.png('''%(outfile_prefix)smds.png''' % locals())
try:
r('''plotMDS( countsTable )''')
except RRuntimeError:
E.warn("can not plot mds")
r['dev.off']()
# build design matrix
if has_pairs:
r('''design = model.matrix(~pairs + countsTable$samples$group)''')
else:
r('''design = model.matrix(~countsTable$samples$group)''')
# r('''rownames(design) = rownames( countsTable$samples )''')
# r('''colnames(design)[length(colnames(design))] = "CD4" ''' )
# fitting model to each tag
if has_replicates:
# estimate common dispersion
r('''countsTable = estimateGLMCommonDisp(countsTable, design)''')
# estimate tagwise dispersion
r('''countsTable = estimateGLMTagwiseDisp(countsTable, design)''')
# fitting model to each tag
r('''fit = glmFit(countsTable, design)''')
else:
# fitting model to each tag
if dispersion is None:
raise ValueError("no replicates and no dispersion")
E.warn("no replicates - using a fixed dispersion value")
r('''fit = glmFit(countsTable, design, dispersion=%f)''' %
dispersion)
# perform LR test
r('''lrt = glmLRT(fit)''')
E.info("Generating output")
# output cpm table
r('''suppressMessages(library(reshape2))''')
r('''countsTable.cpm <- cpm(countsTable, normalized.lib.sizes=TRUE)''')
r('''melted <- melt(countsTable.cpm)''')
r('''names(melted) <- c("test_id", "sample", "ncpm")''')
# melt columns are factors - convert to string for sorting
r('''melted$test_id = levels(melted$test_id)[as.numeric(melted$test_id)]''')
r('''melted$sample = levels(melted$sample)[as.numeric(melted$sample)]''')
# sort cpm table by test_id and sample
r('''sorted = melted[with(melted, order(test_id, sample)),]''')
r('''gz = gzfile("%(outfile_prefix)scpm.tsv.gz", "w" )''' % locals())
r('''write.table(sorted, file=gz, sep = "\t",
row.names=FALSE, quote=FALSE)''')
r('''close(gz)''')
# compute adjusted P-Values
r('''padj = p.adjust(lrt$table$PValue, 'BH')''')
rtype = collections.namedtuple("rtype", "lfold logCPM LR pvalue")
# output differences between pairs
if len(groups) == 2:
R.png('''%(outfile_prefix)smaplot.png''' % locals())
r('''plotSmear(countsTable, pair=c('%s'))''' % "','".join(groups))
r('''abline(h=c(-2, 2), col='dodgerblue') ''')
r['dev.off']()
# I am assuming that logFC is the base 2 logarithm of the fold change.
# Parse results and parse to file
results = []
counts = E.Counter()
df = r('''lrt$table''')
df["padj"] = r('''padj''')
counts.significant = sum(df.padj <= fdr)
counts.insignificant = sum(df.padj > fdr)
counts.significant_over = sum((df.padj <= fdr) & (df.logFC > 0))
counts.significant_under = sum((df.padj <= fdr) & (df.logFC < 0))
counts.input = len(df)
counts.all_over = sum(df.logFC > 0)
counts.all_under = sum(df.logFC < 0)
counts.fail = sum(df.PValue.isnull())
counts.ok = counts.input - counts.fail
df["fold"] = df.logFC.pow(2.0)
df["significant"] = df.padj <= fdr
# TODO: use pandas throughout
for interval, d in df.iterrows():
# fold change is determined by the alphabetical order of the factors.
# Is the following correct?
results.append(GeneExpressionResult._make((
interval,
groups[1],
d.logCPM,
0,
groups[0],
d.logCPM,
0,
d.PValue,
d.padj,
d.logFC,
d.fold,
d.logFC, # no transform of lfold
str(int(d.significant)),
"OK")))
writeExpressionResults(outfile, results)
outf = iotools.open_file("%(outfile_prefix)ssummary.tsv" % locals(), "w")
outf.write("category\tcounts\n%s\n" % counts.asTable())
outf.close()
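# Note (added): the 14 values passed to GeneExpressionResult._make above
# follow the ResultColumns order defined later in this module: test_id,
# treatment_name, treatment_mean, treatment_std, control_name, control_mean,
# control_std, p_value, p_value_adj, l2fold, fold, transformed_l2fold,
# significant, status.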
# needs to put into class
##
def deseqPlotSizeFactors(outfile):
'''plot size factors - requires cds object.'''
R.png(outfile)
r('''par(mar=c(8,4,4,2))''')
r('''barplot( sizeFactors( cds ), main="size factors", las=2)''')
r['dev.off']()
def deseqOutputSizeFactors(outfile):
'''output size factors - requires cds object.'''
size_factors = r('''sizeFactors( cds )''')
samples = r('''names(sizeFactors(cds))''')
with iotools.open_file(outfile, "w") as outf:
outf.write("sample\tfactor\n")
for name, x in zip(samples, size_factors):
outf.write("%s\t%s\n" % (name, str(x)))
def deseqPlotCorrelationHeatmap(outfile, vsd):
'''plot a heatmap
Use variance stabilized data in object vsd.
Should be 'blind', as then the transform is
not informed by the experimental design.
'''
# rpy2.4.2 - passing of arrays seems to be broken - do it in R
# dists = r['as.matrix'](R.dist(R.t(R.exprs(vsd))))
dists = r('''as.matrix(dist(t(exprs(vsd))))''')
R.png(outfile)
r['heatmap.2'](
dists,
trace='none',
margin=ro.IntVector((10, 10)))
r['dev.off']()
def deseqPlotGeneHeatmap(outfile,
data,
Rowv=False,
Colv=False):
'''plot a heatmap of all genes
Use variance stabilized data in object vsd.
Should be 'blind', as then the transform is
not informed by the experimental design.
'''
if len(data) == 0:
return
# do not print if not enough values in one
# direction (single row or column)
if min(R.dim(data)) < 2:
return
R.png(outfile, width=500, height=2000)
hmcol = R.colorRampPalette(r['brewer.pal'](9, "GnBu"))(100)
r['heatmap.2'](
data,
col=hmcol,
trace="none",
dendrogram="none",
Rowv=Rowv,
Colv=Colv,
labRow=False,
margin=ro.IntVector((5, 5)),
lhei=ro.IntVector((1, 10)),
key=False)
r['dev.off']()
def deseqPlotPCA(outfile, vsd, max_genes=500):
'''plot a PCA
Use variance stabilized data in object vsd.
Should be 'blind', as then the transform is
not informed by the experimental design.
'''
R.png(outfile)
# if there are more than 500 genes (after filtering)
# use the 500 most variable in the PCA
# else use the number of genes
r('''ntop = ifelse(as.integer(dim(vsd))[1] >= %(max_genes)i,
%(max_genes)i,
as.integer(dim(vsd))[1])''' % locals())
try:
r('''plotPCA(vsd)''')
except RRuntimeError as msg:
E.warn("can not plot PCA: %s" % msg)
r['dev.off']()
def deseqPlotPairs(outfile):
'''requires counts table'''
# Plot pairs
R.png(outfile, width=960, height=960)
plotPairs()
r['dev.off']()
def deseqPlotPvaluesAgainstRowsums(outfile):
'''plot pvalues against row sum rank.
This plot is useful to see if quantile filtering could
be applied.
'''
r('''counts_sum = rowSums( countsTable )''')
R.png(outfile)
r('''plot( rank( counts_sum)/length(counts_sum),
-log10( res$pval),
pch = 16,
cex= 0.1)''')
r('''abline( a=3, b=0, col='red')''')
r['dev.off']()
def deseqParseResults(control_name, treatment_name, fdr, vsd=False):
'''parse deseq output.
retrieve deseq results from object 'res' in R namespace.
The 'res' object is a dataframe with the following columns (from the
DESeq manual):
id: The ID of the observable, taken from the row names of the
counts slots.
baseMean: The base mean (i.e., mean of the counts divided by the size
factors) for the counts for both conditions
baseMeanA: The base mean (i.e., mean of the counts divided by the size
factors) for the counts for condition A
baseMeanB: The base mean for condition B
foldChange: The ratio meanB/meanA
log2FoldChange: The log2 of the fold change
pval: The p value for rejecting the null hypothesis 'meanA==meanB'
padj: The adjusted p values (adjusted with 'p.adjust( pval,
method="BH")')
vsd_log2FoldChange: The log2 fold change after variance stabilization.
This data field is not part of DESeq proper, but has been added
in this module in the runDESeq() method.
Here, 'conditionA' is 'control' and 'conditionB' is 'treatment'
such that a foldChange of 2 means that treatment is twice
upregulated compared to control.
Returns a list of results.
If vsd is True, the log fold change will be computed from the variance
stabilized data.
'''
results = []
counts = E.Counter()
res_df = pandas2ri.ri2py(r["res"])
for index, data in res_df.iterrows():
counts.input += 1
# set significant flag
if data['padj'] <= fdr:
signif = 1
counts.significant += 1
if data['log2FoldChange'] > 0:
counts.significant_over += 1
else:
counts.significant_under += 1
else:
signif = 0
counts.insignificant += 1
if data['log2FoldChange'] > 0:
counts.all_over += 1
else:
counts.all_under += 1
# set lfold change to 0 if both are not expressed
if data['baseMeanA'] == 0.0 and data['baseMeanB'] == 0.0:
data['foldChange'] = 0
data['log2FoldChange'] = 0
if data['pval'] != r('''NA'''):
status = "OK"
else:
status = "FAIL"
counts[status] += 1
counts.output += 1
# check if our assumptions about the direction of fold change
# are correct
assert (data['foldChange'] > 1) == (data['baseMeanB'] > data['baseMeanA'])
# note that fold change is computed as second group (B) divided by
# first (A)
results.append(GeneExpressionResult._make((
data['id'],
treatment_name,
data['baseMeanB'],
0,
control_name,
data['baseMeanA'],
0,
data['pval'],
data['padj'],
data['log2FoldChange'],
data['foldChange'],
data['transformed_log2FoldChange'],
str(signif),
status)))
return results, counts
def deseq2ParseResults(treatment_name, control_name, fdr, vsd=False):
'''
Standardises the output format from deseq2.
Deseq2 has the following output columns:
baseMean log2FoldChange lfcSE stat pvalue padj
described in
https://bioconductor.org/packages/release/bioc/
vignettes/DESeq2/inst/doc/DESeq2.pdf
Standardised columns are generated from this output as follows:
test_id - the gene or region tested, row names from raw output
treatment_name - the first group in this differential expression
comparison (from the design file)
treatment_mean - the mean expression value for this treatment from the
normalised count table generated by deseq2
treatment_std - the standard deviation of expression for this treatment
from the normalised count table generated by deseq2
control_name - the second group in this differential expression
comparison (from the design file)
control_mean - the mean expression value for the control group from the
normalised count table generated by deseq2
control_std - the standard deviation of expression for the control group
from the normalised count table generated by deseq2
pvalue - the pvalue generated by deseq2 (from the pvalue column)
qvalue - the adjusted pvalue generated by deseq2 (from the padj column)
l2fold - the log2fold change between normalised counts generated by
deseq2 (log2FoldChange column). If betaPrior is set to TRUE, this is the
shrunken log2 fold change. If set to FALSE, no shrinkage.
fold - control mean / treatment mean
transformed_l2fold - not applicable, set to 0 (see deseq2 manual,
"The variance stabilizing and rlog transformations are provided
for applications other than differential testing,
for example clustering of samples or other machine learning applications.
For differential testing we recommend the DESeq function applied to raw
counts"
signif = True if the qvalue is less than the supplied FDR threshold.
status = OK if a pvalue has been generated, else FAIL
'''
r(''' fdr=%s ''' % fdr)
# assign standard column names
r('''cols = c("test_id",
"treatment_name",
"treatment_mean",
"treatment_std",
"control_name",
"control_mean",
"control_std",
"pvalue",
"qvalue",
"l2fold",
"fold",
"transformed_l2fold",
"signif",
"status") ''')
# extract normalised counts
r('''normalcounts = counts(dds, normalized=T)''')
# build empty dataframe
r('''res2 = data.frame(matrix(nrow=nrow(res), ncol=length(cols)))''')
r('''colnames(res2) = cols''')
# fill columns with values described above
r('''res2['test_id'] = rownames(res)''')
r('''g = unique(groups[groups == "%s" | groups == "%s"])''' % (treatment_name, control_name))
r('''g1 = which(groups == g[1])''')
r('''g2 = which(groups == g[2])''')
r('''res2['treatment_name'] = g[1]''')
r('''res2['treatment_mean'] = rowMeans(normalcounts[,g1])''')
r('''res2['treatment_std'] = apply(normalcounts[,g1], 1, sd)''')
r('''res2['control_name'] = g[2]''')
r('''res2['control_mean'] = rowMeans(normalcounts[,g2])''')
r('''res2['control_std'] = apply(normalcounts[,g2], 1, sd)''')
r('''res2['pvalue'] = res$pvalue''')
r('''res2['qvalue'] = res$padj''')
r('''res2['l2fold'] = res$log2FoldChange''')
# Fold change here does not reflect the shrinkage applied to
# log2fold changes
r('''res2['fold'] = res2$control_mean / res2$treatment_mean''')
r('''res2['signif'] = as.integer(res2$qvalue <= fdr)''')
r('''res2['status'] = ifelse(is.na(res2$pvalue), "FAIL", "OK")''')
# replace l2fold change and fold for expression levels of 0 in treatment
# and control with 0
r('''z1 = which(res2$treatment_mean == 0)''')
r('''z2 = which(res2$control_mean == 0)''')
r('''zeros = intersect(z1, z2)''')
r('''res2$l2fold[zeros] = 0''')
r('''res2$fold[zeros] = 0''')
# occupy transformed l2fold with 0s
r('''res2$transformed_l2fold = 0''')
D = r('res2')
D.index = D['test_id']
D = D.drop('test_id', 1)
return D
def runDESeq(outfile,
outfile_prefix="deseq.",
fdr=0.1,
prefix="",
fit_type="parametric",
dispersion_method="pooled",
sharing_mode="maximum",
ref_group=None,
ref_regex=None,
):
'''run DESeq on countsTable.
Results are stored in *outfile* and files prefixed by *outfile_prefix*.
The current analysis follows the analysis as outlined in version
1.14.0
DESeq ignores any pair information in the design matrix.
The output is treatment and control. Fold change values are
computed as treatment divided by control.
'''
# load library
r('''suppressMessages(library('DESeq'))''')
r('''suppressMessages(library('gplots'))''')
r('''suppressMessages(library('RColorBrewer'))''')
groups, pairs, has_replicates, has_pairs = groupTagData(ref_group,
ref_regex)
# Run DESeq
# Create Count data object
E.info("running DESeq: replicates=%s" % (has_replicates))
r('''cds <-newCountDataSet( countsTable, groups) ''')
# Estimate size factors
r('''cds <- estimateSizeFactors( cds )''')
no_size_factors = r('''is.na(sum(sizeFactors(cds)))''')[0]
if no_size_factors:
E.warn("no size factors - can not estimate - no output")
return
# estimate variance
if has_replicates:
E.info("replicates - estimating variance from replicates")
else:
E.info("no replicates - estimating variance with method='blind'")
dispersion_method = "blind"
E.info("dispersion_method=%s, fit_type=%s, sharing_mode=%s" %
(dispersion_method, fit_type, sharing_mode))
r('''cds <- estimateDispersions( cds,
method='%(dispersion_method)s',
fitType='%(fit_type)s',
sharingMode='%(sharing_mode)s')''' % locals())
# bring into python namespace
cds = r('''cds''')
# plot fit - if method == "pooled":
if dispersion_method == "pooled":
R.png('%sdispersion_estimates_pooled.png' %
outfile_prefix)
R.plotDispEsts(cds)
r['dev.off']()
elif not has_replicates:
# without replicates the following error appears
# in the rpy2.py2ri conversion:
# 'dims' cannot be of length 0
pass
else:
dispersions = r('''ls(cds@fitInfo)''')
for dispersion in dispersions:
R.png('%sdispersion_estimates_%s.png' %
(outfile_prefix, dispersion))
R.plotDispEsts(cds, name=dispersion)
r['dev.off']()
# plot size factors
deseqPlotSizeFactors('%(outfile_prefix)ssize_factors.png' % locals())
# output size factors
deseqOutputSizeFactors("%(outfile_prefix)ssize_factors.tsv" % locals())
# plot scatter plots of pairs
deseqPlotPairs('%(outfile_prefix)spairs.png' % locals())
if dispersion_method not in ("blind",):
# also do a blind dispersion estimate for
# a variance stabilizing transform
r('''cds_blind <- estimateDispersions( cds,
method='blind',
fitType='%(fit_type)s',
sharingMode='%(sharing_mode)s')''' % locals())
else:
r('''cds_blind = cds''')
# perform variance stabilization for log2 fold changes
vsd = r('''vsd = varianceStabilizingTransformation(cds_blind)''')
# output normalized counts (in order)
# gzfile does not work with rpy 2.4.2 in python namespace
# using R.gzfile, so do it in R-space
r('''t = counts(cds, normalized=TRUE);
write.table(t[order(rownames(t)),],
file=gzfile('%(outfile_prefix)scounts.tsv.gz'),
row.names=TRUE,
col.names=NA,
quote=FALSE,
sep='\t') ''' % locals())
# output variance stabilized counts (in order)
r('''t = exprs(vsd);
write.table(t[order(rownames(t)),],
file=gzfile('%(outfile_prefix)svsd.tsv.gz'),
row.names=TRUE,
col.names=NA,
quote=FALSE,
sep='\t')
''' % locals())
# plot correlation heatmap of variance stabilized data
deseqPlotCorrelationHeatmap(
'%scorrelation_heatmap.png' % outfile_prefix,
vsd)
# plot PCA
deseqPlotPCA('%spca.png' % outfile_prefix,
vsd)
# plot gene heatmap for all genes - order by average expression
# subtract one to get numpy indices
select = R.order(R.rowMeans(R.counts(cds)), decreasing=True)
# the following uses R-based indexing
deseqPlotGeneHeatmap(
'%sgene_heatmap.png' % outfile_prefix,
r['as.matrix'](R.exprs(vsd).rx(select)))
# plot heatmap of top 200 expressed genes
deseqPlotGeneHeatmap(
'%sgene_heatmap_top200.png' % outfile_prefix,
r['as.matrix'](R.exprs(vsd).rx(select[:200])))
# Call diffential expression for all pairings of groups included in the
# design
all_results = []
for combination in itertools.combinations(groups, 2):
control, treatment = combination
gfix = "%s_vs_%s_" % (control, treatment)
outfile_groups_prefix = outfile_prefix + gfix
E.info(("calling differential expression for "
"control=%s vs treatment=%s") %
(control, treatment))
res = r('''res = nbinomTest(cds, '%s', '%s')''' % (control, treatment))
# plot significance
R.png('''%(outfile_groups_prefix)ssignificance.png''' % locals())
r('''plot(
res$baseMean,
res$log2FoldChange,
log="x",
pch=20, cex=.1,
col = ifelse( res$padj < %(fdr)s, "red", "black"))''' % locals())
r['dev.off']()
# plot pvalues against rowsums
deseqPlotPvaluesAgainstRowsums(
'%(outfile_groups_prefix)spvalue_rowsums.png' % locals())
E.info("Generating output (%s vs %s)" % (control, treatment))
# get variance stabilized fold changes - note the reversal of
# treatment/control
r('''vsd_l2f =
(rowMeans(exprs(vsd)[,conditions(cds) == '%s', drop=FALSE])
- rowMeans( exprs(vsd)[,conditions(cds) == '%s', drop=FALSE]))''' %
(treatment, control))
# plot vsd correlation, see Figure 14 in the DESeq manual
# if you also want to colour by expression level
R.png('''%(outfile_groups_prefix)sfold_transformation.png''' %
locals())
r('''plot(
res$log2FoldChange, vsd_l2f,
pch=20, cex=.1,
col = ifelse( res$padj < %(fdr)s, "red", "black" ) )''' % locals())
r['dev.off']()
# plot heatmap of differentially expressed genes
# plot gene heatmap for all genes - order by average expression
select = r('''select = res['padj'] < %f''' % fdr)
if r('''sum(select)''')[0] > 0:
E.info('%s vs %s: plotting %i genes in heatmap' %
       (treatment, control, int(r('''sum(select)''')[0])))
data = R.exprs(vsd).rx(select)
if not isinstance(data, rpy2.robjects.vectors.FloatVector):
order = R.order(R.rowMeans(data), decreasing=True)
deseqPlotGeneHeatmap(
'%sgene_heatmap.png' % outfile_groups_prefix,
r['as.matrix'](data[order]),
Colv=False,
Rowv=True)
else:
E.warn('can not plot differentially expressed genes')
else:
E.warn('no differentially expressed genes at fdr %f' % fdr)
# Plot pvalue histogram
R.png('''%(outfile_groups_prefix)spvalue_histogram.png''' % locals())
r('''pvalues = res$pval''')
r('''hist(pvalues, breaks=50, col='skyblue' )''')
r['dev.off']()
# Plot diagnostic plots for FDR
if has_replicates:
r('''orderInPlot = order(pvalues)''')
r('''showInPlot = (pvalues[orderInPlot] < 0.08)''')
# Jethro - previously plotting x =
# pvalues[orderInPlot][showInPlot]
# pvalues[orderInPlot][showInPlot] contains all NA values
# from pvalues which(showInPlot) doesn't... removing NA
# values
r('''true.pvalues <- pvalues[orderInPlot][showInPlot]''')
r('''true.pvalues <- true.pvalues[is.finite(true.pvalues)]''')
if r('''sum(showInPlot)''')[0] > 0:
R.png('''%(outfile_groups_prefix)sfdr.png''' % locals())
# failure when no replicates:
# rpy2.rinterface.RRuntimeError:
# Error in plot.window(...) : need finite 'xlim' values
r('''plot( seq( along=which(showInPlot)),
true.pvalues,
pch='.',
xlab=expression(rank(p[i])),
ylab=expression(p[i]))''')
r('''abline(a = 0, b = %(fdr)f / length(pvalues), col = "red")
''' % locals())
r['dev.off']()
else:
E.warn('no p-values < 0.08')
# Add log2 fold with variance stabilized l2fold value
r('''res$transformed_log2FoldChange = vsd_l2f''')
# Parse results and parse to file
results, counts = deseqParseResults(control,
treatment,
fdr=fdr)
all_results += results
E.info(counts)
outf = iotools.open_file(
"%(outfile_groups_prefix)ssummary.tsv" % locals(), "w")
outf.write("category\tcounts\n%s\n" % counts.asTable())
outf.close()
writeExpressionResults(outfile, all_results)
def runDESeq2(outfile,
outfile_prefix="deseq2",
fdr=0.1,
ref_group=None,
model=None,
contrasts=None,
plot=1,
):
"""
Run DESeq2 on counts table.
If no model is passed, then defaults to the group column in design file
Does not make use of group tag data because the function doesn't accommodate
multi-factor designs
To Do: Parse results into standard output format.
KB: I have done this but I'm not sure if it is compatible with complex
design tables
Fix fact that plotMA is hardcoded.
"""
# load libraries
r('''suppressMessages(library('DESeq2'))''')
# Create metadata... this will eventually be a pandas dataframe
if isinstance(r('''factors'''), rpy2.robjects.vectors.DataFrame):
E.info("DESeq2: Merging additional factors in design file to"
"create metadata table")
r('''mdata <- cbind(groups, factors)''')
mdata = tuple(r('''names(mdata)'''))
else:
r('''mdata <- data.frame(group=groups)''')
mdata = "group"
E.info("DESeq2 colData headers are: %s" % mdata)
# Check for model and that model terms are in metadata table
if model:
assert contrasts, "Must specify contrasts if model design provided"
terms = set([x for x in re.split("\W", model) if x != ''])
assert terms.issubset(mdata), \
"DESeq2: design formula has terms not present in colData"
else:
if mdata != "group":
E.warn("DESeq2 model specified, with no metadata in design file")
terms = ["group", ]
model = "~ group"
E.info("DESeq2 design formula is: %s" % model)
# Create DESeqDataSet, using countsTable, mdata, model
r('''suppressMessages(dds <- DESeqDataSetFromMatrix(countData=countsTable,
colData=mdata,
design=%(model)s))''' % locals())
# WARNING: This is not done automatically... I don't know why?
r('''colnames(dds) <- colnames(countsTable)''')
E.info("Combined colata, design formula and counts table to create"
" DESeqDataSet instance")
# Rlog transform
r('''suppressMessages(rld <- rlog(dds))''')
if plot == 1:
# Plot PCA of rlog transformed count data for top 500
for factor in terms:
outf = outfile_prefix + factor + "_PCAplot500.tiff"
E.info("Creating PCA plot for factor: %s" % outf)
r('''x <- plotPCA(rld, intgroup="%(factor)s")''' % locals())
# r('''saveRDS(x, '%(outf)s')''' % locals())
r('''tiff("%(outf)s")''' % locals())
r('''plot(x)''')
r('''dev.off()''')
# Extract rlog transformed count data...
r('''rlogtab = as.data.frame(assay(rld))''')
r('''rlogtab$test_id = rownames(rlogtab)''')
r('''rlogtab = rlogtab[, c(ncol(rlogtab), 1:ncol(rlogtab)-1)]''')
r('''rlogtab = as.data.frame(rlogtab)''')
R.data('rlogtab')
rlog_out = r('rlogtab')
rlogoutf = outfile_prefix + "rlog.tsv"
rlog_out.to_csv(rlogoutf, sep="\t", index=False)
os.system("gzip %s" % rlogoutf)
# Run DESeq2
r('''suppressMessages(dds <- DESeq(dds))''')
E.info("Completed DESeq2 differential expression analysis")
# Extract contrasts...
if contrasts:
contrasts = (x.split(":") for x in contrasts.split(","))
else:
# created by loadTagData...
groups = r('''levels(groups)''')
contrasts = (("group",) + x for x in itertools.combinations(groups, 2))
df_final = pandas.DataFrame()
raw_final = pandas.DataFrame()
all_results = []
for combination in contrasts:
variable, control, treatment = combination
# Fetch results
gfix = "%s_%s_vs_%s" % (variable, control, treatment)
outfile_groups_prefix = outfile_prefix + gfix + "_MAplot.png"
r('''res <- results(dds, contrast=c("%(variable)s",
"%(treatment)s",
"%(control)s"))''' % locals())
E.info("Extracting contrast for levels %s (treatment) vs %s (control)"
" for factor %s" % (treatment, control, variable))
# plot MA plot
if plot == 1:
r('''png("%(outfile_groups_prefix)s")''' % locals())
r('''plotMA(res, alpha=%f)''' % fdr)
r('''dev.off()''')
E.info("Plotted MA plot for levels %s (treatment) vs %s (control)"
" for factor %s" % (treatment, control, variable))
r('''res_df <- as.data.frame(res)''')
r('''res_df$test_id = rownames(res_df)''')
r('''res_df = res_df[, c(ncol(res_df), 1:ncol(res_df)-1)]''')
R.data('res_df')
raw_out = r('res_df')
# manipulate output into standard format
df_out = deseq2ParseResults(treatment, control, fdr, vsd=False)
# label the deseq2 raw output file and append it to the raw output tab
raw_out["treatment"] = [treatment, ]*len(df_out.index)
raw_out["control"] = [control, ]*len(df_out.index)
raw_out["variable"] = [variable, ]*len(df_out.index)
raw_final = raw_final.append(raw_out, ignore_index=True)
# write the standardised output table
df_out.to_csv(iotools.open_file(outfile_prefix + gfix + ".tsv.gz", "w"),
sep="\t",
index_label="test_id")
E.info("Extracted results table for contrast '%s' (treatment) vs '%s'"
" (control) for factor '%s'" % (treatment, control, variable))
# append to final dataframe
df_out.reset_index(inplace=True)
df_out.rename(columns={"index": "test_id"}, inplace=True)
df_final = df_final.append(df_out, ignore_index=True)
results = df_final.values.tolist()
# write final dataframe into standard format
writeExpressionResults(outfile, results)
rawoutf = outfile_prefix + "raw.tsv"
raw_final.to_csv(rawoutf, sep="\t", index=False)
os.system("gzip %s" % rawoutf)
Design = collections.namedtuple("Design", ("include", "group", "pair"))
def readDesignFile(design_file):
'''reads a design file.'''
design = collections.OrderedDict()
with iotools.open_file(design_file) as inf:
for line in inf:
if line.startswith("track"):
continue
# strip the trailing newline so the last column does not carry it
track, include, group, pair = line.rstrip("\n").split("\t")[:4]
if track in design:
raise ValueError("duplicate track '%s'" % track)
design[track] = Design._make((int(include), group, pair))
return design
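# Example (added, hypothetical file content): a design line such as
#
#   CW-CD14-R3<tab>1<tab>CD14<tab>1
#
# is stored by readDesignFile as
#
#   design["CW-CD14-R3"] == Design(include=1, group="CD14", pair="1")
#
# i.e. ``include`` is cast to int while ``group`` and ``pair`` stay strings.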
def plotTagStats(infile, design_file, outfile_prefix):
'''provide summary plots for tag data.'''
loadTagData(infile, design_file)
nobservations, nsamples = filterTagData()
if nobservations == 0:
E.warn("no observations - no output")
return
if nsamples == 0:
E.warn("no samples remain after filtering - no output")
return
groups, pairs, has_replicates, has_pairs = groupTagData()
# import rpy2.robjects.lib.ggplot2 as ggplot2
r('''suppressMessages(library('ggplot2'))''')
r('''suppressMessages(library('reshape'))''')
r('''d = melt( log10(countsTable + 1), variable_name = 'sample' )''')
# Note that ggsave does not work if there is
# no X display.
R.png(outfile_prefix + ".densities.png")
r('''gp = ggplot(d)''')
r('''pp = gp + geom_density(aes(x=value, group=sample,
color=sample, fill=sample), alpha=I(1/3))''')
r('''plot(pp)''')
r['dev.off']()
R.png(outfile_prefix + ".boxplots.png")
r('''gp = ggplot(d)''')
r('''pp = gp +
geom_boxplot(aes(x=sample,y=value,color=sample,fill=sample),
size=0.3,
alpha=I(1/3)) +
theme(axis.text.x = element_text( angle=90, hjust=1, size=8 ) )''')
r('''plot(pp)''')
r['dev.off']()
def plotDETagStats(infile, outfile_prefix,
additional_file=None,
join_columns=None,
additional_columns=None):
'''provide summary plots for tag data.
Stratify boxplots and densities according to differential
expression calls.
The input file is the output of any of the DE
tools, see GeneExpressionResults for column names.
Additional file will be joined with infile and any additional
columns will be output as well.
'''
table = pandas.read_csv(iotools.open_file(infile),
sep="\t")
if additional_file is not None:
additional_table = pandas.read_csv(
iotools.open_file(additional_file),
sep="\t")
table = pandas.merge(table,
additional_table,
on=join_columns,
how="left",
sort=False)
# remove index. If it is numbered starting from 1, there is a bug
# in ggplot, see https://github.com/yhat/ggplot/pull/384
table.reset_index(inplace=True)
# add log-transformed count data
table['log10_treatment_mean'] = numpy.log10(table['treatment_mean'] + 1)
table['log10_control_mean'] = numpy.log10(table['control_mean'] + 1)
table['dmr'] = numpy.array(["insignificant"] * len(table))
table.loc[
(table["l2fold"] > 0) & (table["significant"] == 1), "dmr"] = "up"
table.loc[
(table["l2fold"] < 0) & (table["significant"] == 1), "dmr"] = "down"
def _dplot(table, outfile, column):
plot = ggplot.ggplot(
ggplot.aes(column,
colour='dmr',
fill='dmr'),
data=table) + \
ggplot.geom_density(alpha=0.5)
try:
plot.save(filename=outfile)
except Exception as msg:
E.warn("no plot for %s: %s" % (column, msg))
def _bplot(table, outfile, column):
plot = ggplot.ggplot(
ggplot.aes(x='dmr', y=column),
data=table) + \
ggplot.geom_boxplot()
try:
plot.save(filename=outfile)
except ValueError as msg:
# boxplot fails if all values are the same
# see https://github.com/yhat/ggplot/issues/393
E.warn(msg)
# TODO: ggplot not supported, replace with plotnine
# _dplot(table,
# outfile_prefix + ".densities_tags_control.png",
# "log10_control_mean")
# _dplot(table,
# outfile_prefix + ".densities_tags_treatment.png",
# "log10_treatment_mean")
# _bplot(table,
# outfile_prefix + ".boxplot_tags_control.png",
# "log10_control_mean")
# _bplot(table,
# outfile_prefix + ".boxplot_tags_treatment.png",
# "log10_treatment_mean")
if additional_columns:
for column in additional_columns:
_dplot(table,
outfile_prefix + ".densities_%s.png" % column,
column)
_bplot(table,
outfile_prefix + ".boxplot_%s.png" % column,
column)
return
def runMockAnalysis(outfile,
outfile_prefix,
ref_group=None,
ref_regex=None,
pseudo_counts=0):
'''run a mock analysis on a count table.
compute fold enrichment values, but do not normalize or
perform any test.
'''
groups, pairs, has_replicates, has_pairs = groupTagData(ref_group,
ref_regex)
all_results = []
for combination in itertools.combinations(groups, 2):
control, treatment = combination
r('''control_counts = rowSums( countsTable[groups == '%s'] )''' %
control)
r('''treatment_counts = rowSums( countsTable[groups == '%s'] )''' %
treatment)
# add pseudocounts to enable analysis of regions
# that are absent/present
if pseudo_counts:
r('''control_counts = control_counts + %f''' % pseudo_counts)
r('''treatment_counts = treatment_counts + %f''' % pseudo_counts)
r('''fc = treatment_counts / control_counts''')
results = []
for identifier, treatment_count, control_count, foldchange in \
zip(r('''rownames( countsTable)'''),
r('''treatment_counts'''),
r('''control_counts'''),
r('''fc''')):
try:
log2fold = math.log(foldchange)
except ValueError:
log2fold = "Inf"
results.append(GeneExpressionResult._make((
identifier,
treatment,
treatment_count,
0,
control,
control_count,
0,
1,
1,
log2fold,
foldchange,
log2fold,
"0",
"OK")))
all_results.extend(results)
writeExpressionResults(outfile, all_results)
def outputTagSummary(filename_tags,
outfile,
output_filename_pattern,
filename_design=None):
'''output summary values for a count table.'''
E.info("loading tag data from %s" % filename_tags)
if filename_design is not None:
# load all tag data
loadTagData(filename_tags, filename_design)
# filter
nobservations, nsamples = filterTagData()
else:
# read complete table
r('''countsTable = read.delim('%(filename_tags)s',
header = TRUE,
row.names = 1,
stringsAsFactors = TRUE,
comment.char = '#')''' % locals())
nobservations, nsamples = tuple(r('''dim(countsTable)'''))
E.info("read data: %i observations for %i samples" %
(nobservations, nsamples))
# remove samples without data
r('''max_counts = apply(countsTable,2,max)''')
filter_min_counts_per_sample = 1
empty_samples = tuple(
r('''max_counts < %i''' % filter_min_counts_per_sample))
sample_names = r('''colnames(countsTable)''')
nempty_samples = sum(empty_samples)
if nempty_samples:
E.warn("%i empty samples are being removed: %s" %
(nempty_samples,
",".join([sample_names[x]
for x, y in enumerate(empty_samples) if y])))
r('''countsTable <- countsTable[, max_counts >= %i]''' %
filter_min_counts_per_sample)
nobservations, nsamples = tuple(r('''dim(countsTable)'''))
r('''groups = factor(colnames( countsTable ))''')
E.debug("sample names: %s" % r('''colnames(countsTable)'''))
nrows, ncolumns = tuple(r('''dim(countsTable)'''))
outfile.write("metric\tvalue\tpercent\n")
outfile.write("number of observations\t%i\t100\n" % nobservations)
outfile.write("number of samples\t%i\t100\n" % nsamples)
# Count windows with no data
r('''max_counts = apply(countsTable,1,max)''')
# output distribution of maximum number of counts per window
outfilename = output_filename_pattern + "max_counts.tsv.gz"
E.info("outputting maximum counts per window to %s" % outfilename)
r('''write.table(table(max_counts),
file=gzfile('%(outfilename)s'),
sep="\t",
row.names=FALSE,
quote=FALSE)''' %
locals())
# removing empty rows
E.info("removing rows with no counts in any sample")
r('''countsTable = countsTable[max_counts>0,]''')
if nrows > 0:
for x in range(0, 20):
nempty = tuple(r('''sum(max_counts <= %i)''' % x))[0]
outfile.write("max per row<=%i\t%i\t%f\n" %
(x, nempty, 100.0 * nempty / nrows))
E.info("removed %i empty rows" % tuple(r('''sum(max_counts == 0)''')))
observations, samples = tuple(r('''dim(countsTable)'''))
E.info("trimmed data: %i observations for %i samples" %
(observations, samples))
# build correlation
r('''correlations = cor(countsTable)''')
outfilename = output_filename_pattern + "correlation.tsv"
E.info("outputting sample correlations table to %s" % outfilename)
r('''write.table(correlations, file='%(outfilename)s',
sep="\t",
row.names=TRUE,
col.names=NA,
quote=FALSE)''' % locals())
# output scatter plots
outfilename = output_filename_pattern + "scatter.png"
E.info("outputting scatter plots to %s" % outfilename)
R.png(outfilename, width=960, height=960)
plotPairs()
r['dev.off']()
# output heatmap based on correlations
outfilename = output_filename_pattern + "heatmap.svg"
E.info("outputting correlation heatmap to %s" % outfilename)
R.svg(outfilename)
plotCorrelationHeatmap(method="correlation")
r['dev.off']()
# output PCA
outfilename = output_filename_pattern + "pca.svg"
E.info("outputting PCA plot to %s" % outfilename)
R.svg(outfilename)
plotPCA(groups=False)
r['dev.off']()
# output an MDS plot
r('''suppressMessages(library('limma'))''')
outfilename = output_filename_pattern + "mds.svg"
E.info("outputting mds plot to %s" % outfilename)
R.svg(outfilename)
try:
r('''plotMDS(countsTable)''')
except RRuntimeError:
E.warn("can not plot mds")
r['dev.off']()
def dumpTagData(filename_tags, filename_design, outfile):
'''output filtered tag table.'''
if outfile == sys.stdout:
outfilename = ""
else:
outfilename = outfile.name
# load all tag data
loadTagData(filename_tags, filename_design)
# filter
nobservations, nsamples = filterTagData()
# output
r('''write.table( countsTable,
file='%(outfilename)s',
sep='\t',
quote=FALSE)''' % locals())
def runTTest(outfile,
outfile_prefix,
fdr=0.1,
ref_group=None,
ref_regex=None):
'''apply a ttest on the data.
For the T-test it is best to use FPKM values as
this method does not perform any library normalization.
'''
groups, pairs, has_replicates, has_pairs = groupTagData(ref_group,
ref_regex)
results = []
for combination in itertools.combinations(groups, 2):
control, treatment = combination
# keep the result in a separate name so the rpy2 interface `r` is not rebound
ttest_results = r('''r = apply(countsTable, 1,
function(x) { t.test(
x[groups == '%(treatment)s'],
x[groups == '%(control)s']) } )
''' % locals())
for test_id, ttest in zip(ttest_results.names, ttest_results):
# TS, swapped order below as assignment was incorrect
treatment_mean, control_mean = tuple(ttest.rx2('estimate'))
fold_change = treatment_mean / control_mean
pvalue = tuple(ttest.rx2('p.value'))[0]
significant = (0, 1)[pvalue < fdr]
results.append(GeneExpressionResult._make((test_id,
treatment,
treatment_mean,
0,
control,
control_mean,
0,
pvalue,
pvalue,
numpy.log2(fold_change),
fold_change,
numpy.log2(fold_change),
significant,
"OK")))
writeExpressionResults(outfile, results)
#####################################################################
# Pandas-based functions and matplotlib-based plotting functions ####
#####################################################################
def loadTagDataPandas(tags_filename, design_filename):
'''load tag data for deseq/edger analysis.
*tags_file* is a tab-separated file with counts.
*design_file* is a tab-separated file with the
experimental design with four columns::
track include group pair
CW-CD14-R1 0 CD14 1
CW-CD14-R2 0 CD14 1
CW-CD14-R3 1 CD14 1
CW-CD4-R1 1 CD4 1
FM-CD14-R1 1 CD14 2
FM-CD4-R2 0 CD4 2
FM-CD4-R3 0 CD4 2
FM-CD4-R4 0 CD4 2
track
name of track - should correspond to column header in *infile*
include
flag to indicate whether or not to include this data
group
group indicator - experimental group
pair
pair that sample belongs to (for paired tests)
This method creates various R objects:
countsTable : data frame with counts.
groups : vector with groups
pairs : vector with pairs
'''
E.info("loading tag data from %s" % tags_filename)
inf = iotools.open_file(tags_filename)
counts_table = pandas.read_csv(inf,
sep="\t",
index_col=0,
comment="#")
inf.close()
E.info("read data: %i observations for %i samples" %
counts_table.shape)
E.debug("sample names: %s" % list(counts_table.columns))
inf = iotools.open_file(design_filename)
design_table = pandas.read_csv(inf, sep="\t", index_col=0)
inf.close()
E.debug("design names: %s" % list(design_table.index))
missing = set(counts_table.columns).difference(design_table.index)
if missing:
E.warn("missing samples from design file are ignored: %s" % missing)
# remove unnecessary samples
design_table = design_table[design_table["include"] != 0]
E.debug("included samples: %s" % list(design_table.index))
counts_table = counts_table[list(design_table.index)]
E.info("filtered data: %i observations for %i samples" %
counts_table.shape)
return counts_table, design_table
def filterTagDataPandas(counts_table,
design_table,
filter_min_counts_per_row=1,
filter_min_counts_per_sample=10,
filter_percentile_rowsums=0):
'''filter tag data.
* remove rows with fewer than *filter_min_counts_per_row* counts in the
most highly expressed sample
* remove samples with fewer than *filter_min_counts_per_sample* counts in
the most highly expressed row
* remove the lowest percentile of rows in the table, sorted
by total tags per row
'''
# Remove windows with no data
max_counts_per_row = counts_table.max(1)
counts_table = counts_table[
max_counts_per_row >= filter_min_counts_per_row]
observations, samples = counts_table.shape
E.info("trimmed data: %i observations for %i samples" %
(observations, samples))
# remove samples without data
max_counts_per_sample = counts_table.max()
empty_samples = max_counts_per_sample < filter_min_counts_per_sample
sample_names = counts_table.columns
nempty_samples = sum(empty_samples)
if nempty_samples:
E.warn("%i empty samples are being removed: %s" %
(nempty_samples,
",".join([sample_names[x] for x, y in
enumerate(empty_samples) if y])))
raise NotImplementedError("removing empty samples needs to be done")
# r('''countsTable <- countsTable[, max_counts >= %i]''' % filter_min_counts_per_sample)
# r('''groups <- groups[max_counts >= %i]''' % filter_min_counts_per_sample)
# r('''pairs <- pairs[max_counts >= %i]''' % filter_min_counts_per_sample)
# observations, samples = tuple( r('''dim(countsTable)'''))
# percentile filtering
if filter_percentile_rowsums > 0:
percentile = float(filter_percentile_rowsums) / 100.0
sum_counts = counts_table.sum(1)
take = sum_counts > sum_counts.quantile(percentile)
E.info("percentile filtering at level %f: keep=%i, discard=%i" %
(filter_percentile_rowsums,
sum(take),
len(take) - sum(take)))
counts_table = counts_table[take]
return counts_table
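# Illustrative sketch (added): a tiny, made-up example of the row filtering
# above. It is defined but never called by this module; the sample names and
# counts are hypothetical.
def _example_filterTagDataPandas():
    toy_counts = pandas.DataFrame(
        {"sampleA": [0, 5, 100], "sampleB": [0, 7, 250]},
        index=["win1", "win2", "win3"])
    toy_design = pandas.DataFrame(
        {"include": [1, 1], "group": ["A", "B"], "pair": [1, 1]},
        index=["sampleA", "sampleB"])
    # win1 is dropped because its maximum count (0) is below
    # filter_min_counts_per_row; both samples clear the per-sample threshold
    return filterTagDataPandas(toy_counts, toy_design,
                               filter_min_counts_per_row=1,
                               filter_min_counts_per_sample=10)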
def identifyVariablesPandas(design_table):
# design table should have been processed by loadTagDataPandas already
# just in case, re-filter for not included samples here
design_table = design_table[design_table["include"] != 0]
conds = design_table['group'].tolist()
pairs = design_table['pair'].tolist()
# TS, adapted from JJ code for DESeq2 design tables:
# if additional columns present, pass to 'factors'
if len(design_table.columns) > 3:
factors = design_table.iloc[:, 3:]
else:
factors = None
return conds, pairs, factors
def checkTagGroupsPandas(design_table, ref_group=None):
'''compute groups and pairs from tag data table.'''
conds, pairs, factors = identifyVariablesPandas(design_table)
groups = list(set(conds))
# Relevel the groups so that the reference comes first
# how to do this in python?
# if ref_group is not None:
# r('''groups <- relevel(groups, ref = "%s")''' % ref_group)
# check this works, will need to make factors from normal df
# TS adapted from JJ code for DESeq2 -
# check whether there are additional factors in design file...
# a pandas DataFrame has no truth value; explicitly test against None
if factors is not None:
    E.warn("There are additional factors in design file that are ignored"
           " by checkTagGroupsPandas: %s" % factors)
else:
pass
# Test if replicates exist - at least one group must have multiple samples
max_per_group = max([conds.count(x) for x in groups])
has_replicates = max_per_group >= 2
# Test if pairs exist:
npairs = len(set(pairs))
has_pairs = npairs == 2
# ..if so, at least two samples are required per pair
if has_pairs:
min_per_pair = min([pairs.count(x) for x in set(pairs)])
has_pairs = min_per_pair >= 2
return groups, pairs, conds, factors, has_replicates, has_pairs
ResultColumns = ["test_id", "treatment_name", "treatment_mean",
"treatment_std", "control_name", "control_mean",
"control_std", "p_value", "p_value_adj", "l2fold", "fold",
"transformed_l2fold", "significant", "status"]
ResultColumns_dtype = {"test_id": object, "treatment_name": object,
"treatment_mean": float, "treatment_std":
float, "control_name": object, "control_mean":
float, "control_std": float, "p_value": float,
"p_value_adj": float, "l2fold": float, "fold":
float, "transformed_l2fold": float,
"significant": int, "status": object}
def makeEmptyDataFrameDict():
return {key: [] for key in ResultColumns}
def runTTestPandas(counts_table,
design_table,
outfile,
outfile_prefix,
fdr,
ref_group=None):
'''apply a ttest on the data.
For the T-test it is best to use FPKM values as
this method does not perform any library normalization.
Alternatively, perform normalisation on counts table using Counts.py
'''
stats = importr('stats')
(groups, pairs, conds, factors, has_replicates,
has_pairs) = checkTagGroupsPandas(design_table, ref_group)
df_dict = makeEmptyDataFrameDict()
for combination in itertools.combinations(groups, 2):
# as each combination may have different numbers of samples in control
# and treatment, calculations have to be performed on a per
# combination basis
control, treatment = combination
n_rows = counts_table.shape[0]
df_dict["control_name"].extend((control,)*n_rows)
df_dict["treatment_name"].extend((treatment,)*n_rows)
df_dict["test_id"].extend(counts_table.index.tolist())
# subset counts table for each combination
c_keep = [x == control for x in conds]
control_counts = counts_table.iloc[:, c_keep]
t_keep = [x == treatment for x in conds]
treatment_counts = counts_table.iloc[:, t_keep]
c_mean = control_counts.mean(axis=1)
df_dict["control_mean"].extend(c_mean)
df_dict["control_std"].extend(control_counts.std(axis=1))
t_mean = treatment_counts.mean(axis=1)
df_dict["treatment_mean"].extend(t_mean)
df_dict["treatment_std"].extend(treatment_counts.std(axis=1))
t, prob = ttest_ind(control_counts, treatment_counts, axis=1)
df_dict["p_value"].extend(prob)
# what about zero values?!
df_dict["fold"].extend(t_mean / c_mean)
df_dict["p_value_adj"].extend(
list(stats.p_adjust(FloatVector(df_dict["p_value"]), method='BH')))
df_dict["significant"].extend(
[int(x < fdr) for x in df_dict["p_value_adj"]])
df_dict["l2fold"].extend(list(numpy.log2(df_dict["fold"])))
# note: the transformed log2 fold change is not transformed!
df_dict["transformed_l2fold"].extend(list(numpy.log2(df_dict["fold"])))
# set all status values to "OK"
df_dict["status"].extend(("OK",)*n_rows)
results = pandas.DataFrame(df_dict)
results.set_index("test_id", inplace=True)
results.to_csv(outfile, sep="\t", header=True, index=True)
def plotCorrelationHeatmapMatplot(counts, outfile, method="correlation",
cor_method="pearson"):
'''plot a heatmap of correlations derived from
countsTable.
'''
# to do: add other methods?
# define outside function? - Will we reuse?
heatmap_cdict_b_to_y = {
'red': ((0.0, 0.4, .4), (0.01, .4, .4), (1., .95, .95)),
'green': ((0.0, 0.4, 0.4), (0.01, .4, .4), (1., .95, .95)),
'blue': ((0.0, .9, .9), (0.01, .9, .9), (1., 0.4, 0.4))}
cm = matplotlib.colors.LinearSegmentedColormap(
'', heatmap_cdict_b_to_y, 256)
df = counts.corr(method=cor_method)
plt.pcolor(np.array(df), cmap=cm)
plt.colorbar()
plt.title("%(cor_method)s correlation heatmap" % locals())
plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns, rotation=90)
plt.tight_layout()
plt.savefig(outfile)
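# Usage sketch (added; the output path is a hypothetical placeholder):
#
#   counts, design = loadTagDataPandas("counts.tsv.gz", "design.tsv")
#   plotCorrelationHeatmapMatplot(counts, "spearman_heatmap.png",
#                                 cor_method="spearman")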
def runEdgeRPandas(counts,
design_table,
outfile,
outfile_prefix="edger.",
fdr=0.1,
prefix="",
dispersion=None,
ref_group=None):
'''run EdgeR on countsTable.
Results are stored in *outfile* and files prefixed by *outfile_prefix*.
The dispersion is usually measured from replicates. If there are no
replicates, you need to set the *dispersion* explicitly.
See page 13 of the EdgeR user guide::
2. Simply pick a reasonable dispersion value, based on your
experience with similar data, and use that. Although
subjective, this is still more defensible than assuming Poisson
variation. Typical values are dispersion=0.4 for human data,
dispersion=0.1 for data on genetically identical model
organisms or dispersion=0.01 for technical replicates.
'''
# load library
r('''suppressMessages(library('edgeR'))''')
(groups, pairs, conds, factors, has_replicates,
has_pairs) = checkTagGroupsPandas(design_table, ref_group)
if not has_replicates and dispersion is None:
raise ValueError("no replicates and no dispersion")
# output heatmap plot
plotCorrelationHeatmapMatplot(counts,
'%(outfile_prefix)sheatmap.png' % locals(),
cor_method="spearman")
E.info('running EdgeR: groups=%s, pairs=%s, replicates=%s, has_pairs=%s' %
(groups, pairs, has_replicates, has_pairs))
r_counts = pandas2ri.py2ri(counts)
passDFtoRGlobalEnvironment = r('''function(df){
countsTable <<- df}''')
passDFtoRGlobalEnvironment(r_counts)
if has_pairs:
# output difference between groups
# TS #####
# this is performed on non-normalised data
# should we use Counts.py to normalise first?
# also, this isn't edgeR specific, should this be
# moved to a seperate summary function?
# also move the MDS plotting?
# #####
first = True
pairs_df = pandas.DataFrame()
import pandas as pd
import numpy as np
# Filter csv data based on application args
def filter(filter_args):
# Read in csv data
df = pd.read_csv('soilgenerate/data/12072016_plants_sheff.csv', encoding="utf-8")
## BEGIN Default Filters
# Filter nan
is_not_nan = pd.notnull(df['Growth Rate'])
df = df[is_not_nan]
is_not_nan = pd.notnull(df['Planting Density per Acre, Maximum'])
import os, gzip, logging, re
import pandas as pd
from typing import List, Tuple
from common.base_parser import BaseParser
from common.constants import *
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s',
handlers=[logging.StreamHandler()])
# protein name types:
REC_FULLNAME = 'recommended full name'
REC_SHORTNAME = 'recommended short name'
ALT_FULLNAME = 'alternative full name'
ALT_SHORTNAME = 'alternative short name'
UNIPROT_FILE = 'sprot.tsv'
UNIPROT_2_GENE = 'sprot2gene.tsv'
UNIPROT_2_GO = 'sprot2go.tsv'
UNIPROT_SYNONYM = 'sprot2syn.tsv'
UNIPROT_SYNONYM_DERIVED = 'sprot2syn_derived.tsv'
class Entry:
def __init__(self):
self.id: str = ''
self.other_accessions = []
self.name: str = ''
self.gene_name: str = ''
self.dataset: str = 'Swiss-Prot'
self.protein_names: List[Tuple[str, str]] = []
self.tax_id: str = ''
self.function: str = ''
self.pathway: str = ''
self.go_ids = []
def add_protein_name(self, type, name):
if name:
self.protein_names.append((type, name))
def get_all_names(self):
names = []
for _, name in self.protein_names:
names.append(name)
return names
def get_synonym_rows(self):
s = ''
for protein_type, name in self.protein_names:
s += f'{self.id}\t{name}\t{protein_type}\n'
return s
def get_alternative_accession_rows(self):
s = ''
for item in self.other_accessions:
s += f'{self.id}\t{item}\n'
return s
def get_go_rows(self):
s = ''
for k in self.go_ids:
go_id = k.replace('GO:', '')
s += f'{self.id}\t{go_id}\n'
return s
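# Example (added, hypothetical accession): for an Entry with id 'P12345' and
# go_ids ['GO:0005737'], get_go_rows() yields the line 'P12345\t0005737\n';
# get_synonym_rows() writes one 'id<tab>name<tab>type' line per entry in
# protein_names.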
class UniprotParser(BaseParser):
def __init__(self, prefix: str):
BaseParser.__init__(self, prefix, 'uniprot')
self.uniprot_file = 'uniprot_sprot.dat.gz'
self.id_mapping_file = 'idmapping_selected.tab.gz'
self.logger = logging.getLogger(__name__)
def parse_uniprot_file(self):
entries = []
self.logger.info("Start parsing file:" + self.uniprot_file)
with gzip.open(os.path.join(self.download_dir, self.uniprot_file), 'rt') as f:
for line in f:
if not line.startswith('CC '):
function = False
pathway = False
if line.startswith('ID'):
if entries and len(entries) % 10000 == 0:
logging.info(str(len(entries)))
pre_line_type = None
entry = Entry()
entry.name = line[3:].strip().split(' ')[0]
entries.append(entry)
else:
# ID indicates the start of a new section block
# otherwise get the last entry to update it
entry = entries[-1]
if line.startswith('AC'):
line = line[3:].strip().strip(';')
items = re.split(r';\s*', line)
# currently we are not storing other accession in neo4j. Could be useful in the future if we need to use it
if not entry.id:
entry.id = items[0]
if len(items) > 1:
entry.other_accessions = entry.other_accessions + items[1:]
else:
entry.other_accessions = entry.other_accessions + items
elif line.startswith('DE'):
text = line[3:].strip()
if text.startswith('RecName: Full='):
entry.add_protein_name(REC_FULLNAME, self._clean_name(text[len('RecName: Full='):]))
pre_line_type = 'RecName'
elif text.startswith('AltName: Full='):
entry.add_protein_name(ALT_FULLNAME, self._clean_name(text[len('AltName: Full='):]))
pre_line_type = 'AltName'
elif text.startswith('Short='):
if pre_line_type == 'RecName':
entry.add_protein_name(REC_SHORTNAME, self._clean_name(text[len('Short='):]))
elif pre_line_type == 'AltName':
entry.add_protein_name(ALT_SHORTNAME, self._clean_name(text[len('Short='):]))
elif line.startswith('OX NCBI_TaxID='):
entry.tax_id = self._clean_name(line[len('OX NCBI_TaxID='):])
elif line.startswith('GN Name='):
entry.gene_name = self._clean_name(line[len('GN Name='):])
elif line.startswith('DR GO;'):
entry.go_ids.append(self._clean_name(line[len('DR GO;'):]))
elif line.startswith('CC -!- FUNCTION:'):
function = True
entry.function = self._clean_name(line[len('CC -!- FUNCTION:'):], False)
elif line.startswith('CC -!- PATHWAY:'):
pathway = True
if entry.pathway:
entry.pathway += '; '
entry.pathway += self._clean_name(line[len('CC -!- PATHWAY:'):], False)
elif line.startswith('CC '):
if function:
entry.function += ' ' + self._clean_name(line[len('CC '):], False)
elif pathway:
entry.pathway += ' ' + self._clean_name(line[len('CC '):], False)
self.logger.info(f'Total entries: {len(entries)}')
return entries
def _clean_name(self, text:str, clean_brace=True):
item = text.split(';')[0]
if clean_brace and '{' in item:
item = item[:item.find('{')]
return item.strip()
def parse_and_write_data_files(self):
entries = self.parse_uniprot_file()
self.write_protein_file(entries)
self.write_protein2synonym_file(entries)
self.write_protein2go_file(entries)
self.parse_and_write_sprot2gene_file()
self.extract_protein_symbol_as_synonym()
self.extract_multiple_genes_from_sprot2gene()
def write_protein_file(self, entries):
self.logger.info("write sprot.tsv")
with open(os.path.join(self.output_dir, self.file_prefix + 'sprot.tsv'), 'w') as f:
f.write('\t'.join([PROP_ID, PROP_NAME, PROP_GENE_NAME, PROP_TAX_ID, PROP_PATHWAY, PROP_FUNCTION]) + '\n')
f.writelines('\t'.join([entry.id, entry.name, entry.gene_name, entry.tax_id, entry.pathway, entry.function])+'\n' for entry in entries)
def write_protein2synonym_file(self, entries):
self.logger.info("write sprot2syn")
names = set()
for entry in entries:
names.update(entry.get_all_names())
with open(os.path.join(self.output_dir, self.file_prefix + 'sprot2syn.tsv'), 'w') as f:
f.write('\t'.join([PROP_ID, PROP_NAME, PROP_TYPE]) + '\n')
f.writelines(entry.get_synonym_rows() for entry in entries)
def write_protein2go_file(self, entries):
self.logger.info("write sprot2go")
with open(os.path.join(self.output_dir, self.file_prefix + 'sprot2go.tsv'), 'w') as f:
f.write(f'{PROP_ID}\t{PROP_GO_ID}\n')
f.writelines(entry.get_go_rows() for entry in entries)
def parse_and_write_sprot2gene_file(self):
self.logger.info("write sprot2gene")
# get sprot ids
df = pd.read_table(os.path.join(self.output_dir, self.file_prefix + 'sprot.tsv'))
sprot_ids = set([id for id in df[PROP_ID]])
with gzip.open(os.path.join(self.download_dir, self.id_mapping_file), 'rt') as f:
self.logger.info('opened gzip idmapping_selected.tab.gz to start writing sprot2gene')
with open(os.path.join(self.output_dir, self.file_prefix + 'sprot2gene.tsv'), 'w') as outfile:
outfile.write(f'{PROP_ID}\t{PROP_GENE_ID}\n')
rows = 0
lines = 0
for line in f:
lines += 1
# if lines % 10000 == 0:
# print(lines)
if len(line) > 10000:
# some rows are too long, truncate them as we only need the first few columns
line = line[:1000]
row = line.split('\t')
if len(row) < 3:
break
if row[2] and row[0] in sprot_ids:
rows += 1
outfile.write(f'{row[0]}\t{row[2]}\n')
self.logger.info('finished writing sprot2gene.tsv. rows:' + str(rows))
def extract_multiple_genes_from_sprot2gene(self):
self.logger.info('split gene_id columns when values have multiple gene ids')
df = pd.read_table(os.path.join(self.output_dir, self.file_prefix + 'sprot2gene.tsv'))
print('sprot2gene:', len(df))
df_m = df[df[PROP_GENE_ID].str.contains(';')]
df = df[~df[PROP_GENE_ID].str.contains(';')]
print('sprot2gene with multiple genes:', len(df_m), 'single gene:', len(df))
df_m.set_index(PROP_ID, inplace=True)
df_m = df_m[PROP_GENE_ID].str.split(';', expand=True).stack()
df_m = df_m.reset_index().rename(columns={0: PROP_GENE_ID})
df_m[PROP_GENE_ID] = df_m[PROP_GENE_ID].str.strip()
df_m = df_m[[PROP_ID, PROP_GENE_ID]]
print('split genes:', len(df_m))
df = pd.concat([df, df_m])
#!/usr/bin/env python
# coding: utf-8
# Author:
# <NAME>
# Emotional Sentiment on Twitter
# A coronavirus vaccine online firestorm
# In this python script you will find examples of some of the most common
# NLP (Natural Language Processing) techniques used to uncover patterns of
# sentiment and emotion on social media microblogging platforms like Twitter.
# It is organized as follows:
# - Step 1: Exploratory analysis
# - Step 2: Text processing
# - Step 3: Sentiment analysis
# - Step 4: Word frequency
# - Step 5: LDA topics extraction
# - Step 6: Emotion analysis
#
# ## Step 1: EXPLORATORY ANALYSIS
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
from datetime import date
import re # for regular expressions
import string
# Importing the data
tweets = pd.read_csv('input/tweets.csv')
# getting the date column ready for datetime operations
tweets['datetime']= pd.to_datetime(tweets['datetime'])
# A plot of the tweets with the word "CureVac" over the past 6 years.
fig = plt.figure(figsize=(15, 10))
ax = sns.lineplot(data=tweets.set_index("datetime").groupby(pd.Grouper(freq='Y')).count())
plt.title('Tweets with "CureVac" from 2014 to 2020', fontsize=20)
plt.xlabel('Years', fontsize=15)
plt.ylabel('Tweets', fontsize=15)
fig.savefig("images/All_Tweets_2014-2020.png")
# creating a column to filter the online storm period (from 15 and 18 March)
def make_onlinestorm_field():
for i, row in tweets.iterrows():
if pd.to_datetime(tweets.at[i, 'datetime']) > pd.Timestamp(date(2020,3,15)):
tweets.at[i, 'onlinestorm'] = True
else:
tweets.at[i, 'onlinestorm'] = False
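# Design note: the iterrows() loop above keeps the original row-by-row logic;
# an equivalent vectorised form (assuming the same cut-off date) would be
#   tweets['onlinestorm'] = pd.to_datetime(tweets['datetime']) > pd.Timestamp(date(2020, 3, 15))
# which avoids per-row Python overhead on larger datasets.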
make_onlinestorm_field()
# counting tweets during the three days online storm
print('In three days, tweets went over {}, all around the world.'.format(tweets[tweets['onlinestorm']]['onlinestorm'].count()))
tweets[tweets['onlinestorm']]
# Let's now have a look at the distribution of the tweets, by the hour, during the online storm.
fig = plt.figure(figsize=(15, 10))
ax = sns.lineplot(data=tweets[tweets['onlinestorm'] == True].set_index("datetime").groupby(pd.Grouper(freq='H')).onlinestorm.count())
plt.title('Tweets per hour from 15 to 18 March 2020', fontsize=20)
plt.xlabel('Time (hours)', fontsize=15)
plt.ylabel('No. Tweets', fontsize=15)
fig.savefig("images/All_Tweets_Onlinestorm.png")
# It is time to have a first look at the content of the tweets and do some descriptive statistics.
# For now, I will focus only on features like hashtags, mentions, urls, capital words and words in general.
# A function to count tweets based on regular expressions
def count_tweets(reg_expression, tweet):
tweets_list = re.findall(reg_expression, tweet)
return len(tweets_list)
# Creating a dictionary to hold these counts
content_count = {
'words' : tweets['text'].apply(lambda x: count_tweets(r'\w+', x)),
'mentions' : tweets['text'].apply(lambda x: count_tweets(r'@\w+', x)),
'hashtags' : tweets['text'].apply(lambda x: count_tweets(r'#\w+', x)),
'urls' : tweets['text'].apply(lambda x: count_tweets(r'http.?://[^\s]+[\s]?', x)),
}
df = pd.concat([tweets, pd.DataFrame(content_count)], axis=1)
# Tweets descriptive statistics
# Display descriptive statistics for words, mentions,
# hashtags and urls
for key in content_count.keys():
print()
print('Descriptive statistics for {}'.format(key))
print(df.groupby('onlinestorm')[key].describe())
# Now plot them
for key in content_count.keys():
bins = np.arange(df[key].min(), df[key].max() + 1)
g = sns.FacetGrid(df, col='onlinestorm', height=5, hue='onlinestorm', palette="RdYlGn")
g = g.map(sns.distplot, key, kde=False, norm_hist=True, bins=bins)
plt.savefig('images/Descriptive_stats_for_' + key + '.png')
# Step 2: TEXT PROCESSING
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk import pos_tag
# I am adding my own stopwords list to the NLTK list.
# This way we can drop words that are irrelevant for text processing
MY_STOPWORDS = ['curevac','vaccine','german','mrna','biotech','cancer', 'lilly','eli','ag','etherna_immuno', 'translatebio', 'mooreorless62','boehringer', 'ingelheim','biopharmaceutical', 'company']
STOPLIST = set(stopwords.words('english') + list(MY_STOPWORDS))
SYMBOLS = " ".join(string.punctuation).split(" ") + ["-", "...", "”", "``", ",", ".", ":", "''","#","@"]
# The NLTK lemmatizer and stemmer classes
lemmatizer = WordNetLemmatizer()
stemmer = SnowballStemmer('english')
# read english selected tweets, no duplicates
tweets = pd.read_csv('input/tweets_en.csv')
# I use the POS tagging from NLTK to retain only adjectives, verbs, adverbs
# and nouns as a base for lemmatization.
def get_lemmas(tweet):
# A dictionary to help convert Treebank tags to WordNet
treebank2wordnet = {'NN':'n', 'JJ':'a', 'VB':'v', 'RB':'r'}
postag = ''
lemmas_list = []
for word, tag in pos_tag(word_tokenize(tweet)):
if tag.startswith("JJ") or tag.startswith("RB") or tag.startswith("VB") or tag.startswith("NN"):
try:
postag = treebank2wordnet[tag[:2]]
except:
postag = 'n'
lemmas_list.append(lemmatizer.lemmatize(word.lower(), postag))
return lemmas_list
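# Illustrative example (actual output depends on the installed NLTK models):
#   get_lemmas("Vaccines were approved") -> ['vaccine', 'be', 'approve']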
# We will now pre-process the tweets, following a pipeline of tokenization,
# filtering, case normalization and lemma extraction.
# This is the function to clean and filter the tokens in each tweet
def clean_tweet(tokens):
filtered = []
for token in tokens:
if re.search('[a-zA-Z]', token):
if token not in STOPLIST:
if token[0] not in SYMBOLS:
if not token.startswith('http'):
if '/' not in token:
if '-' not in token:
filtered.append(token)
return filtered
# Prior to lemmatization, I apply POS (part-of-speech) tagging to make sure that only the
# adjectives, verbs, adverbs and nouns are retained.
# Starts the lemmatization process
def get_lemmatized(tweet):
all_tokens_string = ''
filtered = []
tokens = []
# lemmatize
tokens = [token for token in get_lemmas(tweet)]
# filter
filtered = clean_tweet(tokens)
# join everything into a single string
all_tokens_string = ' '.join(filtered)
return all_tokens_string
# get the lemmatized tweets and puts the result in an "edited" text column
# for future use in this script
edited = ''
for i, row in tweets.iterrows():
edited = get_lemmatized(tweets.loc[i]['text'])
if len(edited) > 0:
tweets.at[i,'edited'] = edited
else:
tweets.at[i,'edited'] = None
# After lemmatization, some tweets may end up with the same words
# Let's make sure that we have no duplicates
tweets.drop_duplicates(subset=['edited'], inplace=True)
tweets.dropna(inplace=True)
# With these text processing steps, and the removal of duplicates,
# the final sample counts 5,508 English-language tweets,
# with an average of 30 words (SD 12.5, ranging from 4 to 61 words).
# Using apply/lambda to create a new column with the number of words in each tweet
tweets['word_count'] = tweets.apply(lambda x: len(x['text'].split()),axis=1)
t = pd.DataFrame(tweets['word_count'].describe()).T
tweets.head()
# Step 3: SENTIMENT ANALYSIS
# Let us import the VADER analyser.
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# For the purpose of the timeseries analysis, we must make sure that the tweets are all correctly sorted.
tweets['datetime']=pd.to_datetime(tweets['datetime'])
tweets.sort_values('datetime', inplace=True, ascending=True)
tweets = tweets.reset_index(drop=True)
# Creating a column to "filter" the online storm period.
make_onlinestorm_field()
# To avoid repetitions in our code, here are some plotting functions
# that will be called often ...
def plot_sentiment_period(df, info):
# Using the mean values of sentiment for each period
df1 = df.groupby(df['datetime'].dt.to_period(info['period'])).mean()
df1.reset_index(inplace=True)
df1['datetime'] = pd.PeriodIndex(df1['datetime']).to_timestamp()
plot_df = pd.DataFrame(df1, df1.index, info['cols'])
plt.figure(figsize=(15, 10))
ax = sns.lineplot(data=plot_df, linewidth = 3, dashes = False)
plt.legend(loc='best', fontsize=15)
plt.title(info['title'], fontsize=20)
plt.xlabel(info['xlab'], fontsize=15)
plt.ylabel(info['ylab'], fontsize=15)
plt.tight_layout()
plt.savefig('images/' + info['fname'])
return
def plot_fractions(props, title, fname):
plt1 = props.plot(kind='bar', stacked=False, figsize=(16,5), colormap='Spectral')
plt.legend(bbox_to_anchor=(1.005, 1), loc=2, borderaxespad=0.)
plt.xlabel('Online storm', fontweight='bold', fontsize=18)
plt.xticks(rotation=0,fontsize=14)
#plt.ylim(0, 0.5)
plt.ylabel('Fraction of Tweets', fontweight='bold', fontsize=18)
plt1.set_title(label=title, fontweight='bold', size=20)
plt.tight_layout()
plt.savefig('images/' + fname + '.png')
return
def plot_frequency_chart(info):
fig, ax = plt.subplots(figsize=(14, 8))
sns.set_context("notebook", font_scale=1)
ax = sns.barplot(x=info['x'], y=info['y'], data=info['data'], palette=(info['pal']))
ax.set_title(label=info['title'], fontweight='bold', size=18)
plt.ylabel(info['ylab'], fontsize=16)
plt.xlabel(info['xlab'], fontsize=16)
plt.xticks(rotation=info['angle'],fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig('images/' + info['fname'])
return
# Calling VADER
analyzer = SentimentIntensityAnalyzer()
# Get VADER Compound value for sentiment intensity
tweets['sentiment_intensity'] = [analyzer.polarity_scores(v)['compound'] for v in tweets['edited']]
# This function returns the sentiment category
def get_sentiment(intensity):
if intensity >= 0.05:
return 'Positive'
elif (intensity >= -0.05) and (intensity < 0.05):
return 'Neutral'
else:
return 'Negative'
# Using pandas apply/lambda to speed up the process
tweets['sentiment'] = tweets.apply(lambda x: get_sentiment(x['sentiment_intensity']),axis=1)
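# Design note: Series.apply calls a Python function per row; an equivalent
# vectorised alternative (assumed, using the same thresholds) would be
#   conds = [tweets['sentiment_intensity'] >= 0.05, tweets['sentiment_intensity'] < -0.05]
#   tweets['sentiment'] = np.select(conds, ['Positive', 'Negative'], default='Neutral')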
# The next plot gives us a clear image of the “explosion” of contradictory sentiments in this period:
df=tweets.loc[:,['datetime','sentiment_intensity']]
# filter for these dates
df.set_index('datetime',inplace=True)
df=df[(df.index>='2020-03-12') & (df.index<'2020-03-18')]
df.plot(figsize=(12,6));
plt.ylabel('Compound score', fontsize=15)
plt.xlabel('Tweets', fontsize=15)
plt.legend().set_visible(False)
plt.title('Sentiment on tweets with CureVac (12 March to 18 March)', fontsize=20)
plt.tight_layout()
sns.despine(top=True)
plt.savefig('images/Sentiment_during_onlinestorm.png')
plt.show()
# And this one will show us a comparison of the sentiments before and during the online storm.
# Values are normalized to take into account the number of tweets in each
# of the two different periods
props = tweets.groupby('onlinestorm')['sentiment'].value_counts(normalize=True).unstack()
plot_fractions(props,'Percentage of sentiments before and during the online storm',
'Fraction_sentiments_before_and_during_onlinestorm')
# Step 4: Word frequency
# We need these imports for the wordcloud representation:
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from matplotlib.colors import makeMappingArray
from palettable.colorbrewer.diverging import Spectral_4
from collections import Counter # Counts the most common items in a list
def display_wordcloud(tokens, title, fname):
tokens_upper = [token.upper() for token in tokens]
cloud_mask = np.array(Image.open("images/cloud_mask.png"))
wordcloud = WordCloud(max_font_size=100,
max_words=50, width=2500,
height=1750,mask=cloud_mask,
background_color="white").generate(" ".join(tokens_upper))
plt.figure()
fig, ax = plt.subplots(figsize=(14, 8))
plt.title(title, fontsize=20)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.savefig('images/'+ fname + '.png')
plt.show()
return
def join_edited_string(edited_tweets):
edited_string = ''
for row in edited_tweets:
edited_string = edited_string + ' ' + row
return edited_string
def get_trigrams(trigrams, top_grams):
grams_str = []
data = []
gram_counter = Counter(trigrams)
for grams in gram_counter.most_common(top_grams):
gram = ''
grams_str = grams[0]
grams_str_count = []
for n in range(0,3):
gram = gram + grams_str[n] + ' '
grams_str_count.append(gram)
grams_str_count.append(grams[1])
data.append(grams_str_count)
print(grams_str_count)
df = pd.DataFrame(data, columns = ['Grams', 'Count'])
return df
# Let’s have a look at the 20 most frequent words in tweets before the online storm.
# Filtering the tweets of the 6 years before the online storm
df = tweets[tweets['onlinestorm'] == False]
# Join all the edited tweets in one single string
joined_string = join_edited_string(df['edited'])
# Get tokens
tokens = joined_string.split(' ')
# get trigrams
trigrams = nltk.trigrams(tokens)
# plot word frequency during online storm
word_counter = Counter(tokens)
df_counter = pd.DataFrame(word_counter.most_common(20), columns = ['word', 'freq'])
info = {'data': df_counter, 'x': 'freq', 'y': 'word',
'xlab': 'Count', 'ylab': 'Words', 'pal':'viridis',
'title': 'Most frequent words before online storm',
'fname':'word_frequency_before_onlinestorm.png',
'angle': 90}
plot_frequency_chart(info)
# plot trigram frequency
df_trigrams = get_trigrams(trigrams, 10)
info = {'data': df_trigrams, 'x': 'Grams', 'y': 'Count',
'xlab': 'Trigrams', 'ylab': 'Count', 'pal':'viridis',
'title': 'Most frequent trigrams before online storm',
'fname':'trigrams_frequency_before_onlinestorm.png',
'angle': 40}
plot_frequency_chart(info)
# And the wordcloud ...
display_wordcloud(tokens, 'Wordcloud of most frequent words before online storm',
'WordCloud_before_onlinestorm')
# Filtering the tweets of the 3 days of the online storm
df =tweets[tweets['onlinestorm']]
# Join all the edited tweets in one single string
joined_string = join_edited_string(df['edited'])
# Get tokens
tokens = joined_string.split(' ')
# get trigrams
trigrams = nltk.trigrams(tokens)
# plot word frequency during online storm
word_counter = Counter(tokens)
df_counter = pd.DataFrame(word_counter.most_common(20), columns = ['word', 'freq'])
info = {'data': df_counter, 'x': 'freq', 'y': 'word',
'xlab': 'Count', 'ylab': 'Words', 'pal':'inferno',
'title': 'Most frequent words during online storm',
'fname':'word_frequency_during_onlinestorm.png',
'angle': 90}
plot_frequency_chart(info)
# plot trigrams frequency
df_trigrams = get_trigrams(trigrams, 10)
info = {'data': df_trigrams, 'x': 'Grams', 'y': 'Count',
'xlab': 'Trigrams', 'ylab': 'Count', 'pal':'inferno',
'title': 'Most frequent trigrams during online storm',
'fname':'trigrams_frequency_during_onlinestorm.png',
'angle': 40}
plot_frequency_chart(info)
display_wordcloud(tokens, 'Wordcloud of most frequent words during online storm',
'WordCloud_during_onlinestorm')
# Step 5: LDA topics extraction
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer
# I am using here Susan Li's functions to get the top words from a topic:
def get_keys(topic_matrix):
'''
returns an integer list of predicted topic
categories for a given topic matrix
'''
keys = topic_matrix.argmax(axis=1).tolist()
return keys
def keys_to_counts(keys):
'''
returns a tuple of topic categories and their
accompanying magnitudes for a given list of keys
'''
count_pairs = Counter(keys).items()
categories = [pair[0] for pair in count_pairs]
counts = [pair[1] for pair in count_pairs]
return (categories, counts)
def get_top_n_words(n, n_topics, keys, document_term_matrix, tfidf_vectorizer):
'''
returns a list of n_topic strings, where each string contains the n most common
words in a predicted category, in order
'''
top_word_indices = []
for topic in range(n_topics):
temp_vector_sum = 0
for i in range(len(keys)):
if keys[i] == topic:
temp_vector_sum += document_term_matrix[i]
temp_vector_sum = temp_vector_sum.toarray()
top_n_word_indices = np.flip(np.argsort(temp_vector_sum)[0][-n:],0)
top_word_indices.append(top_n_word_indices)
top_words = []
for topic in top_word_indices:
topic_words = []
for index in topic:
temp_word_vector = np.zeros((1,document_term_matrix.shape[1]))
temp_word_vector[:, index] = 1
the_word = tfidf_vectorizer.inverse_transform(temp_word_vector)[0][0]
try:
topic_words.append(the_word.encode('ascii').decode('utf-8'))
except:
pass
top_words.append(", ".join(topic_words))
return top_words
# And here is a function for topics extraction using LDA, in which I produce a dataframe
# with the topics and their top words to facilitate the plotting that follows.
# LDA topics
def get_topics(edited, n_topics, n_words):
eds = edited.values
vec = TfidfVectorizer(use_idf=True, smooth_idf=True)
document_term_matrix = vec.fit_transform(eds)
model = LatentDirichletAllocation(n_components=n_topics)
topic_matrix = model.fit_transform(document_term_matrix)
keys = get_keys(topic_matrix)
categories, counts = keys_to_counts(keys)
top_n_words = get_top_n_words(n_words, n_topics, keys, document_term_matrix, vec)
topics = ['Topic {}: \n'.format(i + 1) + top_n_words[i] for i in categories]
data=[]
for i, topic in enumerate(topics):
tmp = []
tmp.append(topic)
tmp.append(counts[i])
data.append(tmp)
df_topics = pd.DataFrame(data, columns = ['Topics', 'Count'])
return df_topics
# Topics before the online storm
# Filtering the tweets of the 6 years before the online storm
df = tweets[tweets['onlinestorm'] == False]
# LDA topics
df_topics = get_topics(df['edited'], 5, 5)
info = {'data': df_topics, 'x': 'Topics', 'y': 'Count',
'xlab': 'Topics', 'ylab': 'Count', 'pal':'viridis',
'title': 'LDA Topics before Online Storm',
'fname':'LDA_Topics_before_onlinestorm.png',
'angle': 40}
plot_frequency_chart(info)
# Topics during the online storm
# Filtering the tweets of the 3 days of the online storm
df =tweets[tweets['onlinestorm']]
# LDA topics
df_topics = get_topics(df['edited'], 5, 5)
info = {'data': df_topics, 'x': 'Topics', 'y': 'Count',
'xlab': 'Topics', 'ylab': 'Count', 'pal':'inferno',
'title': 'Main Topics during Online Storm',
'fname':'LDA_Topics_during_onlinestorm.png',
'angle': 40}
plot_frequency_chart(info)
# Step 6: Emotion analysis
import termcolor
import sys
from termcolor import colored, cprint
plt.style.use('fivethirtyeight')
# Importing the data from the NCR lexicon
ncr = pd.read_csv('input/NCR-lexicon.csv', sep =';')
# Let's create a list of the emotions
emotions = ['Anger', 'Anticipation','Disgust','Fear', 'Joy','Sadness', 'Surprise', 'Trust']
# Join all the edited tweets in one single string
joined_string = join_edited_string(df['edited'])
# Get tokens
tokens = joined_string.split(' ')
# We build now two dictionaries with indexes and unique words, for future reference
unique_words = set(tokens)
word_to_ind = dict((word, i) for i, word in enumerate(unique_words))
ind_to_word = dict((i, word) for i, word in enumerate(unique_words))
def plot_emotions_period(df, cols, title, fname, period = 'h' ):
df1 = df.groupby(df['datetime'].dt.to_period(period)).mean()
df1.reset_index(inplace=True)
df1['datetime'] = pd.PeriodIndex(df1['datetime']).to_timestamp()
plot_df = pd.DataFrame(df1, df1.index, cols)
plt.figure(figsize=(15, 10))
ax = sns.lineplot(data=plot_df, linewidth = 3,dashes = False)
plt.legend(loc='best', fontsize=15)
plt.title(title, fontsize=20)
plt.xlabel('Time (hours)', fontsize=15)
plt.ylabel('Z-scored Emotions', fontsize=15)
plt.savefig('images/'+ fname + '.png')
return
def get_tweet_emotions(df, emotions, col):
df_tweets = df.copy()
df_tweets.drop(['sentiment','sentiment_intensity'], axis=1, inplace=True)
emo_info = {'emotion':'' , 'emo_frq': defaultdict(int) }
list_emotion_counts = []
# creating a dictionary list to hold the frequency of the words
# contributing to the emotions
for emotion in emotions:
emo_info = {}
emo_info['emotion'] = emotion
emo_info['emo_frq'] = defaultdict(int)
list_emotion_counts.append(emo_info)
# building a zeros matrix to hold the emotions data
df_emotions = pd.DataFrame(0, index=df.index, columns=emotions)
# stemming the word to facilitate the search in NRC
stemmer = SnowballStemmer("english")
# iterating in the tweets data set
for i, row in df_tweets.iterrows(): # for each tweet ...
tweet = word_tokenize(df_tweets.loc[i][col])
for word in tweet: # for each word ...
word_stemmed = stemmer.stem(word.lower())
# check if the word is in NRC
result = ncr[ncr.English == word_stemmed]
# we have a match
if not result.empty:
# update the tweet-emotions counts
for idx, emotion in enumerate(emotions):
df_emotions.at[i, emotion] += int(result[emotion].values[0])
# update the frequencies dictionary list
if result[emotion].any():
try:
list_emotion_counts[idx]['emo_frq'][word_to_ind[word]] += 1
except:
continue
# append the emotions matrix to the tweets data set
df_tweets = pd.concat([df_tweets, df_emotions], axis=1)
import pandas as pd
from . import processing
def plottable_sums(reference_df, behaviour, identifier_column="Animal_id", periods={}, period_label="period", metadata_columns={"TreatmentProtocol_code":"Treatment"}):
identifiers = list(set(reference_df[identifier_column]))
evaluation_df = pd.DataFrame({})
for identifier in identifiers:
identifier_df = reference_df[reference_df[identifier_column]==identifier]
evaluation_path = identifier_df["Evaluation_path"].values[0]
identifier_data = {}
for metadata_column in metadata_columns:
identifier_data[metadata_columns[metadata_column]] = identifier_df[metadata_column].values[0]
for period in periods:
period_start, period_end = periods[period]
sums = processing.timedelta_sums(evaluation_path, index_name=identifier, period_start=period_start, period_end=period_end)
#We need to calculate this explicitly since the start/end of the experiment may not align perfectly with the theoretical period
real_period_duration = sums.sum(axis=1).values[0]
#if the behaviour key is not found, there was none of that behaviour type in the period
try:
behaviour_ratio = sums[behaviour].values[0]/real_period_duration
except KeyError:
behaviour_ratio = 0
identifier_data[behaviour.title()+" Ratio"] = behaviour_ratio
identifier_data[period_label] = period
identifier_data["Identifier"] = identifier
period_df_slice = pd.DataFrame(identifier_data, index=[identifier])
evaluation_df = pd.concat([evaluation_df, period_df_slice])
#data is usually ordered as it comes, for nicer plots we sort it here
evaluation_df = evaluation_df.sort_values([period_label], ascending=True)
evaluation_df = evaluation_df.sort_values(list(metadata_columns.values()), ascending=False)
return evaluation_df
def plottable_sucrosepreference_df(reference_df):
cage_ids = list(set(reference_df["Cage_id"]))
preferences_df = pd.DataFrame({})
for cage_id in cage_ids:
cage_id_df = reference_df[reference_df["Cage_id"]==cage_id]
reference_dates = list(set(cage_id_df["SucrosePreferenceMeasurement_reference_date"]))
reference_dates.sort()
measurement_dates = list(set(cage_id_df["SucrosePreferenceMeasurement_date"]))
measurement_dates.sort()
first_date = reference_dates[0]
preferences={}
for measurement_date in measurement_dates:
cage_id_measurement_df = cage_id_df[cage_id_df["SucrosePreferenceMeasurement_date"] == measurement_date]
start_date = cage_id_measurement_df["SucrosePreferenceMeasurement_reference_date"].tolist()[0]
relative_start_day = start_date-first_date
rounded_relative_start_day = processing.rounded_days(relative_start_day)
relative_end_day = measurement_date-first_date
rounded_relative_end_day = processing.rounded_days(relative_end_day)
key = "{} to {}".format(rounded_relative_start_day, rounded_relative_end_day)
water_start = cage_id_measurement_df["SucrosePreferenceMeasurement_water_start_amount"].tolist()[0]
water_end = cage_id_measurement_df["SucrosePreferenceMeasurement_water_end_amount"].tolist()[0]
sucrose_start = cage_id_measurement_df["SucrosePreferenceMeasurement_sucrose_start_amount"].tolist()[0]
sucrose_end = cage_id_measurement_df["SucrosePreferenceMeasurement_sucrose_end_amount"].tolist()[0]
water_consumption = water_end - water_start
sucrose_consumption = sucrose_end - sucrose_start
sucrose_preference = sucrose_consumption/(water_consumption + sucrose_consumption)
preferences["Period [days]"] = key
preferences["Sucrose Preference Ratio"] = sucrose_preference
preferences["Sucrose Bottle Position"] = cage_id_measurement_df["SucrosePreferenceMeasurement_sucrose_bottle_position"].tolist()[0]
preferences["Sucrose Concentration"] = cage_id_measurement_df["SucrosePreferenceMeasurement_sucrose_concentration"].tolist()[0]
preferences["Treatment"] = cage_id_measurement_df["TreatmentProtocol_code"].tolist()[0]
preferences["Cage ID"] = cage_id # this may not actually be needed, as the same info is contained in the index
preferences_df_slice = pd.DataFrame(preferences, index=[cage_id])
preferences_df = pd.concat([preferences_df, preferences_df_slice])
import os
import pandas as pd
import numpy as np
import math
from datetime import datetime
import csv
from helpers import make_directory
# If date is specified, calculate ranking up until that date
def get_rankings(from_file, to_file, date=None, include_prediction=False, predicted_date_so_far=None, ranking_summary_file=None):
if date:
datet = datetime.strptime(date, '%Y-%m-%d')
if not (from_file and to_file):
raise ValueError("Error: get_rankings: Give a from_file/to_file pair")
df = pd.read_csv(from_file)
scores = dict()
for _, row in df.iterrows():
if type(row['Date']) is float:
continue
if date and datetime.strptime(row['Date'], '%Y-%m-%d') > datet:
break
# That means this row is a prediction value
if not include_prediction and row['FTHG'] == 0 and row['FTAG'] == 0 and row['FTR'] != 'D':
break
# Meaning this game is not played and not predicted yet
if row['FTR'] is np.nan:
break
home = row['HomeTeam']
away = row['AwayTeam']
if home not in scores:
scores[home] = {
'match_played': 0,
'points': 0,
'goal_diff': 0,
'win': 0
}
if away not in scores:
scores[away] = {
'match_played': 0,
'points': 0,
'goal_diff': 0,
'win': 0
}
scores[home]['match_played'] += 1
scores[away]['match_played'] += 1
match_goal_diff = row['FTHG'] - row['FTAG']
scores[home]['goal_diff'] += match_goal_diff
scores[away]['goal_diff'] -= match_goal_diff
if row['FTR'] == 'H':
scores[home]['points'] += 3
scores[home]['win'] += 1
elif row['FTR'] == 'A':
scores[away]['points'] += 3
scores[away]['win'] += 1
else:
scores[home]['points'] += 1
scores[away]['points'] += 1
teams = sorted(scores, key=lambda k: scores[k]['points'], reverse=True)
points, goal_diff, win_rate = [], [], []
for name in teams:
val = scores[name]
points.append(val['points'])
goal_diff.append(val['goal_diff'])
win_rate.append(val['win'] / val['match_played'])
df = pd.DataFrame(list(zip(teams, points, goal_diff, win_rate)), columns=['Team', 'Points', 'Goal_Diff', 'Win_Rate'])
make_directory(to_file)
df.to_csv(to_file, index=False)
if include_prediction and predicted_date_so_far and ranking_summary_file:
round_df = pd.DataFrame(list(zip(teams, points)), columns=['Team', predicted_date_so_far])
round_df.set_index('Team', inplace=True)
round_df = round_df.transpose()
round_df.index.name = 'Date'
if os.path.isfile(ranking_summary_file):
summary_df = pd.read_csv(ranking_summary_file)
def scatter_plot(Matrix,identifier_dataframe,cmap_categ,cmap_multiplier,title,size,screen_labels):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
"""
This function's goal is to allow data visualization of 2D or 3D matrices of
data with different attributes.
RESULT: A 2D or 3D visualization of the data, with a different color for
each category. The centroids of each class are also plotted
(computed as unweighted barycenters).
PARAMS :
- 'Matrix' refers to a 2D or 3D matrix of data
- 'identifier_dataframe' must have 2 columns :
--> "main_category" (String), the column of category labels
--> "color" (Integer), its numeric color encoding
- 'cmap_categ' refers to the 'colormaps_reference' in matplotlib :
https://matplotlib.org/examples/color/colormaps_reference.html
- 'cmap_multiplier' is the multiplier to apply to the categories in order to
scale the colors between 1 and 100
- 'size' refers to the point sizes
- 'screen_labels' refers to the way of displaying the points' categories:
--> choose 'centroids' if you want to display the labels at the category
centroid level
--> leave the string empty if you want to display one label every 50 points
"""
if Matrix.shape[1] ==1:
df_plot=pd.DataFrame(Matrix,columns =['X'])
df_plot['Y']=1
if Matrix.shape[1] ==2:
df_plot=pd.DataFrame(Matrix,columns =['X','Y'])
if Matrix.shape[1] ==3:
df_plot=pd.DataFrame(Matrix,columns =['X','Y','Z'])
fig = plt.figure(figsize=(13, 13))
ax = plt.axes(projection='3d')
min_X = min(df_plot.X)
max_X = max(df_plot.X)
min_Y = min(df_plot.Y)
max_Y = max(df_plot.Y)
min_Z = min(df_plot.Z)
max_Z = max(df_plot.Z)
# Data for a three-dimensional line
xline = np.linspace(min_X, max_X, 50)
yline = np.linspace(min_Y, max_Y, 50)
zline = np.linspace(min_Z, max_Z, 50)
ax.plot3D(xline, yline, zline, 'gray')
new_identifier_df = pd.DataFrame(identifier_dataframe)
from tweepy import OAuthHandler
from tweepy import API
from tweepy import Stream
from tweepy.streaming import StreamListener
import json
import time
import sys
import pandas as pd
import numpy as np
import twitter_dataprep as dataprep
import twitter_cache as tc
class SListener(StreamListener):
def __init__(self, api = None, fprefix = 'streamer'):
self.api = api or API()
self.counter = 0
self.fprefix = fprefix
self.output = open('tweets.json', 'w')
self.cache = tc.twitter_cache()
self.cache.clean_allcache()
def on_data(self, data):
if 'in_reply_to_status' in data:
##Debug
#print('=================='+str(self.counter)+'=========')
#print(data)
self.on_status(data)
elif 'delete' in data:
delete = json.loads(data)['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
elif 'limit' in data:
if self.on_limit(json.loads(data)['limit']['track']) is False:
return False
elif 'warning' in data:
warning = json.loads(data)['warnings']
print("WARNING: %s" % warning['message'])
return
def on_status(self, status):
self.output.write(status)
self.counter += 1
#if self.counter >= 20000:
if self.counter >= 1000:
self.output.close()
self.output = open('%s_%s.json' % (self.fprefix, time.strftime('%Y%m%d-%H%M%S')), 'w')
self.counter = 0
#print(self.counter)
df_tweets=pd.DataFrame(dataprep.process_raw(status))
#self.cache.add_df_cache(key=str(df_tweets.id.max()),df=df_tweets)
self.cache.add_cache(ns='twitter_cache:raw:',key=str(df_tweets.id.max()),value=str(df_tweets.to_json(orient='records')))
return
def on_delete(self, status_id, user_id):
print("Delete notice")
return
#def on_limit(self, track):
#print("WARNING: Limitation notice received, tweets missed: %d" % track)
# return
def on_error(self, status_code):
print('Encountered error with status code:', status_code)
return
def on_timeout(self):
print("Timeout, sleeping for 60 seconds...")
time.sleep(60)
return
class twitter_collector():
def __init__(self,cache=''):
##Init stream object
# Consumer key authentication(consumer_key,consumer_secret can be collected from our twitter developer profile)
auth = OAuthHandler('<KEY>', '<KEY>')
# Access key authentication(access_token,access_token_secret can be collected from our twitter developer profile)
auth.set_access_token('<KEY>', '<KEY>')
# Set up the API with the authentication handler
api = API(auth)
# Instantiate the cache object for twitter stream
#self.cache = tc.twitter_cache()
# Instantiate the SListener object
self.listen = SListener(api=api)
# Instantiate the Stream object
self.stream = Stream(auth, self.listen)
# Initiate collector counters
self.listen.cache.add_cache(ns='twitter_cache:summary:',key='total_tweets',value=0)
self.listen.cache.add_cache(ns='twitter_cache:summary:',key='trump_tweets',value=0)
self.listen.cache.add_cache(ns='twitter_cache:summary:',key='biden_tweets',value=0)
def start(self,keywords=['biden','trump'],duration=0):
# Begin collecting data
self.stream.filter(track = keywords, is_async=True)
# if collector object started with duration specified, stop after X seconds
if duration>0:
time.sleep(duration)
self.stream.disconnect()
print('streaming stopped after '+str(duration)+"s")
self.summary()
def stop(self):
#Disconnect the streaming object
self.stream.disconnect()
time.sleep(1)
print("Twitter collector stopped!")
self.summary()
def refresh_summary(self,clear_raw=True):
'''
Load raw tweets and process summary
Delete raw tweets from redis cache if [clear_raw=True]
'''
try:
df_summary=dataprep.repack_tweets(self.listen.cache.get_rawcache(ns='twitter_cache:raw:',return_df=True))
except Exception as e:
print ('Error to load raw tweets and refresh summary! Error Msg: {0}'.format(e))
return
if clear_raw:
self.listen.cache.clear_rawcache(ns='twitter_cache:raw:')
# Update counters
total_tweets=int(self.listen.cache.get_cache(ns='twitter_cache:summary:',key='total_tweets'))+len(df_summary.index)
trump_tweets=int(self.listen.cache.get_cache(ns='twitter_cache:summary:',key='trump_tweets'))+np.sum(dataprep.check_word_in_tweet('trump',df_summary))
biden_tweets=int(self.listen.cache.get_cache(ns='twitter_cache:summary:',key='biden_tweets'))+np.sum(dataprep.check_word_in_tweet('biden',df_summary))
self.listen.cache.add_cache(ns='twitter_cache:summary:',key='total_tweets',value=str(total_tweets))
self.listen.cache.add_cache(ns='twitter_cache:summary:',key='trump_tweets',value=str(trump_tweets))
self.listen.cache.add_cache(ns='twitter_cache:summary:',key='biden_tweets',value=str(biden_tweets))
## Build the word count histogram
trump_wcloud = df_summary[dataprep.check_word_in_tweet('trump',df_summary)].text.str.split(expand=True).stack().value_counts().rename_axis('keyword').to_frame('counts')
biden_wcloud = df_summary[dataprep.check_word_in_tweet('biden',df_summary)].text.str.split(expand=True).stack().value_counts().rename_axis('keyword').to_frame('counts')
exclude_word = ['AT_USER','rt','URL','is','am','are','was','were','a','an','of','the','to','in','for','and','i','you','at','this','there','that','he','she','it','his','her','will','on','by','about','with','and','or']
trump_wcloud=trump_wcloud[~trump_wcloud.index.isin(exclude_word)].nlargest(10,'counts')
biden_wcloud=biden_wcloud[~biden_wcloud.index.isin(exclude_word)].nlargest(10,'counts')
trump_wc = self.listen.cache.get_df_cache(ns='twitter_cache:summary:',key='trump_wc')
biden_wc = self.listen.cache.get_df_cache(ns='twitter_cache:summary:',key='biden_wc')
if not trump_wc.empty:
trump_wc = pd.concat([trump_wc, trump_wcloud])
# The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Author: <NAME>
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Standard library imports
from collections import OrderedDict
from getpass import getpass
# Scikit Data Access
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Third party packages
import pandas as pd
from alpha_vantage.timeseries import TimeSeries
class DataFetcher(DataFetcherStream):
""" Data Fetcher for retrieving stock data """
def __init__(self, ap_paramList, data_type, start_date=None, end_date=None, interval=None):
"""
@param ap_paramList[stock_symbol_list]: AutoList of stock symbols
@param data_type: Type of data to retrieve (daily, daily_adjusted, intraday, monthly, monthly_adjusted, weekly, weekly_adjusted)
@param start_date: Starting date
@param end_date: Ending date
@param interval: Interval for intraday (1min, 5min, 15min, 30min, 60min)
@return: Table data wrapper of stock data
"""
self.data_type = data_type
self.start_date = start_date
self.end_date = end_date
self.interval = interval
self.possible_intervals = ('1min', '5min', '15min', '30min', '60min')
self.possible_data_types = ("daily", "daily_adjusted", "intraday", "monthly", "monthly_adjusted", "weekly", "weekly_adjusted")
if interval not in self.possible_intervals and data_type == 'intraday':
raise RuntimeError('Did not understand interval: "' + str(interval) + '" to use with intraday data type')
elif interval is not None and data_type != 'intraday':
raise RuntimeError('interval is only used with data type intraday')
api_key = DataFetcher.getConfigItem('stocks', 'api_key')
write_key = False
while api_key is None or api_key == "":
api_key = getpass(prompt='Alpha Vantage API key')
write_key = True
if write_key:
DataFetcher.writeConfigItem('stocks','api_key', api_key)
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
"""
Retrieve stock data
@return TableWrapper of stock data
"""
stock_symbols = self.ap_paramList[0]()
timeseries_retriever = TimeSeries(key=DataFetcher.getConfigItem('stocks','api_key'),
output_format='pandas',
indexing_type = 'date')
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for symbol in stock_symbols:
# Extract data
if self.data_type == 'daily':
data, metadata = timeseries_retriever.get_daily(symbol, outputsize='full')
elif self.data_type == 'daily_adjusted':
data, metadata = timeseries_retriever.get_daily_adjusted(symbol, outputsize='full')
elif self.data_type == 'monthly':
data, metadata = timeseries_retriever.get_monthly(symbol)
elif self.data_type == 'monthly_adjusted':
data, metadata = timeseries_retriever.get_monthly_adjusted(symbol)
elif self.data_type == 'weekly':
data, metadata = timeseries_retriever.get_weekly(symbol)
elif self.data_type == 'weekly_adjusted':
data, metadata = timeseries_retriever.get_weekly_adjusted(symbol)
elif self.data_type == 'intraday':
data, metadata = timeseries_retriever.get_intraday(symbol, interval=self.interval, outputsize='full')
# Convert index to pandas datetime
if self.data_type == 'intraday':
data.index = pd.to_datetime(data.index)
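# Illustrative usage (assumed API; the AutoList import path may differ between
# scikit-dataaccess versions):
#   from skdaccess.framework.param_class import AutoList
#   fetcher = DataFetcher([AutoList(['IBM', 'MSFT'])], data_type='daily')
#   stock_wrapper = fetcher.output()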
# coding: utf-8
import os
import pandas as pd
from tqdm import tqdm
from czsc.objects import RawBar, Freq
from czsc.utils.bar_generator import BarGenerator, freq_end_time
from test.test_analyze import read_1min
cur_path = os.path.split(os.path.realpath(__file__))[0]
kline = read_1min()
def test_freq_end_time():
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F1) == pd.to_datetime("2021-11-11 09:43")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F5) == pd.to_datetime("2021-11-11 09:45")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F15) == pd.to_datetime("2021-11-11 09:45")
assert freq_end_time(pd.to_datetime("2021-11-11 09:45"), Freq.F15) == pd.to_datetime("2021-11-11 09:45")
assert freq_end_time(pd.to_datetime("2021-11-11 14:56"), Freq.F15) == pd.to_datetime("2021-11-11 15:00")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F30) == pd.to_datetime("2021-11-11 10:00")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F60) == pd.to_datetime("2021-11-11 10:30")
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
import random
import pandas as pd
from sklearn.metrics import confusion_matrix, f1_score, roc_curve, auc, accuracy_score
import joblib
from collections import Counter
from sklearn.model_selection import GridSearchCV
from sklearn.cluster import KMeans
import sys
import timeit
import matplotlib.pyplot as plt
def get_trainset(simhome, rna):
simfile = "%s/%s_result.txt" %(simhome, rna.upper())
with open(simfile, "r") as ifile:
for line in ifile:
if line.startswith("train"):
trainset = eval(next(ifile, '').strip())
trainset = [t.lower() for t in trainset]
return trainset
def get_full_trainset(home):
pdblist = "%s/data/pdblist.txt" %home
return pd.read_csv(pdblist, delim_whitespace=True)
"""
Import as:
import core.test.test_statistics as cttsta
"""
import logging
from typing import List
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as casgen
import core.finance as cfinan
import core.signal_processing as csproc
import core.statistics as cstati
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestComputeMoments(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series, prefix="moments_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test3(self) -> None:
series = pd.Series([])
cstati.compute_moments(series)
def test4(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test6(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.compute_moments(series)
def test7(self) -> None:
"""
Test series with `inf`.
"""
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[4] = np.inf
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestComputeFracZero(hut.TestCase):
def test1(self) -> None:
data = [0.466667, 0.2, 0.13333, 0.2, 0.33333]
index = [0, 1, 2, 3, 4]
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_zero(self._get_df(seed=1))
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test2(self) -> None:
data = [
0.4,
0.0,
0.2,
0.4,
0.4,
0.2,
0.4,
0.0,
0.6,
0.4,
0.6,
0.2,
0.0,
0.0,
0.2,
]
index = pd.date_range(start="1-04-2018", periods=15, freq="30T")
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_zero(self._get_df(seed=1), axis=1)
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test3(self) -> None:
# Equals 20 / 75 = num_zeros / num_points.
expected = 0.266666
actual = cstati.compute_frac_zero(self._get_df(seed=1), axis=None)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test4(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.466667
actual = cstati.compute_frac_zero(series)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test5(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.466667
actual = cstati.compute_frac_zero(series, axis=0)
np.testing.assert_almost_equal(actual, expected, decimal=3)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.compute_frac_zero(series)
@staticmethod
def _get_df(seed: int) -> pd.DataFrame:
nrows = 15
ncols = 5
num_nans = 15
num_infs = 5
num_zeros = 20
#
np.random.seed(seed=seed)
mat = np.random.randn(nrows, ncols)
mat.ravel()[np.random.choice(mat.size, num_nans, replace=False)] = np.nan
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = np.inf
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = -np.inf
mat.ravel()[np.random.choice(mat.size, num_zeros, replace=False)] = 0
#
index = pd.date_range(start="01-04-2018", periods=nrows, freq="30T")
df = pd.DataFrame(data=mat, index=index)
return df
class TestComputeFracNan(hut.TestCase):
def test1(self) -> None:
data = [0.4, 0.133333, 0.133333, 0.133333, 0.2]
index = [0, 1, 2, 3, 4]
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_nan(self._get_df(seed=1))
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test2(self) -> None:
data = [
0.4,
0.0,
0.2,
0.4,
0.2,
0.2,
0.2,
0.0,
0.4,
0.2,
0.6,
0.0,
0.0,
0.0,
0.2,
]
index = pd.date_range(start="1-04-2018", periods=15, freq="30T")
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_nan(self._get_df(seed=1), axis=1)
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test3(self) -> None:
# Equals 15 / 75 = num_nans / num_points.
expected = 0.2
actual = cstati.compute_frac_nan(self._get_df(seed=1), axis=None)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test4(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.4
actual = cstati.compute_frac_nan(series)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test5(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.4
actual = cstati.compute_frac_nan(series, axis=0)
np.testing.assert_almost_equal(actual, expected, decimal=3)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.compute_frac_nan(series)
@staticmethod
def _get_df(seed: int) -> pd.DataFrame:
nrows = 15
ncols = 5
num_nans = 15
num_infs = 5
num_zeros = 20
#
np.random.seed(seed=seed)
mat = np.random.randn(nrows, ncols)
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = np.inf
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = -np.inf
mat.ravel()[np.random.choice(mat.size, num_zeros, replace=False)] = 0
mat.ravel()[np.random.choice(mat.size, num_nans, replace=False)] = np.nan
#
index = pd.date_range(start="01-04-2018", periods=nrows, freq="30T")
df = pd.DataFrame(data=mat, index=index)
return df
class TestComputeNumFiniteSamples(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati.count_num_finite_samples(series)
class TestComputeNumUniqueValues(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati.count_num_unique_values(series)
class TestComputeDenominatorAndPackage(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati._compute_denominator_and_package(reduction=1, data=series)
class TestTTest1samp(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
series = pd.Series([])
cstati.ttest_1samp(series)
def test2(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.ttest_1samp(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.ttest_1samp(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test4(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.ttest_1samp(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestMultipleTests(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
series = pd.Series([])
cstati.multipletests(series)
# Test if error is raised with default arguments when input contains NaNs.
@pytest.mark.xfail()
def test2(self) -> None:
series_with_nans = self._get_series(seed=1)
series_with_nans[0:5] = np.nan
actual = cstati.multipletests(series_with_nans)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series_with_nans = self._get_series(seed=1)
series_with_nans[0:5] = np.nan
actual = cstati.multipletests(series_with_nans, nan_mode="drop")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = hut.get_random_df(
num_cols=1,
seed=seed,
**date_range,
)[0]
return series
class TestMultiTTest(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
df = pd.DataFrame(columns=["series_name"])
cstati.multi_ttest(df)
def test2(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, prefix="multi_ttest_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, popmean=1)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, nan_mode="fill_with_zero")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test6(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, method="sidak")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@pytest.mark.xfail()
def test7(self) -> None:
df = self._get_df_of_series(seed=1)
df.iloc[:, 0] = np.nan
actual = cstati.multi_ttest(df)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_df_of_series(seed: int) -> pd.DataFrame:
n_series = 7
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
# Generating a dataframe from different series.
df = pd.DataFrame(
[
arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed + i
)
for i in range(n_series)
],
index=["series_" + str(i) for i in range(n_series)],
).T
return df
class TestApplyNormalityTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_normality_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_normality_test(series, prefix="norm_test_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test3(self) -> None:
series = pd.Series([])
cstati.apply_normality_test(series)
def test4(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.apply_normality_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.apply_normality_test(
series, nan_mode="ffill_and_drop_leading"
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test6(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.apply_normality_test(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestApplyAdfTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, regression="ctt")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, maxlag=5)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, autolag="t-stat")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, prefix="adf_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.apply_adf_test(series)
import datetime
import os
import urllib
from http.client import IncompleteRead
from urllib.request import Request
import bs4 as bs
import pandas as pd
from django.conf import settings
from core.models import CourseModel, UpdateModel
# FILE PATHS
my_path = os.path.abspath(os.path.dirname(__file__))
stopwords_path = os.path.join(settings.BASE_DIR, "api/datasets/stopwords.txt")
# Date Cleaner
def date_cleaner(date):
"""Clean DataTime columns helper function"""
datetime_format = "%d/%m/%Y"
if len(date) < 8:
date = "01/" + date
else:
date = date
clean_date = datetime.datetime.strptime(date, datetime_format).strftime("%Y/%m/%d")
return clean_date
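# Worked example (hypothetical inputs): "06/2020" is padded to "01/06/2020" and
# returned as "2020/06/01"; a full "15/06/2020" comes back as "2020/06/15".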
# WEBCRAWLER
def data_extractor():
"""Dataset Extraction"""
# Load WebPage
try:
req = urllib.request.Request(
"http://springboardcourses.ie/results?keywords=&perPage=500",
data=None,
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
})
except IncompleteRead:
req = urllib.request.Request(
"http://springboardcourses.ie/results?keywords=&perPage=500",
data=None,
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
})
f = urllib.request.urlopen(req)
soup = bs.BeautifulSoup(f, "lxml")
for course_div in soup.find_all("div", class_="panel panel-primary"):
try:
# Course Title
title_div = course_div.find("div", class_="panel-heading").h3.text
title_div = title_div.strip()
title_div = title_div.replace(" ", "")
except Exception as e:
title_div = None
try:
# First Content Row
first_row = course_div.find("div", class_="panel-body")
# Provider:
provider = first_row.find("div", class_="col-md-12").p
provider = str(provider).split("</strong>")[1]
provider = provider.split("<")[0]
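# e.g. (hypothetical markup) "<p><strong>Provider:</strong> Some College</p>":
# splitting on "</strong>" keeps " Some College</p>", then splitting on "<" keeps " Some College".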
# Award:
award = first_row.find("div", class_="col-md-5").p
award = str(award).split("</strong>")[1]
award = award.split("<")[0]
# Delivery Method:
delivery = first_row.find("div", class_="col-md-4").p
delivery = str(delivery).split("</strong>")[1]
delivery = delivery.split("<")[0]
# ECTS credits:
ects_credits = first_row.find("div", class_="col-md-3")
ects_credits = str(ects_credits).split("</strong> ")[1]
ects_credits = ects_credits.split("<")[0]
# Second Content Row
second_row = first_row.find_all("div", "row")[2]
# Mode:
mode = second_row.find("div", class_="col-md-5")
mode = str(mode).split("</strong> ")[1]
mode = mode.split("<")[0]
# Application Deadline:
deadline = second_row.find("div", class_="col-md-4")
deadline = str(deadline).split("</strong>")[1]
deadline = deadline.split("<")[0]
deadline = date_cleaner(deadline)
# Start Date
start_date = second_row.find("div", class_="col-md-3")
start_date = str(start_date).split("</strong>")[1]
start_date = start_date.split("<")[0]
start_date = date_cleaner(start_date)
# End Date
end_date = second_row.find("div", class_="col-md-3")
end_date = str(end_date).split("</strong>")[2]
end_date = end_date.replace(" ", "")
end_date = end_date.split("<")[0]
end_date = date_cleaner(end_date)
# Bottom Content Row
third_row = first_row.find("div", "row margin-bottom-0")
# NFQ
nfq = third_row.find("div", class_="col-md-5").p
nfq = str(nfq).split("</strong>")[1]
nfq = nfq.split("<")[0]
nfq = nfq.replace("Level ", "")
# Open to those in employment
ote_flag = third_row.find("div", class_="col-md-4").p
ote_flag = str(ote_flag).split("</strong>")[1]
ote_flag = ote_flag.split("<")[0]
# Skills area
skill_list = third_row.find("div", class_="col-md-3")
skill_list = str(skill_list).split("</strong>")[1]
skill_list = skill_list.split("<")[0]
skill_list = skill_list.lower()
# Course Link
link = course_div.find("a")
link = link.get("href")
CourseModel.objects.get_or_create(
title=title_div,
provider=provider,
award=award,
ects_credits=ects_credits,
mode=mode,
deadline=deadline,
start_date=start_date,
end_date=end_date,
nfq=nfq,
ote_flag=ote_flag,
link=link,
skills=skill_list,
delivery=delivery
)
except Exception as e:
pass
UpdateModel.objects.create()
print("update done")
def statistical_data():
dataset = pd.DataFrame(list(CourseModel.objects.all().values()))
if len(dataset.index) > 0:
# top providers
occurence = dataset["provider"].value_counts()[:6]
top_providers_dict = occurence.to_dict()
# Delivery mode
dataset["less_than_50"] = dataset["ects_credits"].apply(
lambda x: "below 50 C" if int(x) < 50 else "over 50 C")
less_than_50_values = dataset["less_than_50"].value_counts()
lt50_dict = less_than_50_values.to_dict()
# partime/ fulltime
mode_values = dataset["mode"].value_counts()
mode_dict = mode_values.to_dict()
# NFQ
nfq_values = dataset["nfq"].value_counts()
nfq_dict = nfq_values.to_dict()
# Top popular categories
category_values = dataset["skills"].value_counts()[:6]
category_dict = category_values.to_dict()
stats_dict = {"top_providers_dict": top_providers_dict,
"lt50_dict": lt50_dict,
"nfq_dict": nfq_dict,
"mode_dict": mode_dict,
"category_dict": category_dict
}
else:
stats_dict = {}
return stats_dict
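# Each value in stats_dict is a plain {label: count} mapping from value_counts().to_dict(),
# e.g. (illustrative numbers only) stats_dict["mode_dict"] -> {"Part-time": 180, "Full-time": 95}.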
def fastest_diploma(queryset):
dataset = pd.DataFrame(list(queryset.values()))
if len(dataset.index) > 0:
diplomas = ["Diploma", "diploma"]
dataset["diploma"] = dataset["title"].apply(lambda x: 1 if any(w in x for w in diplomas) else 0)
dataset_diplomas = dataset[(dataset["diploma"] == 1)]
# top 12 shortest diplomas
if len(dataset_diplomas.index) > 0:
dataset_diplomas["start_date"] = | pd.to_datetime(dataset_diplomas["start_date"]) | pandas.to_datetime |
"""
Functions for categoricals
"""
from itertools import chain, product
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from pandas.core.algorithms import value_counts
from .utils import last2
__all__ = [
'cat_anon',
'cat_collapse',
'cat_concat',
'cat_drop',
'cat_expand',
'cat_explicit_na',
'cat_infreq',
'cat_inorder',
'cat_inseq',
'cat_lump',
'cat_lump_lowfreq',
'cat_lump_min',
'cat_lump_n',
'cat_lump_prop',
'cat_move',
'cat_other',
'cat_recode',
'cat_relabel',
'cat_relevel',
'cat_rename',
'cat_reorder',
'cat_reorder2',
'cat_rev',
'cat_shift',
'cat_shuffle',
'cat_unify',
'cat_zip',
]
def cat_infreq(c, ordered=None):
"""
Reorder categorical by frequency of the values
Parameters
----------
c : list-like
Values that will make up the categorical.
ordered : bool
If ``True``, the categorical is ordered.
Returns
-------
out : categorical
Values
Examples
--------
>>> x = ['d', 'a', 'b', 'b', 'c', 'c', 'c']
>>> cat_infreq(x)
['d', 'a', 'b', 'b', 'c', 'c', 'c']
Categories (4, object): ['c', 'b', 'd', 'a']
>>> cat_infreq(x, ordered=True)
['d', 'a', 'b', 'b', 'c', 'c', 'c']
Categories (4, object): ['c' < 'b' < 'd' < 'a']
When two or more values occur the same number of times, if the
categorical is ordered, the order is preserved. If it is not
ordered, the order depends on that of the values. Above 'd'
comes before 'a', and below 'a' comes before 'd'.
>>> c = pd.Categorical(
... x, categories=['a', 'c', 'b', 'd']
... )
>>> cat_infreq(c)
['d', 'a', 'b', 'b', 'c', 'c', 'c']
Categories (4, object): ['c', 'b', 'a', 'd']
>>> cat_infreq(c.set_ordered(True))
['d', 'a', 'b', 'b', 'c', 'c', 'c']
Categories (4, object): ['c' < 'b' < 'a' < 'd']
"""
kwargs = {} if ordered is None else {'ordered': ordered}
counts = value_counts(c)
if pdtypes.is_categorical_dtype(c):
original_cat_order = c.categories
else:
original_cat_order = pd.unique(c)
counts = counts.reindex(index=original_cat_order)
cats = (_stable_series_sort(counts, ascending=False)
.index
.to_list())
return pd.Categorical(c, categories=cats, **kwargs)
def cat_inorder(c, ordered=None):
"""
Reorder categorical by appearance
Parameters
----------
c : list-like
Values that will make up the categorical.
ordered : bool
If ``True``, the categorical is ordered.
Returns
-------
out : categorical
Values
Examples
--------
>>> import numpy as np
>>> x = [4, 1, 3, 4, 4, 7, 3]
>>> cat_inorder(x)
[4, 1, 3, 4, 4, 7, 3]
Categories (4, int64): [4, 1, 3, 7]
>>> arr = np.array(x)
>>> cat_inorder(arr)
[4, 1, 3, 4, 4, 7, 3]
Categories (4, int64): [4, 1, 3, 7]
>>> c = ['b', 'f', 'c', None, 'c', 'a', 'b', 'e']
>>> cat_inorder(c)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b', 'f', 'c', 'a', 'e']
>>> s = pd.Series(c)
>>> cat_inorder(s)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b', 'f', 'c', 'a', 'e']
>>> cat = pd.Categorical(c)
>>> cat_inorder(cat)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b', 'f', 'c', 'a', 'e']
>>> cat_inorder(cat, ordered=True)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b' < 'f' < 'c' < 'a' < 'e']
By default, ordered categories remain ordered.
>>> ocat = pd.Categorical(cat, ordered=True)
>>> ocat
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['a' < 'b' < 'c' < 'e' < 'f']
>>> cat_inorder(ocat)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b' < 'f' < 'c' < 'a' < 'e']
>>> cat_inorder(ocat, ordered=False)
['b', 'f', 'c', NaN, 'c', 'a', 'b', 'e']
Categories (5, object): ['b', 'f', 'c', 'a', 'e']
Notes
-----
``NaN`` or ``None`` are ignored when creating the categories.
"""
kwargs = {} if ordered is None else {'ordered': ordered}
if isinstance(c, (pd.Series, pd.Categorical)):
cats = c[~pd.isnull(c)].unique()
if hasattr(cats, 'to_list'):
cats = cats.to_list()
elif hasattr(c, 'dtype'):
cats = pd.unique(c[~pd.isnull(c)])
else:
cats = pd.unique([
x for x, keep in zip(c, ~pd.isnull(c))
if keep
])
return pd.Categorical(c, categories=cats, **kwargs)
def cat_inseq(c, ordered=None):
"""
Reorder categorical by numerical order
Parameters
----------
c : list-like
Values that will make up the categorical.
ordered : bool
If ``True``, the categorical is ordered.
Returns
-------
out : categorical
Values
Examples
--------
>>> x = pd.Categorical([5, 1, 3, 2, 4])
>>> cat_inseq(x)
[5, 1, 3, 2, 4]
Categories (5, int64): [1, 2, 3, 4, 5]
>>> x = pd.Categorical([5, 1, '3', 2, 4])
>>> cat_inseq(x)
[5, 1, 3, 2, 4]
Categories (5, int64): [1, 2, 3, 4, 5]
Values that cannot be coerced to numerical turn into ``NaN``,
and categories cannot be ``NaN``.
>>> x = pd.Categorical([5, 1, 'three', 2, 4])
>>> cat_inseq(x)
[5, 1, NaN, 2, 4]
Categories (4, int64): [1, 2, 4, 5]
Coerces values to numerical
>>> x = [5, 1, '3', 2, 4]
>>> cat_inseq(x, ordered=True)
[5, 1, 3, 2, 4]
Categories (5, int64): [1 < 2 < 3 < 4 < 5]
>>> x = [5, 1, '3', 2, '4.5']
>>> cat_inseq(x)
[5.0, 1.0, 3.0, 2.0, 4.5]
Categories (5, float64): [1.0, 2.0, 3.0, 4.5, 5.0]
At least one of the values must be coercible to a number
>>> x = ['five', 'one', 'three', 'two', 'four']
>>> cat_inseq(x)
Traceback (most recent call last):
...
ValueError: Atleast one existing category must be a number.
>>> x = ['five', 'one', '3', 'two', 'four']
>>> cat_inseq(x)
[NaN, NaN, 3, NaN, NaN]
Categories (1, int64): [3]
"""
c = as_categorical(c)
# one value at a time to avoid turning integers into floats
# when some values create nans
numerical_cats = []
for x in c.categories:
_x = pd.to_numeric(x, 'coerce')
if not pd.isnull(_x):
numerical_cats.append(_x)
if len(numerical_cats) == 0 and len(c) > 0:
raise ValueError(
"Atleast one existing category must be a number."
)
# Change the original categories to numerical ones, making sure
# to rename the existing ones i.e '3' becomes 3. Only after that,
# change to order.
c = (c.set_categories(numerical_cats, rename=True)
.reorder_categories(sorted(numerical_cats)))
if ordered is not None:
c.set_ordered(ordered, inplace=True)
return c
def cat_reorder(c, x, fun=np.median, ascending=True):
"""
Reorder categorical by sorting along another variable
It is the order of the categories that changes. Values in x
are grouped by categories and summarised to determine the
new order.
Parameters
----------
c : list-like
Values that will make up the categorical.
x : list-like
Values by which ``c`` will be ordered.
fun : callable
Summarising function to ``x`` for each category in ``c``.
Default is the *median*.
ascending : bool
If ``True``, the ``c`` is ordered in ascending order of ``x``.
Examples
--------
>>> c = list('abbccc')
>>> x = [11, 2, 2, 3, 33, 3]
>>> cat_reorder(c, x)
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b', 'c', 'a']
>>> cat_reorder(c, x, fun=max)
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b', 'a', 'c']
>>> cat_reorder(c, x, fun=max, ascending=False)
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['c', 'a', 'b']
>>> c_ordered = pd.Categorical(c, ordered=True)
>>> cat_reorder(c_ordered, x)
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b' < 'c' < 'a']
>>> cat_reorder(c + ['d'], x)
Traceback (most recent call last):
...
ValueError: Lengths are not equal. len(c) is 7 and len(x) is 6.
"""
if len(c) != len(x):
raise ValueError(
"Lengths are not equal. len(c) is {} and len(x) is {}.".format(
len(c), len(x)
)
)
summary = (pd.Series(x)
.groupby(c)
.apply(fun)
.sort_values(ascending=ascending)
)
cats = summary.index.to_list()
return pd.Categorical(c, categories=cats)
def cat_reorder2(c, x, y, *args, fun=last2, ascending=False, **kwargs):
"""
Reorder categorical by sorting along another variable
It is the order of the categories that changes. Values in x
are grouped by categories and summarised to determine the
new order.
Parameters
----------
c : list-like
Values that will make up the categorical.
x : list-like
Values by which ``c`` will be ordered.
y : list-like
Values by which ``c`` will be ordered.
*args : tuple
Position arguments passed to function fun.
fun : callable
Two-argument summarising function applied to ``x`` and ``y`` for
each category in ``c``. Default is ``last2``.
ascending : bool
If ``True``, the ``c`` is ordered in ascending order of ``x``.
**kwargs : dict
Keyword arguments passed to ``fun``.
Examples
--------
Order stocks by the price in the latest year. This type of ordering
can be used to order line plots so that the ends match the order of
the legend.
>>> stocks = list('AAABBBCCC')
>>> year = [1980, 1990, 2000] * 3
>>> price = [12.34, 12.90, 13.55, 10.92, 14.73, 11.08, 9.02, 12.44, 15.65]
>>> cat_reorder2(stocks, year, price)
['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
Categories (3, object): ['C', 'A', 'B']
"""
if len(c) != len(x) or len(x) != len(y):
raise ValueError(
"Lengths are not equal. len(c) is {}, len(x) is {} and "
"len(y) is {}.".format(len(c), len(x), len(y))
)
# Wrap two argument function fun with a function that
# takes a dataframe, put x and y into a dataframe, then
# use dataframe.groupby
def _fun(cat_df):
return fun(cat_df['x'], cat_df['y'], *args, **kwargs)
summary = (pd.DataFrame({'x': x, 'y': y})
.groupby(c)
.apply(_fun)
.sort_values(ascending=ascending)
)
cats = summary.index.to_list()
return pd.Categorical(c, categories=cats)
def cat_move(c, *args, to=0):
"""
Reorder categories explicitly
Parameters
----------
c : list-like
Values that will make up the categorical.
*args : tuple
Categories to reorder. Any categories not mentioned
will be left in existing order.
to : int or inf
Position where to place the categories. ``inf``, puts
them at the end (highest value).
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'c', 'd', 'e']
>>> cat_move(c, 'e', 'b')
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['e', 'b', 'a', 'c', 'd']
>>> cat_move(c, 'c', to=np.inf)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['a', 'b', 'd', 'e', 'c']
>>> cat_move(pd.Categorical(c, ordered=True), 'a', 'c', 'e', to=1)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['b' < 'a' < 'c' < 'e' < 'd']
"""
c = as_categorical(c)
if np.isinf(to):
to = len(c.categories)
args = list(args)
unmoved_cats = c.categories.drop(args).to_list()
cats = unmoved_cats[0:to] + args + unmoved_cats[to:]
c.reorder_categories(cats, inplace=True)
return c
def cat_rev(c):
"""
Reverse order of categories
Parameters
----------
c : list-like
Values that will make up the categorical.
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'c']
>>> cat_rev(c)
['a', 'b', 'c']
Categories (3, object): ['c', 'b', 'a']
>>> cat_rev(pd.Categorical(c))
['a', 'b', 'c']
Categories (3, object): ['c', 'b', 'a']
"""
c = as_categorical(c)
c.reorder_categories(c.categories[::-1], inplace=True)
return c
def cat_shift(c, n=1):
"""
Shift and wrap-around categories to the left or right
Parameters
----------
c : list-like
Values that will make up the categorical.
n : int
Number of times to shift. If positive, shift to
the left, if negative shift to the right.
Default is 1.
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'c', 'd', 'e']
>>> cat_shift(c)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['b', 'c', 'd', 'e', 'a']
>>> cat_shift(c, 2)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['c', 'd', 'e', 'a', 'b']
>>> cat_shift(c, -2)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['d', 'e', 'a', 'b', 'c']
>>> cat_shift(pd.Categorical(c, ordered=True), -3)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['c' < 'd' < 'e' < 'a' < 'b']
"""
c = as_categorical(c)
cats = c.categories.to_list()
cats_extended = cats + cats
m = len(cats)
n = n % m
cats = cats_extended[n:m] + cats_extended[:n]
c.reorder_categories(cats, inplace=True)
return c
def cat_shuffle(c, random_state=None):
"""
Randomly shuffle the order of the categories
Parameters
----------
c : list-like
Values that will make up the categorical.
random_state : int or ~numpy.random.RandomState, optional
Seed or Random number generator to use. If ``None``, then
numpy global generator :class:`numpy.random` is used.
Returns
-------
out : categorical
Values
Examples
--------
>>> np.random.seed(123)
>>> c = ['a', 'b', 'c', 'd', 'e']
>>> cat_shuffle(c)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['b', 'd', 'e', 'a', 'c']
>>> cat_shuffle(pd.Categorical(c, ordered=True), 321)
['a', 'b', 'c', 'd', 'e']
Categories (5, object): ['d' < 'b' < 'a' < 'c' < 'e']
"""
c = as_categorical(c)
if random_state is None:
random_state = np.random
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
elif not isinstance(random_state, np.random.RandomState):
raise TypeError(
"Unknown type `{}` of random_state".format(type(random_state))
)
cats = c.categories.to_list()
random_state.shuffle(cats)
c.reorder_categories(cats, inplace=True)
return c
# Change the value of categories
def cat_anon(c, prefix='', random_state=None):
"""
Anonymise categories
Neither the value nor the order of the categories is preserved.
Parameters
----------
c : list-like
Values that will make up the categorical.
random_state : int or ~numpy.random.RandomState, optional
Seed or Random number generator to use. If ``None``, then
numpy global generator :class:`numpy.random` is used.
Returns
-------
out : categorical
Values
Examples
--------
>>> np.random.seed(123)
>>> c = ['a', 'b', 'b', 'c', 'c', 'c']
>>> cat_anon(c)
['0', '1', '1', '2', '2', '2']
Categories (3, object): ['1', '0', '2']
>>> cat_anon(c, 'c-', 321)
['c-1', 'c-2', 'c-2', 'c-0', 'c-0', 'c-0']
Categories (3, object): ['c-0', 'c-2', 'c-1']
>>> cat_anon(pd.Categorical(c, ordered=True), 'c-', 321)
['c-1', 'c-2', 'c-2', 'c-0', 'c-0', 'c-0']
Categories (3, object): ['c-0' < 'c-2' < 'c-1']
"""
c = as_categorical(c)
if random_state is None:
random_state = np.random
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
elif not isinstance(random_state, np.random.RandomState):
raise TypeError(
"Unknown type `{}` of random_state".format(type(random_state))
)
# Shuffle two times,
# 1. to prevent predicable sequence to category mapping
# 2. to prevent reversing of the new categories to the old ones
fmt = '{}{}'.format
cats = [fmt(prefix, i) for i in range(len(c.categories))]
random_state.shuffle(cats)
c.rename_categories(cats, inplace=True)
cats = c.categories.to_list()
random_state.shuffle(cats)
c.reorder_categories(cats, inplace=True)
return c
def cat_collapse(c, mapping, group_other=False):
"""
Collapse categories into manually defined groups
Parameters
----------
c : list-like
Values that will make up the categorical.
mapping : dict
New categories and the old categories contained in them.
group_other : bool (default: False)
If ``True``, a category is created to contain all other
categories that have not been explicitly collapsed.
The name of the other categories is ``other``, it may be
postfixed by the first available integer starting from
2 if there is a category with a similar name.
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'c', 'd', 'e', 'f']
>>> mapping = {'first_2': ['a', 'b'], 'second_2': ['c', 'd']}
>>> cat_collapse(c, mapping)
['first_2', 'first_2', 'second_2', 'second_2', 'e', 'f']
Categories (4, object): ['first_2', 'second_2', 'e', 'f']
>>> cat_collapse(c, mapping, group_other=True)
['first_2', 'first_2', 'second_2', 'second_2', 'other', 'other']
Categories (3, object): ['first_2', 'second_2', 'other']
Collapsing preserves the order
>>> cat_rev(c)
['a', 'b', 'c', 'd', 'e', 'f']
Categories (6, object): ['f', 'e', 'd', 'c', 'b', 'a']
>>> cat_collapse(cat_rev(c), mapping)
['first_2', 'first_2', 'second_2', 'second_2', 'e', 'f']
Categories (4, object): ['f', 'e', 'second_2', 'first_2']
>>> mapping = {'other': ['a', 'b'], 'another': ['c', 'd']}
>>> cat_collapse(c, mapping, group_other=True)
['other', 'other', 'another', 'another', 'other2', 'other2']
Categories (3, object): ['other', 'another', 'other2']
"""
def make_other_name():
"""
Generate unique name for the other category
"""
if 'other' not in mapping:
return 'other'
for i in range(2, len(mapping)+2):
other = 'other' + str(i)
if other not in mapping:
return other
c = as_categorical(c)
if group_other:
mapping = mapping.copy()
other = make_other_name()
mapped_categories = list(chain(*mapping.values()))
unmapped_categories = c.categories.difference(mapped_categories)
mapping[other] = list(unmapped_categories)
inverted_mapping = {
cat: new_cat
for new_cat, old_cats in mapping.items()
for cat in old_cats
}
# Convert old categories to new values in order and remove
# any duplicates. This preserves the order
new_cats = pd.unique([
inverted_mapping.get(x, x)
for x in c.categories
])
c = pd.Categorical(
[inverted_mapping.get(x, x) for x in c],
categories=new_cats,
ordered=c.ordered
)
return c
def cat_other(c, keep=None, drop=None, other_category='other'):
"""
Replace categories with 'other'
Parameters
----------
c : list-like
Values that will make up the categorical.
keep : list-like
Categories to preserve. Only one of ``keep`` or ``drop``
should be specified.
drop : list-like
Categories to drop. Only one of ``keep`` or ``drop``
should be specified.
other_category : object
Value used for the 'other' values. It is placed at
the end of the categories.
Returns
-------
out : categorical
Values
Examples
--------
>>> c = ['a', 'b', 'a', 'c', 'b', 'b', 'b', 'd', 'c']
>>> cat_other(c, keep=['a', 'b'])
['a', 'b', 'a', 'other', 'b', 'b', 'b', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
>>> cat_other(c, drop=['a', 'b'])
['other', 'other', 'other', 'c', 'other', 'other', 'other', 'd', 'c']
Categories (3, object): ['c', 'd', 'other']
>>> cat_other(pd.Categorical(c, ordered=True), drop=['a', 'b'])
['other', 'other', 'other', 'c', 'other', 'other', 'other', 'd', 'c']
Categories (3, object): ['c' < 'd' < 'other']
"""
if keep is None and drop is None:
raise ValueError(
"Missing columns to `keep` or those to `drop`."
)
elif keep is not None and drop is not None:
raise ValueError(
"Only one of `keep` or `drop` should be given."
)
c = as_categorical(c)
cats = c.categories
if keep is not None:
if not pdtypes.is_list_like(keep):
keep = [keep]
elif drop is not None:
if not pdtypes.is_list_like(drop):
drop = [drop]
keep = cats.difference(drop)
inverted_mapping = {
cat: other_category
for cat in cats.difference(keep)
}
inverted_mapping.update({x: x for x in keep})
new_cats = cats.intersection(keep).to_list() + [other_category]
c = pd.Categorical(
[inverted_mapping.get(x, x) for x in c],
categories=new_cats,
ordered=c.ordered
)
return c
def _lump(lump_it, c, other_category):
"""
Return a categorical with lumped categories
Helper for cat_lump_* functions
Parameters
----------
lump_it : sequence[(obj, bool)]
Sequence of (category, lump_category)
c : categorical
Original categorical.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
Returns
-------
out : categorical
Values
"""
lookup = {
cat: other_category if lump else cat
for cat, lump in lump_it
}
new_cats = (
c.categories
.intersection(lookup.values())
.insert(len(c), other_category)
)
c = pd.Categorical(
[lookup[value] for value in c],
categories=new_cats,
ordered=c.ordered
)
return c
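# Illustrative sketch of the helper (values assumed, consistent with the doctests
# below): with c = pd.Categorical(list('abccdd')) and
# lump_it = [('a', True), ('b', True), ('c', False), ('d', False)],
# _lump(lump_it, c, 'other') gives ['other', 'other', 'c', 'c', 'd', 'd']
# with categories ['c', 'd', 'other'].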
def cat_lump(
c,
n=None,
prop=None,
w=None,
other_category='other',
ties_method='min'
):
"""
Lump together least or most common categories
This is a general method that calls one of
:func:`~plydata.cat_tools.cat_lump_n`
:func:`~plydata.cat_tools.cat_lump_prop` or
:func:`~plydata.cat_tools.cat_lump_lowfreq`
depending on the parameters.
Parameters
----------
c : list-like
Values that will make up the categorical.
n : int (optional)
Number of most/least common values to preserve (not lumped
together). Positive ``n`` preserves the most common,
negative ``n`` preserves the least common.
Lumping happens on condition that the lumped category "other"
will have the smallest number of items.
You should only specify one of ``n`` or ``prop``
prop : float (optional)
Proportion above/below which the values of a category will be
preserved (not lumped together). Positive ``prop`` preserves
categories whose proportion of values is *more* than ``prop``.
Negative ``prop`` preserves categories whose proportion of
values is *less* than ``prop``.
Lumping happens on condition that the lumped category "other"
will have the smallest number of items.
You should only specify one of ``n`` or ``prop``
w : list[int|float] (optional)
Weights for the frequency of each value. It should be the same
length as ``c``.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
ties_method : {'min', 'max', 'average', 'first', 'dense'} (default: min)
How to treat categories that occur the same number of times
(i.e. ties):
* min: lowest rank in the group
* max: highest rank in the group
* average: average rank of the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
Examples
--------
>>> cat_lump(list('abbccc'))
['other', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b', 'c', 'other']
When the least categories put together are not less than the next
smallest group.
>>> cat_lump(list('abcddd'))
['a', 'b', 'c', 'd', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump(list('abcdddd'))
['other', 'other', 'other', 'd', 'd', 'd', 'd']
Categories (2, object): ['d', 'other']
>>> c = pd.Categorical(list('abccdd'))
>>> cat_lump(c, n=1)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
>>> cat_lump(c, n=2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
``n`` Least common categories
>>> cat_lump(c, n=-2)
['a', 'b', 'other', 'other', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
There are fewer than ``n`` categories that are the most/least common.
>>> cat_lump(c, n=3)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump(c, n=-3)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
By proportions, categories that make up *more* than ``prop`` fraction
of the items.
>>> cat_lump(c, prop=1/3.01)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
>>> cat_lump(c, prop=-1/3.01)
['a', 'b', 'other', 'other', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
>>> cat_lump(c, prop=1/2)
['other', 'other', 'other', 'other', 'other', 'other']
Categories (1, object): ['other']
Order of categoricals is maintained
>>> c = pd.Categorical(
... list('abccdd'),
... categories=list('adcb'),
... ordered=True
... )
>>> cat_lump(c, n=2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['d' < 'c' < 'other']
**Weighted lumping**
>>> c = list('abcd')
>>> weights = [3, 2, 1, 1]
>>> cat_lump(c, n=2) # No lumping
['a', 'b', 'c', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump(c, n=2, w=weights)
['a', 'b', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
"""
if n is not None:
return cat_lump_n(c, n, w, other_category, ties_method)
elif prop is not None:
return cat_lump_prop(c, prop, w, other_category)
else:
return cat_lump_lowfreq(c, other_category)
def cat_lump_n(
c,
n,
w=None,
other_category='other',
ties_method='min'
):
"""
Lump together most/least common n categories
Parameters
----------
c : list-like
Values that will make up the categorical.
n : int
Number of most/least common values to preserve (not lumped
together). Positive ``n`` preserves the most common,
negative ``n`` preserves the least common.
Lumping happens on condition that the lumped category "other"
will have the smallest number of items.
You should only specify one of ``n`` or ``prop``
w : list[int|float] (optional)
Weights for the frequency of each value. It should be the same
length as ``c``.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
ties_method : {'min', 'max', 'average', 'first', 'dense'} (default: min)
How to treat categories that occur the same number of times
(i.e. ties):
* min: lowest rank in the group
* max: highest rank in the group
* average: average rank of the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
Examples
--------
>>> c = pd.Categorical(list('abccdd'))
>>> cat_lump_n(c, 1)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
>>> cat_lump_n(c, 2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
``n`` Least common categories
>>> cat_lump_n(c, -2)
['a', 'b', 'other', 'other', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
There are fewer than ``n`` categories that are the most/least common.
>>> cat_lump_n(c, 3)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump_n(c, -3)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
Order of categoricals is maintained
>>> c = pd.Categorical(
... list('abccdd'),
... categories=list('adcb'),
... ordered=True
... )
>>> cat_lump_n(c, 2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['d' < 'c' < 'other']
**Weighted lumping**
>>> c = list('abcd')
>>> weights = [3, 2, 1, 1]
>>> cat_lump_n(c, n=2) # No lumping
['a', 'b', 'c', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump_n(c, n=2, w=weights)
['a', 'b', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
"""
c = as_categorical(c)
if len(c) == 0:
return c
if w is None:
counts = c.value_counts().sort_values(ascending=False)
else:
counts = (
pd.Series(w)
.groupby(c)
.apply(np.sum)
.sort_values(ascending=False)
)
if n < 0:
rank = counts.rank(method=ties_method)
n = -n
else:
rank = (-counts).rank(method=ties_method)
# No categories rank beyond n, so there is nothing to lump
if not (rank > n).any():
return c
lump_it = zip(rank.index, rank > n)
return _lump(lump_it, c, other_category)
def cat_lump_prop(
c,
prop,
w=None,
other_category='other',
):
"""
Lump together least or most common categories by proportion
Parameters
----------
c : list-like
Values that will make up the categorical.
prop : float
Proportion above/below which the values of a category will be
preserved (not lumped together). Positive ``prop`` preserves
categories whose proportion of values is *more* than ``prop``.
Negative ``prop`` preserves categories whose proportion of
values is *less* than ``prop``.
Lumping happens on condition that the lumped category "other"
will have the smallest number of items.
You should only specify one of ``n`` or ``prop``
w : list[int|float] (optional)
Weights for the frequency of each value. It should be the same
length as ``c``.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
Examples
--------
By proportions, categories that make up *more* than ``prop`` fraction
of the items.
>>> c = pd.Categorical(list('abccdd'))
>>> cat_lump_prop(c, 1/3.01)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
>>> cat_lump_prop(c, -1/3.01)
['a', 'b', 'other', 'other', 'other', 'other']
Categories (3, object): ['a', 'b', 'other']
>>> cat_lump_prop(c, 1/2)
['other', 'other', 'other', 'other', 'other', 'other']
Categories (1, object): ['other']
"""
c = as_categorical(c)
if len(c) == 0:
return c
if w is None:
counts = c.value_counts().sort_values(ascending=False)
total = len(c)
else:
counts = (
pd.Series(w)
.groupby(c)
.apply(np.sum)
.sort_values(ascending=False)
)
total = counts.sum()
# For each category find out whether to lump it or keep it
# Create a generator of the form ((cat, lump), ...)
props = counts / total
if prop < 0:
if not (props > -prop).any():
# No proportion more than target, so no lumping
# the most common
return c
else:
lump_it = zip(props.index, props > -prop)
else:
if not (props <= prop).any():
# No proportion less than target, so no lumping
# the least common
return c
else:
lump_it = zip(props.index, props <= prop)
return _lump(lump_it, c, other_category)
def cat_lump_lowfreq(
c,
other_category='other',
):
"""
Lump together least categories
Ensures that the "other" category is still the smallest.
Parameters
----------
c : list-like
Values that will make up the categorical.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
Examples
--------
>>> cat_lump_lowfreq(list('abbccc'))
['other', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['b', 'c', 'other']
When the least categories put together are not less than the next
smallest group.
>>> cat_lump_lowfreq(list('abcddd'))
['a', 'b', 'c', 'd', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump_lowfreq(list('abcdddd'))
['other', 'other', 'other', 'd', 'd', 'd', 'd']
Categories (2, object): ['d', 'other']
"""
c = as_categorical(c)
if len(c) == 0:
return c
# For each category find out whether to lump it or keep it
# Create a generator of the form ((cat, lump), ...)
counts = c.value_counts().sort_values(ascending=False)
if len(counts) == 1:
return c
unique_counts = pd.unique(counts)
smallest = unique_counts[-1]
next_smallest = unique_counts[-2]
smallest_counts = counts[counts == smallest]
smallest_total = smallest_counts.sum()
smallest_cats = smallest_counts.index
if not smallest_total < next_smallest:
return c
lump_it = (
(cat, True) if cat in smallest_cats else (cat, False)
for cat in counts.index
)
return _lump(lump_it, c, other_category)
def cat_lump_min(
c,
min,
w=None,
other_category='other',
):
"""
Lump categories, preserving those that appear at least ``min`` times
Parameters
----------
c : list-like
Values that will make up the categorical.
min : int
Minimum number of times a category must be represented to be
preserved.
w : list[int|float] (optional)
Weights for the frequency of each value. It should be the same
length as ``c``.
other_category : object (default: 'other')
Value used for the 'other' values. It is placed at
the end of the categories.
Examples
--------
>>> c = list('abccdd')
>>> cat_lump_min(c, min=1)
['a', 'b', 'c', 'c', 'd', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
>>> cat_lump_min(c, min=2)
['other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
**Weighted Lumping**
>>> weights = [2, 2, .5, .5, 1, 1]
>>> cat_lump_min(c, min=2, w=weights)
['a', 'b', 'other', 'other', 'd', 'd']
Categories (4, object): ['a', 'b', 'd', 'other']
Unlike :func:`~plydata.cat_tools.cat_lump`, :func:`cat_lump_min`
can lump together and create a category larger than the preserved
categories.
>>> c = list('abxyzccdd')
>>> cat_lump_min(c, min=2)
['other', 'other', 'other', 'other', 'other', 'c', 'c', 'd', 'd']
Categories (3, object): ['c', 'd', 'other']
"""
c = as_categorical(c)
if len(c) == 0:
return c
if w is None:
counts = c.value_counts().sort_values(ascending=False)
else:
counts = (
pd.Series(w)
.groupby(c)
.apply(np.sum)
.sort_values(ascending=False)
)
if (counts >= min).all():
return c
lookup = {
cat: cat if freq >= min else other_category
for cat, freq in counts.items()
}
new_cats = (
c.categories
.intersection(lookup.values())
.insert(len(c), other_category)
)
c = pd.Categorical(
[lookup[value] for value in c],
categories=new_cats,
ordered=c.ordered
)
return c
def cat_rename(c, mapping=None, **kwargs):
"""
Change/rename categories manually
Parameters
----------
c : list-like
Values that will make up the categorical.
mapping : dict (optional)
Mapping of the form ``{old_name: new_name}`` for how to rename
the categories. Setting a value to ``None`` removes the category.
This argument is useful if the old names are not valid
python parameters. Otherwise, ``kwargs`` can be used.
**kwargs : dict
Mapping to rename categories. Setting a value to ``None`` removes
the category.
Examples
--------
>>> c = list('abcd')
>>> cat_rename(c, a='A')
['A', 'b', 'c', 'd']
Categories (4, object): ['A', 'b', 'c', 'd']
>>> c = pd.Categorical(
... list('abcd'),
... categories=list('bacd'),
... ordered=True
... )
>>> cat_rename(c, b='B', d='D')
['a', 'B', 'c', 'D']
Categories (4, object): ['B' < 'a' < 'c' < 'D']
Remove categories by setting them to ``None``.
>>> cat_rename(c, b='B', d=None)
['a', 'B', 'c']
Categories (3, object): ['B' < 'a' < 'c']
"""
c = as_categorical(c)
if mapping is not None and len(kwargs):
raise ValueError("Use only one of `new` or the ``kwargs``.")
lookup = mapping or kwargs
if not lookup:
return c
# Remove categories set to None
remove = [
old
for old, new in lookup.items()
if new is None
]
if remove:
for cat in remove:
del lookup[cat]
c = c.remove_categories(remove).dropna()
# Separately change the values (inplace) and the categories (using an
# array) from the old names to the new ones, then reconcile the two.
categories = c.categories.to_numpy().copy()
c.add_categories(
pd.Index(lookup.values()).difference(c.categories),
inplace=True
)
for old, new in lookup.items():
if old not in c.categories:
raise IndexError("Unknown category '{}'.".format(old))
c[c == old] = new
categories[categories == old] = new
new_categories = pd.unique(categories)
c.remove_unused_categories(inplace=True)
c.set_categories(new_categories, inplace=True)
return c
def cat_relabel(c, func=None, *args, **kwargs):
"""
Change/rename categories and collapse as necessary
Parameters
----------
c : list-like
Values that will make up the categorical.
func : callable
Function to create the new name. The first argument to
the function will be a category to be renamed.
*args : tuple
Positional arguments passed to ``func``.
*kwargs : dict
Keyword arguments passed to ``func``.
Examples
--------
>>> c = list('abcde')
>>> cat_relabel(c, str.upper)
['A', 'B', 'C', 'D', 'E']
Categories (5, object): ['A', 'B', 'C', 'D', 'E']
>>> c = pd.Categorical([0, 1, 2, 1, 1, 0])
>>> def func(x):
... if x == 0:
... return 'low'
... elif x == 1:
... return 'mid'
... elif x == 2:
... return 'high'
>>> cat_relabel(c, func)
['low', 'mid', 'high', 'mid', 'mid', 'low']
Categories (3, object): ['low', 'mid', 'high']
When the function yields the same output for 2 or more
different categories, those categories are collapsed.
>>> def first(x):
... return x[0]
>>> c = pd.Categorical(['aA', 'bB', 'aC', 'dD'],
... categories=['bB', 'aA', 'dD', 'aC'],
... ordered=True
... )
>>> cat_relabel(c, first)
['a', 'b', 'a', 'd']
Categories (3, object): ['b' < 'a' < 'd']
"""
c = as_categorical(c)
new_categories = [func(x, *args, **kwargs) for x in c.categories]
new_categories_uniq = pd.unique(new_categories)
if len(new_categories_uniq) < len(c.categories):
# Collapse
lookup = dict(zip(c.categories, new_categories))
c = pd.Categorical(
[lookup[value] for value in c],
categories=new_categories_uniq,
ordered=c.ordered
)
else:
c.categories = new_categories
return c
def cat_expand(c, *args):
"""
Add additional categories to a categorical
Parameters
----------
c : list-like
Values that will make up the categorical.
*args : tuple
Categories to add.
Examples
--------
>>> cat_expand(list('abc'), 'd', 'e')
['a', 'b', 'c']
Categories (5, object): ['a', 'b', 'c', 'd', 'e']
>>> c = pd.Categorical(list('abcd'), ordered=True)
>>> cat_expand(c, 'e', 'f')
['a', 'b', 'c', 'd']
Categories (6, object): ['a' < 'b' < 'c' < 'd' < 'e' < 'f']
"""
c = as_categorical(c)
c.add_categories(
pd.Index(args).difference(c.categories),
inplace=True
)
return c
def cat_explicit_na(c, na_category='(missing)'):
"""
Give missing values an explicit category
Parameters
----------
c : list-like
Values that will make up the categorical.
na_category : object (default: '(missing)')
Category for missing values
Examples
--------
>>> c = pd.Categorical(
... ['a', 'b', None, 'c', None, 'd', 'd'],
... ordered=True
... )
>>> c
['a', 'b', NaN, 'c', NaN, 'd', 'd']
Categories (4, object): ['a' < 'b' < 'c' < 'd']
>>> cat_explicit_na(c)
['a', 'b', '(missing)', 'c', '(missing)', 'd', 'd']
Categories (5, object): ['a' < 'b' < 'c' < 'd' < '(missing)']
"""
c = as_categorical(c)
bool_idx = pd.isnull(c)
if any(bool_idx):
c.add_categories([na_category], inplace=True)
c[bool_idx] = na_category
return c
def cat_remove_unused(c, only=None):
"""
Remove unused categories
Parameters
----------
c : list-like
Values that will make up the categorical.
only : list-like (optional)
The categories to remove *if* they are empty. If not given,
all unused categories are dropped.
Examples
--------
>>> c = pd.Categorical(list('abcdd'), categories=list('bacdefg'))
>>> c
['a', 'b', 'c', 'd', 'd']
Categories (7, object): ['b', 'a', 'c', 'd', 'e', 'f', 'g']
>>> cat_remove_unused(c)
['a', 'b', 'c', 'd', 'd']
Categories (4, object): ['b', 'a', 'c', 'd']
>>> cat_remove_unused(c, only=['a', 'e', 'g'])
['a', 'b', 'c', 'd', 'd']
Categories (5, object): ['b', 'a', 'c', 'd', 'f']
"""
if not pdtypes.is_categorical_dtype(c):
# All categories are used
c = pd.Categorical(c)
return c
else:
c = c.copy()
if only is None:
only = c.categories
used_idx = pd.unique(c.codes)
used_categories = c.categories[used_idx]
c = c.remove_categories(
c.categories
.difference(used_categories)
.intersection(only)
)
return c
def cat_unify(cs, categories=None):
"""
Unify (union of all) the categories in a list of categoricals
Parameters
----------
cs : list-like
Categoricals
categories : list-like
Extra categories to apply to every categorical.
Examples
--------
>>> c1 = pd.Categorical(['a', 'b'], categories=list('abc'))
>>> c2 = pd.Categorical(['d', 'e'], categories=list('edf'))
>>> c1_new, c2_new = cat_unify([c1, c2])
>>> c1_new
['a', 'b']
Categories (6, object): ['a', 'b', 'c', 'e', 'd', 'f']
>>> c2_new
['d', 'e']
Categories (6, object): ['a', 'b', 'c', 'e', 'd', 'f']
>>> c1_new, c2_new = cat_unify([c1, c2], categories=['z', 'y'])
>>> c1_new
['a', 'b']
Categories (8, object): ['a', 'b', 'c', 'e', 'd', 'f', 'z', 'y']
>>> c2_new
['d', 'e']
Categories (8, object): ['a', 'b', 'c', 'e', 'd', 'f', 'z', 'y']
"""
cs = [as_categorical(c) for c in cs]
all_cats = list(chain(*(c.categories.to_list() for c in cs)))
if categories is None:
categories = pd.unique(all_cats)
else:
categories = pd.unique(all_cats + categories)
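# Assumed completion: apply the unified categories to every categorical,
# matching the examples in the docstring above.
cs = [c.set_categories(categories) for c in cs]
return cs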
# %%
import rasterio
import pandas as pds
import numpy as np
import numpy.ma as ma
from sklearn.pipeline import Pipeline
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn
# %%
HI_RES = '30s'
LOW_RES = '10m'
INCLUDE_VARS = [1,2,3,4,5,6,7,10,11]
vlab = ['meanT', 'diT', 'isoT', 'seaT', '+monT', '-monT', 'range', 'mean+Q', 'mean-Q']
def print_matrix(M, rlab=None, clab=None):
t = '\t'
if clab:
print('', end=t)
for cl in clab:
print(cl, end=t)
print('')
for ir, r in enumerate(M):
if rlab:
print(f'{rlab[ir]}', end=t)
for ic, c in enumerate(r):
print(f'{c:.2f}' if abs(c) > 0 else '', end=t)
print('')
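# Likely usage (assumption): print_matrix(corr, rlab=vlab, clab=vlab) displays the
# correlation matrix computed below with the short variable labels as row/column headers.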
def read_band(path):
with rasterio.open(path) as file:
return file.read(1, masked=True).ravel()
def build_matrix(res, varnums):
features = [read_band(f'./_data/wc2.1_{res}/bio_{num}.tif') for num in varnums]
return ma.mask_rows(ma.vstack(features).transpose())
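# build_matrix returns a masked 2-D array with one row per raster cell and one
# column per selected bioclim variable; mask_rows masks an entire row whenever
# any of its bands is missing.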
# %%
raw_rows = build_matrix(LOW_RES, INCLUDE_VARS)
compressed = ma.compress_rows(raw_rows)
corr = np.corrcoef(compressed, rowvar=False)
cov = np.cov(compressed, rowvar=False)
# %%
scaler = StandardScaler()
scaled = scaler.fit_transform(compressed)
df = pds.DataFrame(scaled, columns=vlab)
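# %%
# Sketch (assumption): the Pipeline, PCA and KMeans imports above suggest
# clustering the scaled features, e.g.:
# pipe = Pipeline([("pca", PCA(n_components=3)), ("kmeans", KMeans(n_clusters=5, random_state=0))])
# labels = pipe.fit_predict(scaled)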
# install imblearn package to a specific anaconda enviroment boston_house_price
# $ conda install -n boston_house_price -c conda-forge imbalanced-learn
# update imblearn package to a specific anaconda enviroment boston_house_price
# $ conda update -n boston_house_price -c glemaitre imbalanced-learn
# =============================================================
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
# Set a random seed
import random
seed = 42
random.seed(seed)
# Import supplementary visualization code visuals.py
import scripts.visuals as vs
# Load the Census dataset
path = '../data/'
train_data = path + 'census.csv'
test_data = path + 'test_census.csv'
data = pd.read_csv(train_data)
print(data.head(n=1))
print(data.shape)
# get the types of columns
print(data.dtypes)
# Pandas has a helpful select_dtypes function
# which we can use to build a new dataframe containing only the object columns.
obj_data = data.select_dtypes(include=['object']).copy()
# Before going any further, we have to check if there are null values in the data that we need to clean up.
print(obj_data[obj_data.isnull().any(axis=1)])
# TODO: Total number of records
n_records = data.shape[0]
# TODO: Number of records where individual's income is more than $50,000
# TODO: Number of records where individual's income is at most $50,000
# Method1:
n_at_most_50k, n_greater_50k = data.income.value_counts()
# Method2: (optional) -->
# n2_greater_50k = data[data['income']=='>50K'].shape[0]
# n2_at_most_50k = data[data['income']=='<=50K'].shape[0]
n_aux = data.loc[(data['capital-gain'] > 0) & (data['capital-loss'] > 0)].shape
# TODO: Percentage of individuals whose income is more than $50,000
greater_percent = (100*n_greater_50k)/n_records
# Print the results
print("Total number of records: {}".format(n_records))
print("Individuals making more than $50,000: {}".format(n_greater_50k))
print("Individuals making at most $50,000: {}".format(n_at_most_50k))
print("Percentage of individuals making more than $50,000: {}%".format(greater_percent))
# Split the data into features and target label
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize skewed continuous features of original data
vs.distribution(data)
# Log-transform the skewed features
skewed = ['capital-gain', 'capital-loss']
features_log_transformed = pd.DataFrame(data = features_raw)
features_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1))
# Visualize the new log distributions
vs.distribution(features_log_transformed, transformed = True)
# Import sklearn.preprocessing.StandardScaler
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
features_log_minmax_transform = pd.DataFrame(data = features_log_transformed)
features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])
# Show an example of a record with scaling applied
print(features_log_minmax_transform.head(n = 5))
# TODO: One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
features_final = pd.get_dummies(features_log_minmax_transform)
# TODO: Encode the 'income_raw' data to numerical values
# Method1:
encoder = LabelEncoder()
income = pd.Series(encoder.fit_transform(income_raw))
# Method2:(optional) -->
income1 =income_raw.map({'<=50K':0, '>50K':1})
# Method3:(optional) -->
income2 =pd.get_dummies(income_raw)['>50K']
# Print the number of features after one-hot encoding
encoded = list(features_final.columns)
print("{} total features after one-hot encoding.".format(len(encoded)))
# Uncomment the following line to see the encoded feature names
print(encoded)
#-----------------
# @Raafat: Some techniques to deal imbalanced data:
# --> under sampling
from imblearn.under_sampling import CondensedNearestNeighbour
cnn = CondensedNearestNeighbour(random_state=42)
X_res, y_res = cnn.fit_sample(features_final[0:300], income[0:300])
print('not Resampled dataset shape {}'.format(income[0:300].value_counts()))
print('cnn Resampled dataset shape {}'.format(pd.Series(y_res).value_counts()))
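# --> over sampling (sketch, assumed alternative; not in the original script):
# from imblearn.over_sampling import SMOTE
# sm = SMOTE(random_state=42)
# X_res2, y_res2 = sm.fit_sample(features_final[0:300], income[0:300])
# print('SMOTE Resampled dataset shape {}'.format(pd.Series(y_res2).value_counts()))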
# -*- coding: utf-8 -*-
"""
*This script contains a post-processing script for plotting times recorded by the main_constellation.py application*
Placeholder
"""
import numpy as np
import pandas as pd
from datetime import timedelta, datetime, timezone
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
from scipy import stats
# from scipy.stats import skew, kurtosis
font1 = {"family": "sans-serif", "weight": "bold", "size": 24}
font1 = {"family": "sans-serif", "weight": "bold", "size": 10}
start = pd.DataFrame(constellation.fires)
start.set_index("fireId", inplace=True)
detect = pd.DataFrame(constellation.detect)
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
        expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
        tm.assert_frame_equal(result, expected)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# Functionality to read and store in HDF5 files
import h5py
import pandas as pd
import random
import string
import os
import datetime
import json
from data_science.data_transfer.data_api import Dataset
class HDF5Dataset:
def __init__(self, file_name, file_path, dataset_id,
random_string_in_name=10):
"""
Initialization.
:param self:
:param file_name: name for the file. No ending necessary
:param file_path: location path
:param dataset_id: dataset's id
        :param random_string_in_name: length of the random string to add to
the name
"""
self.file_name = file_name
self.file_path = file_path
self.file_w_path = os.path.join(self.file_path, self.file_name)
self._dataset_id = dataset_id
self.random_string_in_name = random_string_in_name
@property
def dataset_id(self):
# do something
return self._dataset_id
@dataset_id.setter
def dataset_id(self, value):
self._dataset_id = value
def create_h5_file(self):
"""
Create the h5 file and add the groups.
:param self: self
"""
self.file_name = self.file_name + '-' + \
_generate_random_string(l=self.random_string_in_name) + '.h5'
self.file_w_path = os.path.join(self.file_path, self.file_name)
try:
f = h5py.File(self.file_w_path, 'a')
f.create_group('meta')
f.create_group('meta/columns')
f.create_group('data')
f.close()
return self.file_name
except ValueError as e:
print(e)
return
def add_dataset_meta_to_h5(self, dataset):
"""
Add the meta data of the dataset to the file. Consist of:
A dataframe with the dataset attributes.
A dataframe with the columns of the dataset.
:param self: self
:param dataset: a Dataset instance
"""
if not isinstance(dataset, Dataset):
raise TypeError('A dataset has to be provided.')
# create a df with the metadata
columns = list(dataset.dump_attributes_to_dictionary().keys())
df = pd.DataFrame(columns=columns)
df.loc[0] = dataset.dump_attributes_to_dictionary()
# insert to the file
df.to_hdf(self.file_w_path, key='meta/' + self._dataset_id, mode='a')
def add_dataset_data_df_to_h5(self, df):
"""
Add the data of the dataset to the file.
:param self: self
:param df: dataframe with the data
"""
# insert the df to the file
        df.to_hdf(self.file_w_path, key='data/' + self._dataset_id, mode='a')  # use the full path, consistent with the other writers
# insert the columns names as df to the metadata
df_col = pd.DataFrame(columns=['columns'])
df_col['columns'] = list(df.columns.values)
# insert to the file
df_col.to_hdf(self.file_w_path,
key='meta/columns/' + self._dataset_id,
mode='a')
def remove_dataset_from_h5(self):
"""
Remove the dataset from the file.
:param self: self
"""
try:
with h5py.File(self.file_w_path, 'a') as f:
del f['data/' + self._dataset_id]
del f['meta/' + self._dataset_id]
del f['meta/columns/' + self._dataset_id]
except KeyError as e:
print(e)
def read_dataset_data_df_from_h5(self):
"""
Read the data from the file. Returns a dataframe.
:param self: self
"""
        try:
            df = pd.read_hdf(self.file_w_path, 'data/' + self._dataset_id, 'r')
            return df
        except KeyError as e:
            # mirror the error handling used in remove_dataset_from_h5
            print(e)
# -*- coding: utf-8 -*-
"""
Created on Mon May 07 17:34:56 2018
@author: gerar
"""
import os
import pandas as pd
import numpy as np
from scipy.stats.stats import pearsonr
#%%
def rmse(predictions, targets):
return np.sqrt(((predictions - targets) ** 2).mean())
#%%
def mae(predictions,targets):
return np.abs((predictions - targets)).mean()
#%%
def bias(predictions,targets):
return np.mean(predictions)-np.mean(targets)
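#%%
# Quick illustrative check of the three error metrics above (numbers chosen by
# hand, not taken from any MODIS/AERONET retrieval): for predictions [1, 2, 3]
# against targets [1, 1, 1] we expect rmse = sqrt(5/3), mae = 1.0 and bias = 1.0.
def _example_error_metrics():
    preds = np.array([1.0, 2.0, 3.0])
    targs = np.array([1.0, 1.0, 1.0])
    return rmse(preds, targs), mae(preds, targs), bias(preds, targs)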
#%%
def r_deming(x,y):
x_mean = np.mean(x)
y_mean = np.mean(y)
sxx = np.sum(np.power(x-x_mean,2.))/(len(x)-1.)
syy = np.sum(np.power(y-y_mean,2.))/(len(x)-1.)
sxy = np.sum((x-x_mean)*(y-y_mean))/(len(x)-1.)
lamb = 1#np.var(y)/np.var(x)
b1 = (syy-lamb*sxx+np.sqrt((syy-lamb*sxx)**2 +4*lamb*sxy**2))/(2*sxy)
b0 = y_mean - x_mean*b1
return [b1,b0]
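#%%
# Illustrative sketch of r_deming (with lamb fixed to 1 it reduces to
# orthogonal regression): for the exactly linear pair x = [0, 1, 2, 3],
# y = [1, 3, 5, 7] the returned slope/intercept is [2.0, 1.0]. The arrays
# here are made up for the example, not real AOD data.
def _example_r_deming():
    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = np.array([1.0, 3.0, 5.0, 7.0])
    return r_deming(x, y)  # -> [2.0, 1.0]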
#%%
def get_data(_file):
# data = pd.read_table(file_to_plot[0], parse_dates=[0],infer_datetime_format = True,usecols=(0,2,4)) #, parse_dates=[0],infer_datetime_format = True
# data = data.dropna() #[pd.notnull(data['AOD_AERONET'])]
# data = data.set_index('Date_MODIS')
modis_data,aeronet_data = np.loadtxt(_file,skiprows = 1,usecols=(2,4),unpack=True)
return modis_data, aeronet_data
#%%
def helper(x):
return pd.DataFrame({'AOD_MODIS':x['AOD_MODIS'].values,'AOD_AERONET':x['AOD_AERONET'].values})
#%%
def ee_fraction(predictions, targets):
_ee = np.abs(0.05 + 0.15*targets)
ee_plus = targets + _ee
ee_minus = targets - _ee
n_tot = len(predictions)
within_ee = predictions[np.logical_and(ee_minus<predictions,predictions<ee_plus)]
return '{:.2%}'.format(float(len(within_ee))/n_tot)
# m_plus,b_plus = np.polyfit(targets,ee_plus,1)
# m_minus,b_minus = np.polyfit(targets,ee_minus,1)
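#%%
# Illustrative sketch of ee_fraction: the envelope is +/-(0.05 + 0.15*AOD)
# around each AERONET value and the function reports the share of MODIS
# retrievals falling inside it. With the made-up values below, 0.25 lies inside
# the [0.12, 0.28] envelope of 0.2 while 0.9 lies outside the [0.375, 0.625]
# envelope of 0.5, so the result is '50.00%'.
def _example_ee_fraction():
    preds = np.array([0.25, 0.9])
    targs = np.array([0.2, 0.5])
    return ee_fraction(preds, targs)  # -> '50.00%'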
#%%
def db_results(row_names,data,s=None,e=None):
ix_names=['RMSE','R_pearson','MAE','BIAS','m_deming','b_deming','MEAN_MODIS','MEAN_AERONET','N','f']
if len(row_names) == 1:
db = pd.DataFrame(columns=row_names,index=ix_names)
db.loc['RMSE']=rmse(data[0],data[1])
db.loc['R_pearson']=pearsonr(data[0],data[1])[0]
db.loc['MAE']=mae(data[0],data[1])
db.loc['BIAS']=bias(data[0],data[1])
m,b = r_deming(data[1],data[0])
db.loc['m_deming']=m
db.loc['b_deming']=b
db.loc['MEAN_MODIS']=np.mean(data[0])
db.loc['MEAN_AERONET']=np.mean(data[1])
db.loc['N']=len(data[0])
db.loc['f']=ee_fraction(data[0],data[1])
else:
db = pd.DataFrame(columns=range(s,e),index=ix_names)
for col in row_names:
t_data = data.loc[col]
if len(t_data) <= 2:
continue
            # use a single .loc[row, col] lookup; chained indexing may silently
            # assign to a copy instead of the frame itself
            db.loc['RMSE', col] = rmse(t_data['AOD_MODIS'], t_data['AOD_AERONET'])
            db.loc['R_pearson', col] = pearsonr(t_data['AOD_MODIS'], t_data['AOD_AERONET'])[0]
            db.loc['MAE', col] = mae(t_data['AOD_MODIS'], t_data['AOD_AERONET'])
            db.loc['BIAS', col] = bias(t_data['AOD_MODIS'], t_data['AOD_AERONET'])
            m, b = r_deming(t_data['AOD_AERONET'], t_data['AOD_MODIS'])
            db.loc['m_deming', col] = m
            db.loc['b_deming', col] = b
            db.loc['MEAN_MODIS', col] = np.mean(t_data['AOD_MODIS'])
            db.loc['MEAN_AERONET', col] = np.mean(t_data['AOD_AERONET'])
            db.loc['N', col] = len(t_data['AOD_MODIS'])
            db.loc['f', col] = ee_fraction(t_data['AOD_MODIS'], t_data['AOD_AERONET'])
return db
#%%
def main():
files = [x for x in os.listdir(os.getcwd()) if x.endswith("matched_data_end.txt")]
for _file in files:
modis, aeronet = get_data(_file)
general_data = db_results(['Statistics'],[modis,aeronet])
data = | pd.read_table(_file,usecols=[0,2,4]) | pandas.read_table |
import logging
import traceback
import pandas as pd
import numpy as np
import seaborn as sns
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
import matplotlib.ticker as ticker
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.backends.backend_pdf import PdfPages
import inStrain.plotting.utilities
import inStrain.profile.profile_utilities  # needed for mm_counts_to_counts_shrunk used below
from inStrain.plotting.utilities import plot_genome
from inStrain.plotting.utilities import estimate_breadth
def genome_plot_from_IS(IS, plot_dir=False, **kwargs):
# Load the required data
try:
stb = IS.get('scaffold2bin')
b2s = defaultdict(list)
for s, b in stb.items():
b2s[b].append(s)
assert len(b2s.keys()) > 0
# Load the cache
covTs = kwargs.get('covT')#, IS.get('covT'))
clonTs = kwargs.get('clonT')#, IS.get('clonT'))
raw_linkage_table = kwargs.get('raw_linkage_table')#, IS.get('raw_linkage_table'))
cumulative_snv_table = kwargs.get('cumulative_snv_table')#, IS.get('cumulative_snv_table'))
scaffold2length = IS.get('scaffold2length')
rl = IS.get_read_length()
profiled_scaffolds = set(scaffold2length.keys())
except:
logging.error("Skipping plot 2 - you don't have all required information. You need to run inStrain genome_wide first")
traceback.print_exc()
return
# Make the plot
logging.info("Plotting plot 2")
name = 'genomeWide_microdiveristy_metrics.pdf'
pp = PdfPages(plot_dir + name)
for genome, scaffolds in b2s.items():
if not plot_genome(genome, IS, **kwargs):
continue
present_scaffolds = list(set(scaffolds).intersection(set(profiled_scaffolds)))
Wdb, breaks, midpoints = load_windowed_metrics(present_scaffolds,
scaffold2length,
rl,
report_midpoints=True,
covTs=covTs, clonTs=clonTs,
raw_linkage_table=raw_linkage_table,
cumulative_snv_table=cumulative_snv_table)
if len(Wdb) == 0:
logging.debug(f"{genome} could not have windowed metrics loaded")
continue
genomeWide_microdiveristy_metrics_plot(Wdb, breaks, title=genome)
fig = plt.gcf()
fig.set_size_inches(8, 5)
fig.tight_layout()
pp.savefig(fig)#, bbox_inches='tight')
#plt.show()
plt.close(fig)
# Save the figure
pp.close()
#plt.show()
plt.close('all')
def scaffold_inspection_from_IS(IS, plot_dir=False, **kwargs):
# Load the required data
try:
stb = IS.get('scaffold2bin')
b2s = defaultdict(list)
for s, b in stb.items():
b2s[b].append(s)
assert len(b2s.keys()) > 0
# Load the cache
covTs = kwargs.get('covTs', IS.get('covT'))
clonTs = kwargs.get('clonTs', IS.get('clonT'))
raw_linkage_table = kwargs.get('raw_linkage_table', IS.get('raw_linkage_table'))
cumulative_snv_table = kwargs.get('cumulative_snv_table', IS.get('cumulative_snv_table'))
scaffold2length = IS.get('scaffold2length')
rl = IS.get_read_length()
profiled_scaffolds = set(scaffold2length.keys())
except:
logging.error("Skipping plot 7 - you don't have all required information. You need to run inStrain genome_wide first")
traceback.print_exc()
return
# Make the plot
logging.info("Plotting plot 7")
name = 'ScaffoldInspection_plot.pdf'
pp = PdfPages(plot_dir + name)
for genome, scaffolds in b2s.items():
if not plot_genome(genome, IS, **kwargs):
continue
present_scaffolds = list(set(scaffolds).intersection(set(profiled_scaffolds)))
Wdb, breaks, midpoints = load_windowed_metrics(present_scaffolds,
scaffold2length,
rl,
report_midpoints=True,
covTs=covTs, clonTs=clonTs,
raw_linkage_table=raw_linkage_table,
cumulative_snv_table=cumulative_snv_table)
if len(Wdb) == 0:
logging.debug(f"{genome} could not have windowed metrics loaded")
continue
scaffold_inspection_plot(Wdb, breaks, midpoints, title=genome)
fig = plt.gcf()
fig.tight_layout()
pp.savefig(fig)#, bbox_inches='tight')
#plt.show()
plt.close(fig)
# Save the figure
pp.close()
#plt.show()
plt.close('all')
def genomeWide_microdiveristy_metrics_plot(Wdb, breaks, title=''):
'''
Make the multiple metrics plot
'''
# Get set up for multiple rows
i = len(Wdb['metric'].unique())
if i > 1:
fig, ax = plt.subplots(i, 1, sharex=True)
else:
ax = {}
ax[0] = plt.gca()
i = 0
for metric in ['linkage', 'snp_density', 'coverage', 'nucl_diversity']:
#for metric, wdb in Wdb.groupby('metric'):
if metric not in set(Wdb['metric'].tolist()):
continue
wdb = Wdb[Wdb['metric'] == metric]
med = wdb['value'].median()
# Rotate colors:
colors = ['red', 'blue', 'black']
c = 0
for mm, ddb in wdb.groupby('ANI'):
ax[i].plot(ddb['midpoint'], ddb['value'], c=colors[c], label=mm, marker='o', ms=1)#, ls='')
c += 1
ax[i].set_title("{0}".format(metric))
ax[i].grid(False)
if i == 0:
ax[i].legend(loc='upper left', title='Min read ANI (%)')
# Add breaks
for b in breaks:
ax[i].axvline(b, ls='-', c='lightgrey', zorder=-1)
i += 1
plt.xlabel('genome position')
plt.xlim(0, Wdb['midpoint'].max())
plt.suptitle(title, y=0.999)
plt.subplots_adjust(hspace=0.3)
def load_windowed_metrics(scaffolds, s2l, rLen, metrics=None, window_len=None, ANI_levels=[0, 100],
min_scaff_len=0, report_midpoints=False, covTs=False, clonTs=False,
raw_linkage_table=False, cumulative_snv_table=False):
if metrics is None:
metrics = ['coverage', 'nucl_diversity', 'linkage', 'snp_density']
if type(metrics) != type([]):
print("Metrics must be a list")
return
# Figure out the MMs needed
#rLen = IS.get_read_length()
mms = [_get_mm(None, ANI, rLen=rLen) for ANI in ANI_levels]
# Sort the scaffolds
#s2l = IS.get('scaffold2length')
scaffolds = sorted(scaffolds, key=s2l.get, reverse=True)
if min_scaff_len > 0:
scaffolds = [s for s in scaffolds if s2l[s] >= min_scaff_len]
# Figure out the window length
if window_len == None:
window_len = int(sum([s2l[s] for s in scaffolds]) / 100)
else:
window_len = int(window_len)
# Calculate the breaks
breaks = []
midpoints = {}
tally = 0
for scaffold in scaffolds:
midpoints[scaffold] = tally + int(s2l[scaffold] / 2)
tally += s2l[scaffold]
breaks.append(tally)
dbs = []
if 'coverage' in metrics:
if covTs == False:
logging.error("need covTs for coverage")
raise Exception
cdb = load_windowed_coverage_or_clonality('coverage', covTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'coverage'
dbs.append(cdb)
# if 'clonality' in metrics:
# cdb = load_windowed_coverage_or_clonality(IS, 'clonality', scaffolds, window_len, mms, ANI_levels, s2l)
# cdb['metric'] = 'clonality'
# dbs.append(cdb)
if 'nucl_diversity' in metrics:
if clonTs == False:
logging.error("need clonTs for microdiversity")
raise Exception
cdb = load_windowed_coverage_or_clonality('nucl_diversity', clonTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'nucl_diversity'
dbs.append(cdb)
if 'linkage' in metrics:
if raw_linkage_table is False:
logging.error("need raw_linkage_table for linkage")
raise Exception
cdb = load_windowed_linkage(raw_linkage_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'linkage'
dbs.append(cdb)
if 'snp_density' in metrics:
if cumulative_snv_table is False:
logging.error("need cumulative_snv_table for snp_density")
raise Exception
if len(cumulative_snv_table) > 0:
cdb = load_windowed_SNP_density(cumulative_snv_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'snp_density'
dbs.append(cdb)
if len(dbs) > 0:
Wdb = pd.concat(dbs, sort=True)
Wdb = Wdb.rename(columns={'avg_cov':'value'})
else:
Wdb = pd.DataFrame()
# Add blanks at the breaks
table = defaultdict(list)
for mm, ani in zip(mms, ANI_levels):
for metric in Wdb['metric'].unique():
for bre in breaks:
table['scaffold'].append('break')
table['mm'].append(mm)
table['ANI'].append(ani)
                table['adjusted_start'].append(bre) # The minus one makes sure it doesn't split things it shouldn't
table['adjusted_end'].append(bre)
table['value'].append(np.nan)
table['metric'].append(metric)
bdb = pd.DataFrame(table)
Wdb = pd.concat([Wdb, bdb], sort=False)
if len(Wdb) > 0:
Wdb['midpoint'] = [np.mean([x, y]) for x, y in zip(Wdb['adjusted_start'], Wdb['adjusted_end'])]
Wdb = Wdb.sort_values(['metric', 'mm', 'midpoint', 'scaffold'])
if report_midpoints:
return Wdb, breaks, midpoints
else:
return Wdb, breaks
def _get_mm(IS, ANI, rLen = None):
'''
Get the mm corresponding to an ANI level in an IS
'''
if ANI > 1:
ANI = ANI / 100
if rLen == None:
rLen = IS.get_read_length()
#rLen = IS.get('mapping_info')['mean_pair_length'].tolist()[0]
mm = int(round((rLen - (rLen * ANI))))
return mm
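# Minimal sketch of what _get_mm computes (values are illustrative, not taken
# from a real inStrain profile): a 98% minimum read ANI on 150 bp reads allows
# int(round(150 - 150*0.98)) == 3 mismatches; 95% ANI on 100 bp reads allows 5.
def _example_get_mm():
    return _get_mm(None, 98, rLen=150), _get_mm(None, 0.95, rLen=100)  # -> (3, 5)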
def load_windowed_coverage_or_clonality(thing, covTs, scaffolds, window_len, mms, ANI_levels, s2l):
'''
Get the windowed coverage
Pass in a clonTs for microdiversity and covTs for coverage
'''
if thing == 'coverage':
item = 'covT'
elif thing == 'nucl_diversity':
item = 'clonT'
else:
print("idk what {0} is".format(thing))
return
# Get the covTs
#covTs = IS.get(item, scaffolds=scaffolds)
# Make the windows
dbs = []
tally = 0
breaks = []
for scaffold in scaffolds:
if scaffold not in covTs:
tally += s2l[scaffold]
breaks.append(tally)
continue
else:
covT = covTs[scaffold]
for mm, ani in zip(mms, ANI_levels):
if item == 'covT':
cov = inStrain.profile.profile_utilities.mm_counts_to_counts_shrunk(covT, mm)
if len(cov) == 0:
continue
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold])
elif item == 'clonT':
cov = _get_basewise_clons3(covT, mm)
if len(cov) == 0:
continue
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold], full_len=False)
db['avg_cov'] = [1 - x if x == x else x for x in db['avg_cov']]
db['scaffold'] = scaffold
db['mm'] = mm
db['ANI'] = ani
db['adjusted_start'] = db['start'] + tally
db['adjusted_end'] = db['end'] + tally
dbs.append(db)
tally += s2l[scaffold]
breaks.append(tally)
if len(dbs) > 0:
Wdb = pd.concat(dbs)
else:
Wdb = pd.DataFrame()
return Wdb#, breaks
def load_windowed_linkage(Ldb, scaffolds, window_len, mms, ANI_levels, s2l, on='r2'):
# Get the linkage table
#Ldb = IS.get('raw_linkage_table')
Ldb = Ldb[Ldb['scaffold'].isin(scaffolds)].sort_values('mm')
got_scaffolds = set(Ldb['scaffold'].unique())
# Make the windows
dbs = []
tally = 0
breaks = []
for scaffold in scaffolds:
if scaffold not in got_scaffolds:
tally += s2l[scaffold]
breaks.append(tally)
continue
else:
ldb = Ldb[Ldb['scaffold'] == scaffold]
for mm, ani in zip(mms, ANI_levels):
db = ldb[ldb['mm'] <= int(mm)].drop_duplicates(subset=['scaffold', 'position_A', 'position_B'], keep='last')
cov = db.set_index('position_A')[on].sort_index()
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold], full_len=False)
db['scaffold'] = scaffold
db['mm'] = mm
db['ANI'] = ani
db['adjusted_start'] = db['start'] + tally
db['adjusted_end'] = db['end'] + tally
dbs.append(db)
tally += s2l[scaffold]
breaks.append(tally)
if len(dbs) > 0:
Wdb = pd.concat(dbs)
else:
Wdb = pd.DataFrame()
return Wdb
def load_windowed_SNP_density(Ldb, scaffolds, window_len, mms, ANI_levels, s2l):
# Get the table
#Ldb = IS.get('cumulative_snv_table')
Ldb = Ldb[Ldb['scaffold'].isin(scaffolds)].sort_values('mm')
got_scaffolds = list(Ldb['scaffold'].unique())
# Make the windows
dbs = []
tally = 0
breaks = []
for scaffold in scaffolds:
if scaffold not in got_scaffolds:
tally += s2l[scaffold]
breaks.append(tally)
continue
else:
ldb = Ldb[Ldb['scaffold'] == scaffold]
for mm, ani in zip(mms, ANI_levels):
db = ldb[ldb['mm'] <= int(mm)].drop_duplicates(subset=['scaffold', 'position'], keep='last')
cov = db.set_index('position')['ref_base'].sort_index()
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold], full_len='count')
db['scaffold'] = scaffold
db['mm'] = mm
db['ANI'] = ani
db['adjusted_start'] = db['start'] + tally
db['adjusted_end'] = db['end'] + tally
dbs.append(db)
tally += s2l[scaffold]
breaks.append(tally)
if len(dbs) > 0:
Wdb = pd.concat(dbs)
else:
Wdb = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
"""Re-generate exps.csv from individual experiments
"""
import argparse
import logging
from os.path import join as pjoin
from logging import debug, info
import pandas as pd
import os
def create_exps_from_folders(expsdir, dffolderspath):
files = sorted(os.listdir(expsdir))
df = pd.DataFrame()
for i, d in enumerate(files):
dpath = pjoin(expsdir, d)
if not os.path.isdir(dpath): continue
expspath = pjoin(dpath, 'config.json')
dfaux = pd.read_json(expspath)
df = | pd.concat([df, dfaux], axis=0, sort=False) | pandas.concat |
import librosa
import numpy as np
import pandas as pd
from os import listdir
from os.path import isfile, join
from audioread import NoBackendError
def extract_features(path, label, emotionId, startid):
"""
提取path目录下的音频文件的特征,使用librosa库
:param path: 文件路径
:param label: 情绪类型
:param startid: 开始的序列号
:return: 特征矩阵 pandas.DataFrame
"""
id = startid # 序列号
feature_set = pd.DataFrame() # 特征矩阵
# 单独的特征向量
labels = pd.Series()
emotion_vector = pd.Series()
songname_vector = pd.Series()
tempo_vector = pd.Series()
total_beats = pd.Series()
average_beats = pd.Series()
chroma_stft_mean = pd.Series()
# chroma_stft_std = pd.Series()
chroma_stft_var = pd.Series()
# chroma_cq_mean = pd.Series()
# chroma_cq_std = pd.Series()
# chroma_cq_var = pd.Series()
# chroma_cens_mean = pd.Series()
# chroma_cens_std = pd.Series()
# chroma_cens_var = pd.Series()
mel_mean = pd.Series()
# mel_std = pd.Series()
mel_var = | pd.Series() | pandas.Series |
"""Daylight hours from http://www.sunrisesunset.com """
import re
import datetime
import requests
from six.moves import xrange
from os.path import join, abspath, dirname
import pandas as pd
url = "http://sunrisesunset.com/calendar.asp"
r0 = re.compile("<[^>]+>| |[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")
def fetch_daylight_hours(lat, lon, tz, dst, year):
"""Fetch daylight hours from sunrisesunset.com for a given location.
Parameters
----------
lat : float
Location's latitude.
lon : float
Location's longitude.
tz : int or float
Time zone offset from UTC. Use floats for half-hour time zones.
dst : int
Daylight saving type, e.g. 0 -> none, 1 -> North America, 2 -> Europe.
See sunrisesunset.com/custom.asp for other possible values.
year : int
Year (1901..2099).
"""
daylight = []
summer = 0 if lat >= 0 else 1
for month in xrange(1, 12+1):
args = dict(url=url, lat=lat, lon=lon, tz=tz, dst=dst, year=year, month=month)
response = requests.get("%(url)s?comb_city_info=_;%(lon)s;%(lat)s;%(tz)s;%(dst)s&month=%(month)s&year=%(year)s&time_type=1&wadj=1" % args)
entries = r1.findall(r0.sub("", response.text))
for day, note, sunrise_hour, sunrise_minute, sunset_hour, sunset_minute in entries:
if note == "DST Begins":
summer = 1
elif note == "DST Ends":
summer = 0
date = datetime.date(year, month, int(day))
sunrise = datetime.time(int(sunrise_hour), int(sunrise_minute))
sunset = datetime.time(int(sunset_hour), int(sunset_minute))
daylight.append([date, sunrise, sunset, summer])
return pd.DataFrame(daylight, columns=["Date", "Sunrise", "Sunset", "Summer"])
# daylight_warsaw_2013 = fetch_daylight_hours(52.2297, -21.0122, 1, 2, 2013)
# daylight_warsaw_2013.to_csv("bokeh/sampledata/daylight_warsaw_2013.csv", index=False)
def load_daylight_hours(file):
path = join(dirname(abspath(__file__)), file)
df = | pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"]) | pandas.read_csv |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
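# Example-only sketch of the helper above: the first tuple is treated as the
# header row and the remaining tuples become data rows (values are arbitrary).
def test_create_dataframe_example():
    result = create_dataframe((("a", "b"), (1, 2), (3, 4)))
    expected = pd.DataFrame({"a": [1, 3], "b": [2, 4]})
    pd.testing.assert_frame_equal(result, expected)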
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 97.38610996),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
### WEIGHTS FIXTURES ------------------------------------------------------
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_jan():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-11-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-12-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-02-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-03-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-04-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-05-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-06-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-07-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-08-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-09-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-10-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-11-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-12-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_feb():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-11-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
( | Timestamp('2013-12-01 00:00:00') | pandas.Timestamp |
'''
Created on Apr 23, 2018
@author: nishant.sethi
'''
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
'''load the matches file'''
matches = pd.read_csv('matches.csv')
matches["type"] = "pre-qualifier"
for year in range(2008, 2017):
final_match_index = matches[matches['season']==year][-1:].index.values[0]
matches = matches.set_value(final_match_index, "type", "final")
matches = matches.set_value(final_match_index-1, "type", "qualifier-2")
matches = matches.set_value(final_match_index-2, "type", "eliminator")
matches = matches.set_value(final_match_index-3, "type", "qualifier-1")
matches.groupby(["type"])["id"].count()
'''load the deliveries file'''
deliveries= | pd.read_csv('deliveries.csv') | pandas.read_csv |
'''
@Author: <NAME>
@Date: 2019-07-03 16:18:27
@LastEditors: Yudi
@LastEditTime: 2019-07-19 15:40:23
@Company: Cardinal Operation
@Email: <EMAIL>
@Description:
'''
import pickle
import gzip
import os
import gc
import time
import random
from itertools import chain
import numpy as np
import pandas as pd
import scipy.sparse as sparse
def parse(path):
g = gzip.open(path, 'rb')
for e in g:
yield eval(e)
def load_json_gz(path):
i = 0
df = {}
for d in parse(path):
df[i] = d
i += 1
return pd.DataFrame.from_dict(df, orient='index')
def load_init_influence(cat='yelp'):
wei_u = pickle.load(open(f'./data/experiment_result/influence_dict/{cat}/uid_influence_dict.pkl', 'rb'))
wei_i = pickle.load(open(f'./data/experiment_result/influence_dict/{cat}/iid_influence_dict.pkl', 'rb'))
return wei_u, wei_i
def load_weight(cat='yelp'):
p = f'./data/experiment_result/weight/{cat}/'
dec_i_wei = pickle.load(open(p + 'dec_i_wei.pkl', 'rb'))
dec_u_wei = pickle.load(open(p + 'dec_u_wei.pkl', 'rb'))
inc_i_wei = pickle.load(open(p + 'inc_i_wei.pkl', 'rb'))
inc_u_wei = pickle.load(open(p + 'inc_u_wei.pkl', 'rb'))
return inc_u_wei, inc_i_wei, dec_u_wei, dec_i_wei
def load_rid2id_ref(cat='yelp'):
p = f'./data/experiment_result/init_vec/{cat}/'
rid2id_u = pickle.load(open(p + 'rid2id_u.pkl', 'rb'))
rid2id_i = pickle.load(open(p + 'rid2id_i.pkl', 'rb'))
return rid2id_u, rid2id_i
def load_rate(cat_name, sample_num=None, sample_target='item'):
    assert cat_name in ['yelp', 'amazon'], 'invalid dataset selected, try "yelp" or "amazon"'
assert sample_target in ['user', 'item', 'all'], 'sample_target must be set "user", "item" or "all"'
if sample_target == 'item' or sample_target == 'user':
assert type(sample_num) is int, 'sample number for full data set must be integer......'
else:
assert type(sample_num) is int or type(sample_num) is float, 'sample number for full data set must be integer or float value in [0, 1]......'
if cat_name == 'yelp':
p = './data/raw_data/yelp_academic_dataset_review.csv' # from business.json
assert os.path.isfile(p), f'No such file {p}, please check again......'
df = pd.read_csv(p)
df.rename(index=str, columns={'user_id': 'user',
'business_id': 'item',
'stars': 'rating',
'date':'timestamp'}, inplace=True)
df['timestamp'] = df.timestamp.agg(lambda x: time.mktime(time.strptime(x,'%Y-%m-%d'))).astype(int)
df = df[['user', 'item', 'rating', 'timestamp']]
max_u_num, max_i_num = df.user.unique().size, df.item.unique().size
if sample_num is not None:
if sample_target == 'item':
sample_num = min(sample_num, max_i_num)
sub_item_set = random.sample(df.item.unique().tolist(), sample_num)
df = df[df.item.isin(sub_item_set)].sort_values(['user', 'item', 'timestamp']).reset_index(drop=True)
elif sample_target == 'user':
sample_num = min(sample_num, max_u_num)
sub_user_set = random.sample(df.user.unique().tolist(), sample_num)
df = df[df.user.isin(sub_user_set)].sort_values(['user', 'item', 'timestamp']).reset_index(drop=True)
else:
if type(sample_num) is float:
df = df.sample(frac=sample_num, random_state=2019).reset_index(drop=True)
elif type(sample_num) is int:
df = df.sample(n=sample_num, random_state=2019).reset_index(drop=True)
df.to_csv('./data/processed_data/yelp_exp_uir_data.csv', index=False)
return df
elif cat_name == 'amazon':
p = './data/raw_data/ratings_Digital_Music.csv'
assert os.path.exists(p), f'No such file {p}, please check again......'
df = | pd.read_csv(p, names=['user', 'item', 'rating', 'timestamp']) | pandas.read_csv |
import matplotlib.pylab as pylab
import math as m
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pdb
from pylab import rcParams
import matplotlib as mpl
mpl.use('AGG')
font = {'size': 40}
rcParams['figure.figsize'] = 10, 8
mpl.style.use('seaborn-paper')
rcParams['figure.figsize'] = 10, 8
# rcParams['savefig.pad_inches'] = 0.5
rcParams['figure.constrained_layout.use'] = True
mpl.rcParams['font.size'] = 15.0
params = {'legend.fontsize': 'x-large',
'axes.labelsize': 'x-large',
'axes.titlesize': 'x-large',
'xtick.labelsize': 'x-large',
'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
labelmap = {
'pcc': 'PCC',
'bbr': 'BBR',
'cubic': 'Cubic',
'hybla': 'Hybla'
}
colormap = {
'pcc': 'firebrick',
'bbr': 'olivedrab',
'cubic': 'teal',
'hybla': 'darkorchid'
}
# DATA_DIR = './data/2020-05-09/'
# DATA_DIR = './data/2020-06-01/'
# DATA_DIR = './data/2020-07-12/'
DATA_DIR = './data/2020-09-20'
global PREFIX
PREFIX = ""
def plot_rtt_cdf(df, column='mean'):
plt.close()
for protocol, data in df.groupby('protocol'):
sorted_rtt = data[column].sort_values().reset_index()
plt.plot(sorted_rtt[column], sorted_rtt.index /
sorted_rtt[column].count() * 100, label=labelmap[protocol], color=colormap[protocol])
plt.legend()
plt.ylabel("Cumulative Distribution\n(percent)")
plt.xlabel(f"RTT (ms)")
plt.savefig(f"{DATA_DIR}/{PREFIX}cdf_rtt_{column}.png")
def plot_loss_cdf(df):
column = 'loss'
plt.close()
print(df.head())
for protocol, data in df.groupby('protocol'):
data = data[data['loss'] < 0.95]
sorted_loss = data[column].sort_values().reset_index()
plt.plot(sorted_loss[column] * 100, sorted_loss.index /
sorted_loss[column].count() * 100, label=labelmap[protocol], color=colormap[protocol])
plt.legend()
plt.ylabel("Cumulative Distribution\n(percent)")
plt.xlabel(f"Retranmission\n(percent)")
plt.savefig(f"{DATA_DIR}/{PREFIX}cdf_{column}.png")
def plot_througput_cdf(df, column='mean'):
plt.close()
for protocol, data in df.groupby('protocol'):
sorted_throughput = data[column].sort_values().reset_index()
plt.plot(sorted_throughput[column], sorted_throughput.index /
sorted_throughput[column].count() * 100, label=labelmap[protocol], color=colormap[protocol])
plt.legend()
plt.ylabel("Percent")
plt.xlabel(f"{column} throughput")
plt.savefig(f"{DATA_DIR}/{PREFIX}cdf_{column}.png")
def summary_statistics(prefix=PREFIX):
global PREFIX
fname = f"{DATA_DIR}/{prefix}quantiles.csv"
print(f"summary of {fname}")
df = pd.read_csv(fname, index_col=0).dropna(how='all')
mean = df.groupby('protocol').mean()['mean']
std = df.groupby('protocol').std()['mean']
print(f'{prefix} mean throughput', mean)
print(f'{prefix} std throughput', std)
with open('out.txt', 'w') as output_file:
print(df.groupby('protocol').describe(), file=output_file)
fname = f"{DATA_DIR}/{prefix}rtt_quantiles.csv"
df = | pd.read_csv(fname, index_col=0) | pandas.read_csv |
"""Network representation and utilities
<NAME>, <NAME> & <NAME>
"""
import os,sys
import re
import numpy as np
import pandas as pd
import geopandas as gpd
import pygeos
import pygeos.geometry as pygeom
import contextily as ctx
from rasterstats import zonal_stats
import pyproj
import pylab as pl
from IPython import display
import seaborn as sns
import subprocess
from shapely.wkb import loads
import time
from timeit import default_timer as timer
import feather
import igraph as ig
from pandas import DataFrame
from shapely.geometry import Point, MultiPoint, LineString, GeometryCollection, shape, mapping
from shapely.ops import split, linemerge
from tqdm import tqdm
from pathlib import Path
# path to python scripts
sys.path.append(os.path.join('..','src','trails'))
from flow_model import *
from simplify import *
from extract import railway,ferries,mainRoads,roads
import pathlib
pd.options.mode.chained_assignment = None
#data_path = os.path.join('..','data')
data_path = (Path(__file__).parent.absolute().parent.absolute().parent.absolute())
data_path = Path(r'C:/data/')
road_types = ['primary','trunk','motorway','motorway_link','trunk_link','primary_link','secondary','secondary_link','tertiary','tertiary_link']
# optional progress bars
'''
if 'SNKIT_PROGRESS' in os.environ and os.environ['SNKIT_PROGRESS'] in ('1', 'TRUE'):
try:
from tqdm import tqdm
except ImportError:
from snkit.utils import tqdm_standin as tqdm
else:
from snkit.utils import tqdm_standin as tqdm
'''
class Network():
"""A Network is composed of nodes (points in space) and edges (lines)
Parameters
----------
nodes : pandas.DataFrame, optional
edges : pandas.DataFrame, optional
Attributes
----------
nodes : pandas.DataFrame
edges : pandas.DataFrame
"""
def __init__(self, nodes=None, edges=None):
"""
"""
if nodes is None:
nodes = pd.DataFrame()
self.nodes = nodes
if edges is None:
edges = pd.DataFrame()
self.edges = edges
def set_crs(self, crs=None, epsg=None):
"""Set network (node and edge) crs
Parameters
----------
crs : dict or str
Projection parameters as PROJ4 string or in dictionary form.
epsg : int
EPSG code specifying output projection
"""
if crs is None and epsg is None:
raise ValueError("Either crs or epsg must be provided to Network.set_crs")
if epsg is not None:
crs = {'init': 'epsg:{}'.format(epsg)}
        # tag the geometry columns with the srid; set_srid expects an integer EPSG code
        self.edges['geometry'] = pygeos.geometry.set_srid(self.edges['geometry'], epsg)
        self.nodes['geometry'] = pygeos.geometry.set_srid(self.nodes['geometry'], epsg)
def to_crs(self, crs=None, epsg=None):
"""Set network (node and edge) crs
Parameters
----------
crs : dict or str
Projection parameters as PROJ4 string or in dictionary form.
epsg : int
EPSG code specifying output projection
"""
if crs is None and epsg is None:
raise ValueError("Either crs or epsg must be provided to Network.set_crs")
if epsg is not None:
crs = {'init': 'epsg:{}'.format(epsg)}
self.edges.to_crs(crs, inplace=True)
self.nodes.to_crs(crs, inplace=True)
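# Minimal sketch of building a Network directly from pandas DataFrames holding
# pygeos geometries (the 'geometry' column name follows the conventions used
# throughout this module; the coordinates are illustrative only).
def _example_network():
    nodes = pd.DataFrame({'geometry': [pygeos.points(0, 0), pygeos.points(1, 1)]})
    edges = pd.DataFrame({'geometry': [pygeos.linestrings([[0, 0], [1, 1]])]})
    return Network(nodes=nodes, edges=edges)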
def add_ids(network, id_col='id', edge_prefix='', node_prefix=''):
"""Add or replace an id column with ascending ids
The ids are represented into int64s for easier conversion to numpy arrays
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
id_col (str, optional): [description]. Defaults to 'id'.
edge_prefix (str, optional): [description]. Defaults to ''.
node_prefix (str, optional): [description]. Defaults to ''.
Returns:
[type]: [description]
"""
nodes = network.nodes.copy()
if not nodes.empty:
nodes = nodes.reset_index(drop=True)
edges = network.edges.copy()
if not edges.empty:
edges = edges.reset_index(drop=True)
nodes[id_col] = range(len(nodes))
edges[id_col] = range(len(edges))
return Network(
nodes=nodes,
edges=edges
)
def add_topology(network, id_col='id'):
"""Add or replace from_id, to_id to edges
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
id_col (str, optional): [description]. Defaults to 'id'.
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
from_ids = []
to_ids = []
node_ends = []
bugs = []
sindex = pygeos.STRtree(network.nodes.geometry)
for edge in tqdm(network.edges.itertuples(), desc="topology", total=len(network.edges)):
start, end = line_endpoints(edge.geometry)
try:
start_node = nearest_node(start, network.nodes,sindex)
from_ids.append(start_node[id_col])
except:
bugs.append(edge.id)
from_ids.append(-1)
try:
end_node = nearest_node(end, network.nodes,sindex)
to_ids.append(end_node[id_col])
except:
bugs.append(edge.id)
to_ids.append(-1)
#print(len(bugs)," Edges not connected to nodes")
edges = network.edges.copy()
nodes = network.nodes.copy()
edges['from_id'] = from_ids
edges['to_id'] = to_ids
edges = edges.loc[~(edges.id.isin(list(bugs)))].reset_index(drop=True)
return Network(
nodes=network.nodes,
edges=edges
)
def get_endpoints(network):
"""Get nodes for each edge endpoint
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
[type]: [description]
"""
endpoints = []
for edge in tqdm(network.edges.itertuples(), desc="endpoints", total=len(network.edges)):
if edge.geometry is None:
continue
# 5 is MULTILINESTRING
        if pygeom.get_type_id(edge.geometry) == 5:
            for line in [pygeom.get_geometry(edge.geometry, i) for i in range(pygeom.get_num_geometries(edge.geometry))]:
start, end = line_endpoints(line)
endpoints.append(start)
endpoints.append(end)
else:
start, end = line_endpoints(edge.geometry)
endpoints.append(start)
endpoints.append(end)
# create dataframe to match the nodes geometry column name
return matching_df_from_geoms(network.nodes, endpoints)
def add_endpoints(network):
"""Add nodes at line endpoints
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
endpoints = get_endpoints(network)
nodes = concat_dedup([network.nodes, endpoints])
return Network(
nodes=nodes,
edges=network.edges
)
def round_geometries(network, precision=3):
"""Round coordinates of all node points and vertices of edge linestrings to some precision
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
precision (int, optional): [description]. Defaults to 3.
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
def _set_precision(geom):
return set_precision(geom, precision)
network.nodes.geometry = network.nodes.geometry.apply(_set_precision)
network.edges.geometry = network.edges.geometry.apply(_set_precision)
return network
def split_multilinestrings(network):
"""Create multiple edges from any MultiLineString edge
Ensures that edge geometries are all LineStrings, duplicates attributes over any
created multi-edges.
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
simple_edge_attrs = []
simple_edge_geoms = []
edges = network.edges
for edge in tqdm(edges.itertuples(index=False), desc="split_multi", total=len(edges)):
if pygeom.get_type_id(edge.geometry) == 5:
            edge_parts = [pygeom.get_geometry(edge.geometry, i) for i in range(pygeom.get_num_geometries(edge.geometry))]
else:
edge_parts = [edge.geometry]
for part in edge_parts:
simple_edge_geoms.append(part)
attrs = DataFrame([edge] * len(edge_parts))
simple_edge_attrs.append(attrs)
simple_edge_geoms = DataFrame(simple_edge_geoms, columns=['geometry'])
edges = pd.concat(simple_edge_attrs, axis=0).reset_index(drop=True).drop('geometry', axis=1)
edges = pd.concat([edges, simple_edge_geoms], axis=1)
return Network(
nodes=network.nodes,
edges=edges
)
def merge_multilinestrings(network):
"""Try to merge all multilinestring geometries into linestring geometries.
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
edges = network.edges.copy()
edges['geometry']= edges.geometry.apply(lambda x: merge_multilinestring(x))
return Network(edges=edges,
nodes=network.nodes)
def merge_multilinestring(geom):
"""Merge a MultiLineString to LineString
Args:
geom (pygeos.geometry): A pygeos geometry, most likely a linestring or a multilinestring
Returns:
geom (pygeos.geometry): A pygeos linestring geometry if merge was succesful. If not, it returns the input.
"""
    if pygeom.get_type_id(geom) == 5:  # get_type_id returns an int; 5 is MULTILINESTRING
        geom_inb = pygeos.line_merge(geom)
        # still something to fix if desired: the old is_ring branch returned the same value either way
        return geom_inb
else:
return geom
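# Minimal sketch of merge_multilinestring: two touching parts collapse into a
# single LineString (type id 1), while anything else is passed through
# unchanged. The coordinates below are illustrative only.
def _example_merge_multilinestring():
    part_a = pygeos.linestrings([[0, 0], [1, 1]])
    part_b = pygeos.linestrings([[1, 1], [2, 2]])
    merged = merge_multilinestring(pygeos.multilinestrings([part_a, part_b]))
    return pygeom.get_type_id(merged)  # -> 1 (LINESTRING)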
def snap_nodes(network, threshold=None):
"""Move nodes (within threshold) to edges
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
threshold ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
def snap_node(node):
snap = nearest_point_on_edges(node.geometry, network.edges)
distance = snap.distance(node.geometry)
if threshold is not None and distance > threshold:
snap = node.geometry
return snap
snapped_geoms = network.nodes.apply(snap_node, axis=1)
geom_col = geometry_column_name(network.nodes)
nodes = pd.concat([
network.nodes.drop(geom_col, axis=1),
DataFrame(snapped_geoms, columns=[geom_col])
], axis=1)
return Network(
nodes=nodes,
edges=network.edges
)
def link_nodes_to_edges_within(network, distance, condition=None, tolerance=1e-9):
"""Link nodes to all edges within some distance
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
distance ([type]): [description]
condition ([type], optional): [description]. Defaults to None.
tolerance ([type], optional): [description]. Defaults to 1e-9.
Returns:
[type]: [description]
"""
new_node_geoms = []
new_edge_geoms = []
for node in tqdm(network.nodes.itertuples(index=False), desc="link", total=len(network.nodes)):
# for each node, find edges within
edges = edges_within(node.geometry, network.edges, distance)
for edge in edges.itertuples():
if condition is not None and not condition(node, edge):
continue
# add nodes at points-nearest
point = nearest_point_on_line(node.geometry, edge.geometry)
if point != node.geometry:
new_node_geoms.append(point)
# add edges linking
line = LineString([node.geometry, point])
new_edge_geoms.append(line)
new_nodes = matching_df_from_geoms(network.nodes, new_node_geoms)
all_nodes = concat_dedup([network.nodes, new_nodes])
new_edges = matching_df_from_geoms(network.edges, new_edge_geoms)
all_edges = concat_dedup([network.edges, new_edges])
# split edges as necessary after new node creation
unsplit = Network(
nodes=all_nodes,
edges=all_edges
)
return split_edges_at_nodes(unsplit, tolerance)
def link_nodes_to_nearest_edge(network, condition=None):
"""Link nodes to all edges within some distance
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
condition ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
new_node_geoms = []
new_edge_geoms = []
for node in tqdm(network.nodes.itertuples(index=False), desc="link", total=len(network.nodes)):
# for each node, find edges within
edge = nearest_edge(node.geometry, network.edges)
if condition is not None and not condition(node, edge):
continue
# add nodes at points-nearest
point = nearest_point_on_line(node.geometry, edge.geometry)
if point != node.geometry:
new_node_geoms.append(point)
# add edges linking
line = pygeos.linestrings(pygeos.get_coordinates([node.geometry, point]))
new_edge_geoms.append(line)
new_nodes = matching_df_from_geoms(network.nodes, new_node_geoms)
all_nodes = concat_dedup([network.nodes, new_nodes])
new_edges = matching_df_from_geoms(network.edges, new_edge_geoms)
all_edges = concat_dedup([network.edges, new_edges])
# split edges as necessary after new node creation
unsplit = Network(
nodes=all_nodes,
edges=all_edges
)
return split_edges_at_nodes(unsplit)
def find_roundabouts(network):
"""Methods to find roundabouts
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
roundabouts (list): Returns the edges that can be identified as roundabouts
"""
roundabouts = []
for edge in network.edges.itertuples():
if pygeos.predicates.is_ring(edge.geometry): roundabouts.append(edge)
return roundabouts
def clean_roundabouts(network):
"""Methods to clean roundabouts and junctions should be done before
splitting edges at nodes to avoid logic conflicts
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
sindex = pygeos.STRtree(network.edges['geometry'])
edges = network.edges
new_geom = network.edges
new_edge = []
remove_edge=[]
new_edge_id = []
attributes = [x for x in network.edges.columns if x not in ['geometry','osm_id']]
roundabouts = find_roundabouts(network)
testy = []
for roundabout in roundabouts:
round_bound = pygeos.constructive.boundary(roundabout.geometry)
round_centroid = pygeos.constructive.centroid(roundabout.geometry)
remove_edge.append(roundabout.Index)
edges_intersect = _intersects(roundabout.geometry, network.edges['geometry'], sindex)
#Drop the roundabout from series so that no snapping happens on it
edges_intersect.drop(roundabout.Index,inplace=True)
#index at e[0] geometry at e[1] of edges that intersect with
for e in edges_intersect.items():
edge = edges.iloc[e[0]]
start = pygeom.get_point(e[1],0)
end = pygeom.get_point(e[1],-1)
first_co_is_closer = pygeos.measurement.distance(end, round_centroid) > pygeos.measurement.distance(start, round_centroid)
co_ords = pygeos.coordinates.get_coordinates(edge.geometry)
centroid_co = pygeos.coordinates.get_coordinates(round_centroid)
if first_co_is_closer:
new_co = np.concatenate((centroid_co,co_ords))
else:
new_co = np.concatenate((co_ords,centroid_co))
snap_line = pygeos.linestrings(new_co)
#an edge should never connect to more than 2 roundabouts, if it does this will break
if edge.osm_id in new_edge_id:
a = []
counter = 0
for x in new_edge:
if x[0]==edge.osm_id:
a = counter
break
counter += 1
double_edge = new_edge.pop(a)
start = pygeom.get_point(double_edge[-1],0)
end = pygeom.get_point(double_edge[-1],-1)
first_co_is_closer = pygeos.measurement.distance(end, round_centroid) > pygeos.measurement.distance(start, round_centroid)
co_ords = pygeos.coordinates.get_coordinates(double_edge[-1])
if first_co_is_closer:
new_co = np.concatenate((centroid_co,co_ords))
else:
new_co = np.concatenate((co_ords,centroid_co))
snap_line = pygeos.linestrings(new_co)
new_edge.append([edge.osm_id]+list(edge[list(attributes)])+[snap_line])
else:
new_edge.append([edge.osm_id]+list(edge[list(attributes)])+[snap_line])
new_edge_id.append(edge.osm_id)
remove_edge.append(e[0])
new = pd.DataFrame(new_edge,columns=['osm_id']+attributes+['geometry'])
dg = network.edges.loc[~network.edges.index.isin(remove_edge)]
ges = pd.concat([dg,new]).reset_index(drop=True)
return Network(edges=ges, nodes=network.nodes)
def find_hanging_nodes(network):
"""Simply returns a dataframe of nodes with degree 1, technically not all of
these are "hanging"
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
[type]: [description]
"""
hang_index = np.where(network.nodes['degree']==1)
return network.nodes.iloc[hang_index]
def add_distances(network):
"""This method adds a distance column using pygeos (converted from shapely)
assuming the new crs from the latitude and longitude of the first node
distance is in metres
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
#Find crs of current df and arbitrary point(lat,lon) for new crs
current_crs="epsg:4326"
#The commented out crs does not work in all cases
#current_crs = [*network.edges.crs.values()]
#current_crs = str(current_crs[0])
lat = pygeom.get_y(network.nodes['geometry'].iloc[0])
lon = pygeom.get_x(network.nodes['geometry'].iloc[0])
# formula below based on :https://gis.stackexchange.com/a/190209/80697
approximate_crs = "epsg:" + str(int(32700-np.round((45+lat)/90,0)*100+np.round((183+lon)/6,0)))
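# Worked example of the EPSG formula above (a sketch with hypothetical
# coordinates, not part of the original code): for lat=52.1, lon=5.2 the formula
# gives 32700 - round((45+52.1)/90)*100 + round((183+5.2)/6) = 32700 - 100 + 31
# = 32631, i.e. "epsg:32631" (WGS 84 / UTM zone 31N). For a southern-hemisphere
# point such as lat=-33.9, lon=18.4 it yields 32700 - 0 + 34 = 32734 (UTM zone 34S).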
#from pygeos/issues/95
geometries = network.edges['geometry']
coords = pygeos.get_coordinates(geometries)
transformer=pyproj.Transformer.from_crs(current_crs, approximate_crs,always_xy=True)
new_coords = transformer.transform(coords[:, 0], coords[:, 1])
result = pygeos.set_coordinates(geometries.copy(), np.array(new_coords).T)
dist = pygeos.length(result)
edges = network.edges.copy()
edges['distance'] = dist
return Network(
nodes=network.nodes,
edges=edges)
def add_travel_time(network):
"""Add travel time column to network edges. Time is in hours
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
if 'distance' not in network.edges.columns:
network = add_distances(network)
speed_d = {
'motorway':80000,
'motorway_link': 65000,
'trunk': 60000,
'trunk_link':50000,
'primary': 50000, # metres ph
'primary_link':40000,
'secondary': 40000, # metres ph
'secondary_link':30000,
'tertiary':30000,
'tertiary_link': 20000,
'unclassified':20000,
'service':20000,
'residential': 20000, # metres ph
}
def calculate_time(edge):
try:
return edge['distance'] / (edge['maxspeed']*1000) #metres per hour
except:
return edge['distance'] / speed_d.get('unclassified')
network.edges['time'] = network.edges.apply(calculate_time,axis=1)
return network
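# Illustrative check of calculate_time above (hypothetical numbers, not part of
# the original code): an edge with distance 8000 m and maxspeed 80 (km/h) gives
# 8000 / (80 * 1000) = 0.1 hours; an edge with no usable maxspeed falls back to
# speed_d['unclassified'] = 20000 m/h, so 8000 / 20000 = 0.4 hours.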
def calculate_degree(network):
"""Calculates the degree of the nodes from the from and to ids. It
is not wise to call this method after removing nodes or edges
without first resetting the ids
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
Connectivity degree (numpy.array): [description]
"""
#the number of nodes(from index) to use as the number of bins
ndC = len(network.nodes.index)
if ndC-1 > max(network.edges.from_id) and ndC-1 > max(network.edges.to_id): print("Calculate_degree possibly unhappy")
return np.bincount(network.edges['from_id'],None,ndC) + np.bincount(network.edges['to_id'],None,ndC)
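# Small sketch of how the bincount sums above behave (hypothetical values, not
# part of the original code): for a 4-node network with edges (0->1, 1->2, 1->3)
# np.bincount([0, 1, 1], None, 4) + np.bincount([1, 2, 3], None, 4)
# evaluates to array([1, 3, 1, 1]), i.e. node 1 has degree 3.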
#Adds a degree column to the node dataframe
def add_degree(network):
"""[summary]
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
degree = calculate_degree(network)
network.nodes['degree'] = degree
return network
def drop_hanging_nodes(network, tolerance = 0.005):
"""This method drops any single degree nodes and their associated edges given a
distance(degrees) threshold. This primarily happens when a road was connected to residential
areas, most often these are link roads that no longer do so.
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
tolerance (float, optional): The maximum allowed distance from hanging nodes to the network. Defaults to 0.005.
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
if 'degree' not in network.nodes.columns:
deg = calculate_degree(network)
else: deg = network.nodes['degree'].to_numpy()
#hangNodes : An array of the indices of nodes with degree 1
hangNodes = np.where(deg==1)
ed = network.edges.copy()
to_ids = ed['to_id'].to_numpy()
from_ids = ed['from_id'].to_numpy()
hangTo = np.isin(to_ids,hangNodes)
hangFrom = np.isin(from_ids,hangNodes)
#eInd : An array containing the indices of edges that connect
#the degree 1 nodes
eInd = np.hstack((np.nonzero(hangTo),np.nonzero(hangFrom)))
degEd = ed.iloc[np.sort(eInd[0])]
edge_id_drop = []
for d in degEd.itertuples():
dist = pygeos.measurement.length(d.geometry)
#If the edge is shorter than the tolerance
#add the ID to the drop list and update involved node degrees
if dist < tolerance:
edge_id_drop.append(d.id)
deg[d.from_id] -= 1
deg[d.to_id] -= 1
# drops disconnected edges, some may still persist since we have not merged yet
if deg[d.from_id] == 1 and deg[d.to_id] == 1:
edge_id_drop.append(d.id)
deg[d.from_id] -= 1
deg[d.to_id] -= 1
edg = ed.loc[~(ed.id.isin(edge_id_drop))].reset_index(drop=True)
aa = ed.loc[ed.id.isin(edge_id_drop)]
edg.drop(labels=['id'],axis=1,inplace=True)
edg['id'] = range(len(edg))
n = network.nodes.copy()
n['degree'] = deg
#Degree 0 Nodes are cleaned in the merge_2 method
#x = n.loc[n.degree==0]
#nod = n.loc[n.degree > 0].reset_index(drop=True)
return Network(nodes = n,edges=edg)
def merge_edges(network, print_err=False):
"""This method removes all degree 2 nodes and merges their associated edges, at
the moment it arbitrarily uses the first edge's attributes for the new edges
column attributes, in the future the mean or another measure can be used
to set these new values. The general strategy is to find a node of degree 2,
and the associated 2 edges, then traverse edges and nodes in both directions
until a node of degree !=2 is found, at this point stop in this direction. Reset the
geometry and from/to ids for this edge, delete the nodes and edges traversed.
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
print_err (bool, optional): If True, print edges that fail to merge. Defaults to False.
Returns:
network (class): A network composed of nodes (points in space) and edges (lines)
"""
net = network
nod = net.nodes.copy()
edg = net.edges.copy()
optional_cols = edg.columns.difference(['osm_id','geometry','from_id','to_id','id'])
edg_sindex = pygeos.STRtree(network.edges.geometry)
if 'degree' not in network.nodes.columns:
deg = calculate_degree(network)
else: deg = nod['degree'].to_numpy()
#For the 0.002s speed up, alternatively do a straightforward loc[degree==2]
degree2 = np.where(deg==2)
#n2: is the set of all node IDs that are degree 2
n2 = set((nod['id'].iloc[degree2]))
#TODO if you create a dictionary to mask values this geometry
#array nodGeom can be made to only contain the 'geometry' of degree 2
#nodes
nodGeom = nod['geometry']
eIDtoRemove =[]
nIDtoRemove =[]
c = 0
#pbar = tqdm(total=len(n2))
while n2:
newEdge = []
info_first_edge = []
possibly_delete = []
pos_0_deg = []
nodeID = n2.pop()
pos_0_deg.append(nodeID)
#Co-ordinates of current node
node_geometry = nodGeom[nodeID]
eID = set(edg_sindex.query(node_geometry,predicate='intersects'))
#Find the nearest 2 edges, unless there is an error in the dataframe
#this will return the connected edges using spatial indexing
if len(eID) > 2: edgePath1, edgePath2 = find_closest_2_edges(eID,nodeID,edg,node_geometry)
elif len(eID) < 2:
continue
else:
edgePath1 = edg.iloc[eID.pop()]
edgePath2 = edg.iloc[eID.pop()]
#For the two edges found, identify the next 2 nodes in either direction
nextNode1 = edgePath1.to_id if edgePath1.from_id==nodeID else edgePath1.from_id
nextNode2 = edgePath2.to_id if edgePath2.from_id==nodeID else edgePath2.from_id
if nextNode1==nextNode2: continue
possibly_delete.append(edgePath2.id)
#At the moment the first edge information is used for the merged edge
info_first_edge = edgePath1.id
newEdge.append(edgePath1.geometry)
newEdge.append(edgePath2.geometry)
#While the next node along the path is degree 2 keep traversing
while deg[nextNode1] == 2:
if nextNode1 in pos_0_deg: break
nextNode1Geom = nodGeom[nextNode1]
eID = set(edg_sindex.query(nextNode1Geom,predicate='intersects'))
eID.discard(edgePath1.id)
try:
edgePath1 = min([edg.iloc[match_idx] for match_idx in eID],
key= lambda match: pygeos.distance(nextNode1Geom,(match.geometry)))
except:
continue
pos_0_deg.append(nextNode1)
n2.discard(nextNode1)
nextNode1 = edgePath1.to_id if edgePath1.from_id==nextNode1 else edgePath1.from_id
newEdge.append(edgePath1.geometry)
possibly_delete.append(edgePath1.id)
while deg[nextNode2] == 2:
if nextNode2 in pos_0_deg: break
nextNode2Geom = nodGeom[nextNode2]
eID = set(edg_sindex.query(nextNode2Geom,predicate='intersects'))
eID.discard(edgePath2.id)
try:
edgePath2 = min([edg.iloc[match_idx] for match_idx in eID],
key= lambda match: pygeos.distance(nextNode2Geom,(match.geometry)))
except: continue
pos_0_deg.append(nextNode2)
n2.discard(nextNode2)
nextNode2 = edgePath2.to_id if edgePath2.from_id==nextNode2 else edgePath2.from_id
newEdge.append(edgePath2.geometry)
possibly_delete.append(edgePath2.id)
#Update the information of the first edge
new_merged_geom = pygeos.line_merge(pygeos.multilinestrings([x for x in newEdge]))
if pygeom.get_type_id(new_merged_geom) == 1:
edg.at[info_first_edge,'geometry'] = new_merged_geom
if nodGeom[nextNode1]==pygeom.get_point(new_merged_geom,0):
edg.at[info_first_edge,'from_id'] = nextNode1
edg.at[info_first_edge,'to_id'] = nextNode2
else:
edg.at[info_first_edge,'from_id'] = nextNode2
edg.at[info_first_edge,'to_id'] = nextNode1
eIDtoRemove += possibly_delete
possibly_delete.append(info_first_edge)
for x in pos_0_deg:
deg[x] = 0
mode_edges = edg.loc[edg.id.isin(possibly_delete)]
edg.loc[info_first_edge,optional_cols] = mode_edges[optional_cols].mode().iloc[0].values
else:
if print_err: print("Line", info_first_edge, "failed to merge, has pygeos type ", pygeom.get_type_id(edg.at[info_first_edge,'geometry']))
#pbar.update(1)
#pbar.close()
edg = edg.loc[~(edg.id.isin(eIDtoRemove))].reset_index(drop=True)
#We remove all degree 0 nodes, including those found in dropHanging
n = nod.loc[nod.degree > 0].reset_index(drop=True)
return Network(nodes=n,edges=edg)
def find_closest_2_edges(edgeIDs, nodeID, edges, nodGeometry):
"""Returns the 2 edges connected to the current node
Args:
edgeIDs ([type]): [description]
nodeID ([type]): [description]
edges ([type]): [description]
nodGeometry ([type]): [description]
Returns:
[type]: [description]
"""
edgePath1 = min([edges.iloc[match_idx] for match_idx in edgeIDs],
key=lambda match: pygeos.distance(nodGeometry,match.geometry))
edgeIDs.remove(edgePath1.id)
edgePath2 = min([edges.iloc[match_idx] for match_idx in edgeIDs],
key=lambda match: pygeos.distance(nodGeometry,match.geometry))
return edgePath1, edgePath2
def geometry_column_name(df):
"""Get geometry column name, fall back to 'geometry'
Args:
df (pandas.DataFrame): [description]
Returns:
geom_col (string): [description]
"""
try:
geom_col = df.geometry.name
except AttributeError:
geom_col = 'geometry'
return geom_col
def matching_df_from_geoms(df, geoms):
"""Create a geometry-only DataFrame with column name to match an existing DataFrame
Args:
df (pandas.DataFrame): [description]
geoms (numpy.array): numpy array with pygeos geometries
Returns:
[type]: [description]
"""
geom_col = geometry_column_name(df)
return pd.DataFrame(geoms, columns=[geom_col])
def concat_dedup(dfs):
"""Concatenate a list of GeoDataFrames, dropping duplicate geometries
- note: repeatedly drops indexes for deduplication to work
Args:
dfs ([type]): [description]
Returns:
[type]: [description]
"""
cat = pd.concat(dfs, axis=0, sort=False)
cat.reset_index(drop=True, inplace=True)
cat_dedup = drop_duplicate_geometries(cat)
cat_dedup.reset_index(drop=True, inplace=True)
return cat_dedup
def node_connectivity_degree(node, network):
"""Get the degree of connectivity for a node.
Args:
node ([type]): [description]
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
[type]: [description]
"""
return len(
network.edges[
(network.edges.from_id == node) | (network.edges.to_id == node)
]
)
def drop_duplicate_geometries(df, keep='first'):
"""Drop duplicate geometries from a dataframe
Convert to wkb so drop_duplicates will work as discussed
in https://github.com/geopandas/geopandas/issues/521
Args:
df (pandas.DataFrame): [description]
keep (str, optional): [description]. Defaults to 'first'.
Returns:
[type]: [description]
"""
mask = df.geometry.apply(lambda geom: pygeos.to_wkb(geom))
# use dropped duplicates index to drop from actual dataframe
return df.iloc[mask.drop_duplicates(keep).index]
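# Minimal sketch of drop_duplicate_geometries (hypothetical data, not part of
# the original code): two identical points collapse to a single row.
# df_example = pd.DataFrame({'geometry': [pygeos.points(0, 0),
#                                         pygeos.points(0, 0),
#                                         pygeos.points(1, 1)]})
# drop_duplicate_geometries(df_example)  # keeps rows 0 and 2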
def nearest_point_on_edges(point, edges):
"""Find nearest point on edges to a point
Args:
point (pygeos.geometry): [description]
edges (network.edges): [description]
Returns:
[type]: [description]
"""
edge = nearest_edge(point, edges, pygeos.STRtree(edges['geometry']))
snap = nearest_point_on_line(point, edge.geometry)
return snap
def nearest_node(point, nodes,sindex):
"""Find nearest node to a point
Args:
point *pygeos.geometry): [description]
nodes (network.nodes): [description]
sindex ([type]): [description]
Returns:
[type]: [description]
"""
return nearest(point, nodes,sindex)
def nearest_edge(point, edges,sindex):
"""Find nearest edge to a point
Args:
point (pygeos.geometry): [description]
edges (network.edges): [description]
sindex ([type]): [description]
Returns:
[type]: [description]
"""
return nearest(point, edges,sindex)
def nearest(geom, df,sindex):
"""Find the element of a DataFrame nearest a geometry
Args:
geom (pygeos.geometry): [description]
df (pandas.DataFrame): [description]
sindex ([type]): [description]
Returns:
[type]: [description]
"""
matches_idx = sindex.query(geom)
nearest_geom = min(
[df.iloc[match_idx] for match_idx in matches_idx],
key=lambda match: pygeos.measurement.distance(match.geometry,geom)
)
return nearest_geom
def edges_within(point, edges, distance):
"""Find edges within a distance of point
Args:
point (pygeos.geometry): [description]
edges (network.edges): [description]
distance ([type]): [description]
Returns:
[type]: [description]
"""
return d_within(point, edges, distance)
def d_within(geom, df, distance):
"""Find the subset of a DataFrame within some distance of a shapely geometry
Args:
geom (pygeos.geometry): [description]
df (pandas.DataFrame): [description]
distance ([type]): [description]
Returns:
[type]: [description]
"""
sindex = pygeos.STRtree(df['geometry'])
return _intersects(geom, df, sindex, distance)
def _intersects(geom, df, sindex,tolerance=1e-9):
"""[summary]
Args:
geom (pygeos.geometry): [description]
df ([type]): [description]
sindex ([type]): [description]
tolerance ([type], optional): [description]. Defaults to 1e-9.
Returns:
[type]: [description]
"""
buffer = pygeos.buffer(geom,tolerance)
if pygeos.is_empty(buffer):
# can have an empty buffer with too small a tolerance, fallback to original geom
buffer = geom
try:
return _intersects_df(buffer, df,sindex)
except:
# can exceptionally buffer to an invalid geometry, so try re-buffering
buffer = pygeos.buffer(geom,0)
return _intersects_df(buffer, df,sindex)
def _intersects_df(geom, df,sindex):
"""[summary]
Args:
geom ([type]): [description]
df ([type]): [description]
sindex ([type]): [description]
Returns:
[type]: [description]
"""
return df[sindex.query(geom,'intersects')]
def intersects(geom, df, sindex, tolerance=1e-9):
"""Find the subset of a GeoDataFrame intersecting with a shapely geometry
Args:
geom ([type]): [description]
df ([type]): [description]
sindex ([type]): [description]
tolerance ([type], optional): [description]. Defaults to 1e-9.
Returns:
[type]: [description]
"""
return _intersects(geom, df, sindex, tolerance)
def nodes_intersecting(line,nodes,sindex,tolerance=1e-9):
"""Find nodes intersecting line
Args:
line ([type]): [description]
nodes ([type]): [description]
sindex ([type]): [description]
tolerance ([type], optional): [description]. Defaults to 1e-9.
Returns:
[type]: [description]
"""
return intersects(line, nodes,sindex, tolerance)
def line_endpoints(line):
"""Return points at first and last vertex of a line
Args:
line ([type]): [description]
Returns:
[type]: [description]
"""
start = pygeom.get_point(line,0)
end = pygeom.get_point(line,-1)
return start, end
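# Sketch of line_endpoints (hypothetical line, not part of the original code):
# line = pygeos.linestrings([[0, 0], [1, 0], [2, 1]])
# line_endpoints(line)  # -> (POINT (0 0), POINT (2 1))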
def split_edge_at_points(edge, points, tolerance=1e-9):
"""Split edge at point/multipoint
Args:
edge ([type]): [description]
points (pygeos.geometry): [description]
tolerance ([type], optional): [description]. Defaults to 1e-9.
Returns:
[type]: [description]
"""
try:
segments = split_line(edge.geometry, points, tolerance)
except ValueError:
# if splitting fails, e.g. because points is an empty GeometryCollection
segments = [edge.geometry]
edges = pd.DataFrame([edge] * len(segments))
edges.geometry = segments
return edges
def split_line(line, points, tolerance=1e-9):
"""Split line at point or multipoint, within some tolerance
Args:
line (pygeos.geometry): [description]
points (pygeos.geometry): [description]
tolerance ([type], optional): [description]. Defaults to 1e-9.
Returns:
[type]: [description]
"""
to_split = snap_line(line, points, tolerance)
return list(split(to_split, points))
def snap_line(line, points, tolerance=1e-9):
"""Snap a line to points within tolerance, inserting vertices as necessary
Args:
line (pygeos.geometry): [description]
points (pygeos.geometry): [description]
tolerance ([type], optional): [description]. Defaults to 1e-9.
Returns:
[type]: [description]
"""
if pygeom.get_type_id(points) == 0:  # a single point
if pygeos.distance(points, line) < tolerance:
line = pygeos.snap(line, points, tolerance=tolerance)
elif pygeom.get_type_id(points) == 4:  # a multipoint
points = [point for point in pygeos.get_parts(points) if pygeos.distance(point, line) < tolerance]
for point in points:
line = pygeos.snap(line, point, tolerance=tolerance)
return line
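# Usage sketch for snap_line (hypothetical geometries, not part of the original
# code): a point within tolerance of the line is snapped onto it.
# line = pygeos.linestrings([[0, 0], [1, 0]])
# pt = pygeos.points(0.5, 1e-10)
# snapped = snap_line(line, pt, tolerance=1e-9)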
def nearest_point_on_line(point, line):
"""Return the nearest point on a line
Args:
point (pygeos.geometry): [description]
line (pygeos.geometry): [description]
Returns:
[type]: [description]
"""
return pygeos.line_interpolate_point(line, pygeos.line_locate_point(line, point))
def set_precision(geom, precision):
"""Set geometry precision
Args:
geom (pygeos.geometry): [description]
precision ([type]): [description]
Returns:
[type]: [description]
"""
coords = np.round(pygeos.get_coordinates(geom), precision)
return pygeos.set_coordinates(np.array([geom]), coords)[0]
def reset_ids(network):
"""Resets the ids of the nodes and edges, editing the refereces in edge table
using dict masking
Args:
network (class): A network composed of nodes (points in space) and edges (lines)
Returns:
[type]: [description]
"""
nodes = network.nodes.copy()
edges = network.edges.copy()
to_ids = edges['to_id'].to_numpy()
from_ids = edges['from_id'].to_numpy()
new_node_ids = range(len(nodes))
#creates a dictionary of the node ids and the actual indices
id_dict = dict(zip(nodes.id,new_node_ids))
nt = np.copy(to_ids)
nf = np.copy(from_ids)
#updates all from and to ids; because many nodes are affected, this
#is an efficient approach for large dataframes
for k,v in id_dict.items():
nt[to_ids==k] = v
nf[from_ids==k] = v
edges.drop(labels=['to_id','from_id'],axis=1,inplace=True)
edges['from_id'] = nf
edges['to_id'] = nt
nodes.drop(labels=['id'],axis=1,inplace=True)
nodes['id'] = new_node_ids
edges['id'] = range(len(edges))
edges.reset_index(drop=True,inplace=True)
nodes.reset_index(drop=True,inplace=True)
return Network(edges=edges,nodes=nodes)
def nearest_network_node_list(gdf_admin,gdf_nodes,sg):
"""[summary]
Args:
gdf_admin ([type]): [description]
gdf_nodes ([type]): [description]
sg ([type]): [description]
Returns:
[type]: [description]
"""
gdf_nodes = gdf_nodes.loc[gdf_nodes.id.isin(sg.vs['name'])]
gdf_nodes.reset_index(drop=True,inplace=True)
nodes = {}
for admin_ in gdf_admin.itertuples():
if (pygeos.distance((admin_.geometry),gdf_nodes.geometry).min()) > 0.005:
continue
nodes[admin_.id] = gdf_nodes.iloc[pygeos.distance((admin_.geometry),gdf_nodes.geometry).idxmin()].id
return nodes
def split_edges_at_nodes(network, tolerance=1e-9):
"""Split network edges where they intersect node geometries
"""
sindex_nodes = pygeos.STRtree(network.nodes['geometry'])
sindex_edges = pygeos.STRtree(network.edges['geometry'])
attributes = [x for x in network.edges.columns if x not in ['index','geometry','osm_id']]
grab_all_edges = []
for edge in (network.edges.itertuples(index=False)):
hits_nodes = nodes_intersecting(edge.geometry,network.nodes['geometry'],sindex_nodes, tolerance=1e-9)
hits_edges = nodes_intersecting(edge.geometry,network.edges['geometry'],sindex_edges, tolerance=1e-9)
hits_edges = pygeos.set_operations.intersection(edge.geometry,hits_edges)
try:
hits_edges = (hits_edges[~(pygeos.predicates.covers(hits_edges,edge.geometry))])
hits_edges = pd.Series([pygeos.points(item) for sublist in [pygeos.get_coordinates(x) for x in hits_edges] for item in sublist],name='geometry')
hits = [pygeos.points(x) for x in pygeos.coordinates.get_coordinates(
pygeos.constructive.extract_unique_points(pygeos.multipoints(pd.concat([hits_nodes,hits_edges]).values)))]
except TypeError:
return hits_edges
hits = pd.DataFrame(hits,columns=['geometry'])
# get points and geometry as list of coordinates
split_points = pygeos.coordinates.get_coordinates(pygeos.snap(hits,edge.geometry,tolerance=1e-9))
coor_geom = pygeos.coordinates.get_coordinates(edge.geometry)
# potentially split to multiple edges
split_locs = np.argwhere(np.isin(coor_geom, split_points).all(axis=1))[:,0]
split_locs = list(zip(split_locs.tolist(), split_locs.tolist()[1:]))
new_edges = [coor_geom[split_loc[0]:split_loc[1]+1] for split_loc in split_locs]
grab_all_edges.append([[edge.osm_id]*len(new_edges),[pygeos.linestrings(edge) for edge in new_edges],[edge[1:-1]]*len(new_edges)])
big_list = [list(zip(x[0],x[1],x[2])) for x in grab_all_edges]
# combine all new edges
edges = pd.DataFrame([[item[0],item[1]]+list(item[2]) for sublist in big_list for item in sublist],
columns=['osm_id','geometry']+attributes)
# return new network with split edges
return Network(
nodes=network.nodes,
edges=edges
)
def fill_attributes(network):
"""[summary]
Args:
edges ([type]): [description]
Returns:
[type]: [description]
"""
speed_d = {
'motorway':'80',
'motorway_link': '65',
'trunk': '60',
'trunk_link':'50',
'primary': '50', # metres ph
'primary_link':'40',
'secondary': '40', # metres ph
'secondary_link':'30',
'tertiary':'30',
'tertiary_link': '20',
'unclassified':'20',
'service':'20',
'residential': '20', # mph
}
lanes_d = {
'motorway':'4',
'motorway_link': '2',
'trunk': '4',
'trunk_link':'2',
'primary': '2', # metres ph
'primary_link':'1',
'secondary': '2', # metres ph
'secondary_link':'1',
'tertiary':'2',
'tertiary_link': '1',
'unclassified':'2',
'service':'1',
'residential': '1', # mph
}
df_speed = pd.DataFrame.from_dict(speed_d,orient='index',columns=['maxspeed'])
df_lanes = pd.DataFrame.from_dict(lanes_d,orient='index',columns=['lanes'])
def turn_to_int(x):
if isinstance(x.maxspeed,str):
if len(re.findall(r'\d+',x.maxspeed)) > 0:
return re.findall(r'\d+',x.maxspeed)[0]
else:
return speed_d[x.highway]
else:
return x.maxspeed
network.edges.maxspeed = network.edges.apply(turn_to_int,axis=1)
try:
vals_to_assign = network.edges.groupby('highway')[['lanes','maxspeed']].agg(pd.Series.mode)
except:
vals_to_assign = df_lanes.join(df_speed)
#print(vals_to_assign)
try:
vals_to_assign.lanes.iloc[0]
except:
print('NOTE: No lanes values available in the country, fall back on default')
vals_to_assign = vals_to_assign.join(df_lanes)
try:
vals_to_assign.maxspeed.iloc[0]
except:
print('NOTE: No maxspeed values available in the country, fall back on default')
vals_to_assign = vals_to_assign.join(df_speed)
def fill_empty_maxspeed(x):
if len(list(x.maxspeed)) == 0:
return speed_d[x.name]
else:
return x.maxspeed
def fill_empty_lanes(x):
if len(list(x.lanes)) == 0:
return lanes_d[x.name]
else:
return x.lanes
def get_max_in_vals_to_assign(x):
if isinstance(x,list):
return max([(y) for y in x])
else:
try:
return re.findall(r'\d+',x)[0]
except:
return x
def get_max(x):
return max([int(y) for y in x])
#fill empty cells
vals_to_assign.lanes = vals_to_assign.apply(lambda x: fill_empty_lanes(x),axis=1)
vals_to_assign.maxspeed = vals_to_assign.apply(lambda x: fill_empty_maxspeed(x),axis=1)
vals_to_assign.maxspeed = vals_to_assign.maxspeed.apply(lambda x: get_max_in_vals_to_assign(x))
def fill_oneway(x):
if isinstance(x.oneway,str):
return x.oneway
else:
return 'no'
def fill_lanes(x):
if isinstance(x.lanes,str):
try:
return int(x.lanes)
except:
try:
return int(get_max(re.findall(r'\d+', x.lanes)))
except:
return int(vals_to_assign.loc[x.highway].lanes)
elif x.lanes is None:
if isinstance(vals_to_assign.loc[x.highway].lanes,np.ndarray):
return int(get_max(vals_to_assign.loc[x.highway.split('_')[0]].lanes))
else:
return int(vals_to_assign.loc[x.highway].lanes)
elif np.isnan(x.lanes):
if isinstance(vals_to_assign.loc[x.highway].lanes,np.ndarray):
return int(get_max(vals_to_assign.loc[x.highway.split('_')[0]].lanes))
else:
return int(vals_to_assign.loc[x.highway].lanes)
def fill_maxspeed(x):
if isinstance(x.maxspeed,str):
try:
return [int(s) for s in x.maxspeed.split() if s.isdigit()][0]
except:
try:
return int(get_max(vals_to_assign.loc[x.highway.split('_')[0]].maxspeed))
except:
try:
return int(get_max(re.findall(r'\d+', x.maxspeed)))
except:
return int(vals_to_assign.loc[x.highway].maxspeed)
elif x.maxspeed is None:
if isinstance(vals_to_assign.loc[x.highway].maxspeed,np.ndarray):
return int(get_max(vals_to_assign.loc[x.highway.split('_')[0]].maxspeed))
else:
try:
return int(get_max(re.findall(r'\d+', x.maxspeed)))
except:
return int(vals_to_assign.loc[x.highway].maxspeed)
elif np.isnan(x.maxspeed):
if isinstance(vals_to_assign.loc[x.highway].maxspeed,np.ndarray):
try:
return int(get_max(vals_to_assign.loc[x.highway.split('_')[0]].maxspeed))
except:
print(vals_to_assign.loc[x.highway].maxspeed)
return int(vals_to_assign.loc[x.highway].maxspeed)
else:
return int(vals_to_assign.loc[x.highway].maxspeed)
network.edges['oneway'] = network.edges.apply(lambda x: fill_oneway(x),axis=1)
network.edges['lanes'] = network.edges.apply(lambda x: fill_lanes(x),axis=1)
network.edges['maxspeed'] = network.edges.apply(lambda x: fill_maxspeed(x),axis=1)
return network
def simplified_network(df,drop_hanging_nodes_run=True,fill_attributes_run=True):
"""returns a geopandas dataframe of a simplified network
Args:
df ([type]): [description]
drop_hanging_nodes_run (bool, optional): [description]. Defaults to True.
Returns:
[type]: [description]
"""
net = Network(edges=df)
net = clean_roundabouts(net)
net = add_endpoints(net)
net = split_edges_at_nodes(net)
net = add_endpoints(net)
net = add_ids(net)
net = add_topology(net)
if drop_hanging_nodes_run:
net = drop_hanging_nodes(net)
else:
net.nodes['degree'] = calculate_degree(net)
net = merge_edges(net)
net.edges = drop_duplicate_geometries(net.edges, keep='first')
net = reset_ids(net)
net = add_distances(net)
net = merge_multilinestrings(net)
if fill_attributes_run:
net = fill_attributes(net)
net = add_travel_time(net)
return net
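# Example usage (a minimal sketch, not part of the original code; it assumes a
# `roads` loader that returns a dataframe of OSM edge geometries, as used in
# ferry_connected_network below, and a hypothetical file path):
# road_edges = roads('data/country_osm/NLD.osm.pbf')
# net = simplified_network(road_edges)
# print(len(net.nodes), len(net.edges))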
def ferry_connected_network(country,data_path,tqdm_on=True):
"""
connect ferries to main network (and connect smaller sub networks automatically)
Args:
country ([type]): [description]
data_path ([type]): [description]
Returns:
[type]: [description]
"""
if not tqdm_on:
from utils import tqdm_standin as tqdm
# load full network
full_network = roads(str(data_path.joinpath('country_osm','{}.osm.pbf'.format(country))))
main_road_network = full_network.loc[full_network.highway.isin(road_types)].reset_index(drop=True)
# load ferries
ferry_network = ferries(str(data_path.joinpath('country_osm','{}.osm.pbf'.format(country))))
# create a main network where hanging nodes are not removed
network_with_hanging = simplified_network(main_road_network,drop_hanging_nodes_run=False)
nodes,edges = network_with_hanging.nodes.copy(),network_with_hanging.edges.copy()
# create connections between ferry network and the main network
connectors = connect_ferries(country,full_network,ferry_network)
# loop through ferry connectors to add to edges of main network
for link in connectors.itertuples():
start = pygeos.get_point(link.geometry,0)
end = pygeos.get_point(link.geometry,-1)
from_id = nodes.id.loc[nodes.geometry==start]
to_id = nodes.id.loc[nodes.geometry==end]
edges = edges.append({ 'osm_id': np.random.random_integers(1e7,1e8),
'geometry': link.geometry,
'highway': 'ferry_connector',
'maxspeed': 10,
'oneway': 'no',
'lanes': 2},
ignore_index=True)
# loop through ferry network to add to edges of main network
for iter_,ferry in ferry_network.iterrows():
start = pygeos.get_point(ferry.geometry,0)
end = pygeos.get_point(ferry.geometry,-1)
from_id = nodes.id.loc[nodes.geometry==start]
to_id = nodes.id.loc[nodes.geometry==end]
#if not from_id.empty and not to_id.empty:
edges = edges.append({ 'osm_id': ferry.osm_id,
'geometry': ferry.geometry,
'highway': 'ferry',
'maxspeed': 20,
'oneway': 'no',
'lanes': 2},
ignore_index=True)
# ensure the newly created edge network has the same order compared to the original one
new_edges = edges.iloc[:,:6]
new_edges = new_edges[[x for x in new_edges.columns if x != 'geometry']+['geometry']]
# create new network with ferry connections
net_final = simplified_network(new_edges,fill_attributes_run=False)
net_final.edges.osm_id = net_final.edges.osm_id.astype(int)
net_final.edges.geometry = pygeos.to_wkb(net_final.edges.geometry)
net_final.nodes.geometry = pygeos.to_wkb(net_final.nodes.geometry)
feather.write_dataframe(net_final.edges.copy(),data_path.joinpath('road_ferry_networks','{}-edges.feather'.format(country)))
feather.write_dataframe(net_final.nodes.copy(),data_path.joinpath('road_ferry_networks','{}-nodes.feather'.format(country)))
return net_final
def connect_ferries(country,full_network,ferry_network):
"""[summary]
Args:
country ([type]): [description]
full_network ([type]): [description]
ferry_network ([type]): [description]
Returns:
[type]: [description]
"""
# list in which we will connect new ferry connections
collect_connectors = []
# loop through all ferries
for iter_,ferry in (ferry_network.iterrows()):
# create buffer around ferry to get the full network around the ferry ends
ferry_buffer = pygeos.buffer(ferry.geometry,0.05)
# collect the road network around the ferry
sub_full_network = full_network.loc[pygeos.intersects(full_network.geometry,ferry_buffer)].reset_index(drop=True)
sub_main_network_nodes = [[pygeos.points(pygeos.get_coordinates(x)[0]),pygeos.points(pygeos.get_coordinates(x)[1])] for x in sub_full_network.loc[sub_full_network.highway.isin(road_types)].geometry]
sub_main_network_nodes = [item for sublist in sub_main_network_nodes for item in sublist]
sub_main_network_nodes = pd.DataFrame(sub_main_network_nodes,columns=['geometry'])
sub_main_network_nodes['id'] = [x+1 for x in range(len(sub_main_network_nodes))]
# create a dataframe of the ferry nodes
ferry_nodes = pd.DataFrame([pygeos.points(pygeos.get_coordinates(ferry.geometry)[0]),pygeos.points(pygeos.get_coordinates(ferry.geometry)[-1])],columns=['geometry'])
ferry_nodes['id'] = [1,2]
# create mini simplified network and graph of network around ferry
net = Network(edges=sub_full_network)
net = add_endpoints(net)
net = split_edges_at_nodes(net)
net = add_endpoints(net)
net = add_ids(net)
net = add_topology(net)
net = add_distances(net)
edges = net.edges.reindex(['from_id','to_id'] + [x for x in list(net.edges.columns) if x not in ['from_id','to_id']],axis=1)
graph= ig.Graph.TupleList(edges.itertuples(index=False), edge_attrs=list(edges.columns)[2:],directed=False)
sg = graph.copy()
# collect nearest nodes on network in graph
nearest_node_main = nearest_network_node_list(sub_main_network_nodes,net.nodes,sg)
nearest_node_ferry = nearest_network_node_list(ferry_nodes,net.nodes,sg)
dest_nodes = [sg.vs['name'].index(nearest_node_main[x]) for x in list(nearest_node_main.keys())]
ferry_nodes_graph = [sg.vs['name'].index(nearest_node_ferry[x]) for x in list(nearest_node_ferry.keys())]
# collect paths on both sides of the ferry, if both sides have an actual network nearby
if len(ferry_nodes_graph) == 2:
start_node,end_node = ferry_nodes_graph
# collect all shortest path from one side of the ferry to main network nodes
collect_start_paths = {}
for dest_node in dest_nodes:
paths = sg.get_shortest_paths(sg.vs[start_node],sg.vs[dest_node],weights='distance',output="epath")
if len(paths[0]) != 0:
collect_start_paths[dest_node] = sg.es[paths[0]]['id'],np.sum(sg.es[paths[0]]['distance'])
start_coords = ferry_nodes.geometry[ferry_nodes.id=={v: k for k, v in nearest_node_ferry.items()}[sg.vs[start_node]['name']]].values
# if there are paths, connect them up!
if len(collect_start_paths) != 0:
if len(pd.DataFrame.from_dict(collect_start_paths).T.min()) != 0:
path_1 = pd.DataFrame.from_dict(collect_start_paths).T.min()[0]
p_1 = []
for p in path_1:
high_type = net.edges.highway.loc[net.edges.id==p].values
if np.isin(high_type,road_types):
break
else:
p_1.append(p)
path_1 = net.edges.loc[net.edges.id.isin(p_1)]
# check if they are really connected, if not, we need to create a little linestring to connect the new connector path and the ferry
if len(p_1) > 0:
linestring = pygeos.linear.line_merge(pygeos.multilinestrings(path_1['geometry'].values))
endpoint1 = pygeos.points(pygeos.coordinates.get_coordinates(linestring))[0]
endpoint2 = pygeos.points(pygeos.coordinates.get_coordinates(linestring))[-1]
endpoint1_distance = pygeos.distance(start_coords,endpoint1)
endpoint2_distance = pygeos.distance(start_coords,endpoint2)
if (endpoint1_distance == 0) | (endpoint2_distance == 0):
collect_connectors.append(linestring)
elif endpoint1_distance < endpoint2_distance:
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.coordinates.get_coordinates(start_coords),pygeos.coordinates.get_coordinates(linestring)),axis=0)))
else:
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.coordinates.get_coordinates(linestring),pygeos.coordinates.get_coordinates(start_coords)),axis=0)))
else:
local_network = net.edges.loc[net.edges.id.isin(pd.DataFrame.from_dict(collect_start_paths).T.min()[0])]
sub_local_network = [[pygeos.points(pygeos.get_coordinates(x)[0]),pygeos.points(pygeos.get_coordinates(x)[-1])] for x in local_network.loc[local_network.highway.isin(road_types)].geometry]
sub_local_network = [item for sublist in sub_local_network for item in sublist]
location_closest_point = np.where(pygeos.distance(start_coords[0],sub_local_network) == np.amin(pygeos.distance(start_coords[0],sub_local_network)))[0][0]
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.get_coordinates(start_coords),pygeos.get_coordinates(sub_local_network[location_closest_point])),axis=0)))
# if there are no paths, but if the ferry node is still very close to the main network, we create a new linestring to connect them up (sometimes the ferry dock has no road)
elif pygeos.distance(sub_main_network_nodes.geometry,start_coords).min() < 0.01:
get_new_end_point = pygeos.coordinates.get_coordinates(sub_main_network_nodes.iloc[pygeos.distance(sub_main_network_nodes.geometry,start_coords).idxmin()].geometry)
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.coordinates.get_coordinates(start_coords),get_new_end_point),axis=0)))
# collect all shortest path from one side of the ferry to main network nodes
collect_end_paths = {}
for dest_node in dest_nodes:
paths = sg.get_shortest_paths(sg.vs[end_node],sg.vs[dest_node],weights='distance',output="epath")
if len(paths[0]) != 0:
collect_end_paths[dest_node] = sg.es[paths[0]]['id'],np.sum(sg.es[paths[0]]['distance'])
end_coords = ferry_nodes.geometry[ferry_nodes.id=={v: k for k, v in nearest_node_ferry.items()}[sg.vs[end_node]['name']]].values
# if there are paths, connect them up!
if len(collect_end_paths) != 0:
if len(pd.DataFrame.from_dict(collect_end_paths).T.min()) != 0:
path_2 = pd.DataFrame.from_dict(collect_end_paths).T.min()[0]
p_2 = []
for p in path_2:
high_type = net.edges.highway.loc[net.edges.id==p].values
if np.isin(high_type,road_types):
break
else:
p_2.append(p)
# check if they are really connected, if not, we need to create a little linestring to connect the new connector path and the ferry
path_2 = net.edges.loc[net.edges.id.isin(p_2)]
if len(p_2) > 0:
linestring = pygeos.linear.line_merge(pygeos.multilinestrings(path_2['geometry'].values))
endpoint1 = pygeos.points(pygeos.coordinates.get_coordinates(linestring))[0]
endpoint2 = pygeos.points(pygeos.coordinates.get_coordinates(linestring))[-1]
endpoint1_distance = pygeos.distance(end_coords,endpoint1)
endpoint2_distance = pygeos.distance(end_coords,endpoint2)
if (endpoint1_distance == 0) | (endpoint2_distance == 0):
collect_connectors.append(linestring)
elif endpoint1_distance < endpoint2_distance:
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.coordinates.get_coordinates(end_coords),pygeos.coordinates.get_coordinates(linestring)),axis=0)))
else:
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.coordinates.get_coordinates(linestring),pygeos.coordinates.get_coordinates(end_coords)),axis=0)))
else:
local_network = net.edges.loc[net.edges.id.isin(pd.DataFrame.from_dict(collect_end_paths).T.min()[0])]
sub_local_network = [[pygeos.points(pygeos.get_coordinates(x)[0]),pygeos.points(pygeos.get_coordinates(x)[-1])] for x in local_network.loc[local_network.highway.isin(road_types)].geometry]
sub_local_network = [item for sublist in sub_local_network for item in sublist]
location_closest_point = np.where(pygeos.distance(end_coords[0],sub_local_network) == np.amin(pygeos.distance(end_coords[0],sub_local_network)))[0][0]
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.get_coordinates(end_coords),pygeos.get_coordinates(sub_local_network[location_closest_point])),axis=0)))
# if there are no paths, but if the ferry node is still very close to the main network, we create a new linestring to connect them up (sometimes the ferry dock has no road)
elif pygeos.distance(sub_main_network_nodes.geometry,end_coords).min() < 0.01:
get_new_end_point = pygeos.coordinates.get_coordinates(sub_main_network_nodes.iloc[pygeos.distance(sub_main_network_nodes.geometry,end_coords).idxmin()].geometry)
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.coordinates.get_coordinates(end_coords),get_new_end_point),axis=0)))
# ferry is stand-alone, so we continue because there is nothing to connect
elif len(ferry_nodes_graph) == 0:
continue
# collect paths on one side of the ferry, as other side does not have a network nearby
else:
start_node = ferry_nodes_graph[0]
start_coords = ferry_nodes.geometry[ferry_nodes.id=={v: k for k, v in nearest_node_ferry.items()}[sg.vs[start_node]['name']]].values
# collect all shortest path from one side of the ferry to main network nodes
collect_start_paths = {}
for dest_node in dest_nodes:
paths = sg.get_shortest_paths(sg.vs[start_node],sg.vs[dest_node],weights='distance',output="epath")
if len(paths[0]) != 0:
collect_start_paths[dest_node] = sg.es[paths[0]]['id'],np.sum(sg.es[paths[0]]['distance'])
# if there are paths, connect them up!
if len(collect_start_paths) != 0:
path_1 = pd.DataFrame.from_dict(collect_start_paths).T.min()[0]
p_1 = []
for p in path_1:
high_type = net.edges.highway.loc[net.edges.id==p].values
if np.isin(high_type,road_types): break
else: p_1.append(p)
path_1 = net.edges.loc[net.edges.id.isin(p_1)]
# check if they are really connected, if not, we need to create a little linestring to connect the new connector path and the ferry
if len(p_1) > 0:
linestring = pygeos.linear.line_merge(pygeos.multilinestrings(path_1['geometry'].values))
endpoint1 = pygeos.points(pygeos.coordinates.get_coordinates(linestring))[0]
endpoint2 = pygeos.points(pygeos.coordinates.get_coordinates(linestring))[-1]
endpoint1_distance = pygeos.distance(start_coords,endpoint1)
endpoint2_distance = pygeos.distance(start_coords,endpoint2)
if (endpoint1_distance == 0) | (endpoint2_distance == 0):
collect_connectors.append(linestring)
elif endpoint1_distance < endpoint2_distance:
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.coordinates.get_coordinates(start_coords),pygeos.coordinates.get_coordinates(linestring)),axis=0)))
else:
collect_connectors.append(pygeos.linestrings(np.concatenate((pygeos.coordinates.get_coordinates(linestring),pygeos.coordinates.get_coordinates(start_coords)),axis=0)))
else:
local_network = net.edges.loc[net.edges.id.isin(pd.DataFrame.from_dict(collect_start_paths).T.min()[0])]
import pydoc
import pandas as pd
import os
import random
def read_excel():
df = pd.read_excel('/Users/ls/Downloads/babycare11-1.xlsx')
data = df.head(2)
print(str(data))
# print(df.head(2))
def merge_excel():
dfs = []
dir = '/Users/ls/babycare/'
des = '/Users/ls/babycare/babycare-stats-7-3.xlsx'
for root, dirs, files in os.walk(dir):
for file in files:
file_name = os.path.join(root, file)
# print(root,"==",file)
if 'babycare-stats-7-' in file_name:
df = pd.read_excel(file_name)
dfs.append(df)
print(len(files))
all_data = pd.concat(dfs)
all_data.rename(columns={'dt': '日期', 'task_id': '任务ID', 'conversion_user_pin_count': '触达人数',
'conversion_bought_user_pin_count': '触达-曾购数',
'conversion_non_bought_user_pin_count': '触达-未购数',
'browsered_pin_count': '浏览数',
'browsered_bought_pin_count': '浏览-曾购数',
'browsered_non_bought_pin_count': '浏览-未购数',
'purchased_pin_count': '加购数',
'purchased_bought_pin_count': '加购-曾购数',
'purchased_non_bought_pin_count': '加购-未购数',
'ordered_pin_count': '下单数',
'ordered_bought_pin_count': '下单-曾购数',
'ordered_non_bought_pin_count': '下单-未购数',
'paid_pin_count': '付款数',
'paid_bought_pin_count': '付款-曾购数',
'paid_non_bought_pin_count': '付款-未购数',
'consume_amount': '付款额',
'consume_bought_amount': '付款-曾购金额',
'consume_non_bought_amount': '付款-未购金额',
'per_amount': '客单价',
'per_bought_amount': '客单价-曾购价',
'per_non_bought_amount': '客单价-未购价'}, inplace=True)
# print(all_data.columns)
all_data.to_excel(des, index=False, encoding='gbk')
def simulation():
factor_low = 0.2
factor_high = 0.6
period_one_from = '/Users/ls/babycare/babycare-stats-1.xlsx'
period_seven_from = '/Users/ls/babycare/babycare-stats-7.xlsx'
period_one_des = '/Users/ls/babycare/babycare-stats-simu1.xlsx'
period_seven_des = '/Users/ls/babycare/babycare-stats-simu7.xlsx'
df = pd.read_excel(period_seven_from)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, make_scorer
# In[2]:
def calculate_pearson(df):
correlations = {}
numerical_features = df.select_dtypes(exclude = ["object"]).columns
numerical_features = numerical_features.drop("cod_municipio")
for i in numerical_features:
corr = stats.pearsonr(df[i], df['ideb'])[0]
correlations[i] = corr
df_corr = pd.DataFrame(list(correlations.items()), columns=['feature', 'correlation_with_ideb'])
df_corr = df_corr.dropna()
return df_corr
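# Minimal usage sketch (hypothetical dataframe, not part of the original
# notebook; calculate_pearson expects an 'ideb' column and a 'cod_municipio'
# identifier):
# df_example = pd.DataFrame({'cod_municipio': [1, 2, 3],
#                            'ideb': [4.5, 5.0, 5.5],
#                            'taxa_aprovacao': [80.0, 85.0, 90.0]})
# print(calculate_pearson(df_example))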
# In[3]:
def calculate_categorical_correlation(df):
categorical_features = df.select_dtypes(include = ["object"]).columns
return categorical_features
# # Puxa dados do CSV de cada integrante do grupo
# ### Dados Alexandre
# In[4]:
path = '../../data/'
# In[5]:
#Dados iniciais
alexandre_inicio_2015 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2015_ai.csv')
alexandre_inicio_2017 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2017_ai.csv')
# Dados finais
alexandre_final_2015 = pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2015_af.csv')
alexandre_final_2017 = pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2017_af.csv')
# ### Dados Lidia
# In[6]:
#Dados iniciais
lidia_inicio_2007 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2007_ai.csv')
lidia_inicio_2009 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2009_ai.csv')
lidia_inicio_2011 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2011_ai.csv')
lidia_inicio_2013 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2013_ai.csv')
lidia_inicio_2015 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2015_ai.csv')
lidia_inicio_2017 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2017_ai.csv')
# Dados finais
lidia_final_2007 = pd.read_csv(path + 'bases_lidia/anos_finais/ideb_escola_2007_af.csv')
lidia_final_2009 = pd.read_csv(path + 'bases_lidia/anos_finais/ideb_escola_2009_af.csv')
lidia_final_2011 = pd.read_csv(path + 'bases_lidia/anos_finais/ideb_escola_2011_af.csv')
lidia_final_2013 = pd.read_csv(path + 'bases_lidia/anos_finais/ideb_escola_2013_af.csv')
lidia_final_2015 = pd.read_csv(path + 'bases_lidia/anos_finais/ideb_escola_2015_af.csv')
lidia_final_2017 = pd.read_csv(path + 'bases_lidia/anos_finais/ideb_escola_2017_af.csv')
# ### Dados William
# In[7]:
#Dados iniciais
william_inicio_2005 = pd.read_csv(path + 'bases_william/anos_iniciais/dados2005_inic.csv')
william_inicio_2007 = pd.read_csv(path + 'bases_william/anos_iniciais/dados2007_inic.csv')
william_inicio_2009 = pd.read_csv(path + 'bases_william/anos_iniciais/dados2009_inic.csv')
william_inicio_2011 = pd.read_csv(path + 'bases_william/anos_iniciais/dados2011_inic.csv')
william_inicio_2013 = pd.read_csv(path + 'bases_william/anos_iniciais/dados2013_inic.csv')
william_inicio_2015 = pd.read_csv(path + 'bases_william/anos_iniciais/dados2015_inic.csv')
william_inicio_2017 = pd.read_csv(path + 'bases_william/anos_iniciais/dados2017_inic.csv')
# Dados finais
william_final_2005 = pd.read_csv(path + 'bases_william/anos_finais/dados2005_fim.csv')
william_final_2007 = pd.read_csv(path + 'bases_william/anos_finais/dados2007_fim.csv')
william_final_2009 = pd.read_csv(path + 'bases_william/anos_finais/dados2009_fim.csv')
william_final_2011 = pd.read_csv(path + 'bases_william/anos_finais/dados2011_fim.csv')
william_final_2013 = pd.read_csv(path + 'bases_william/anos_finais/dados2013_fim.csv')
import pandas as pd
df1 = pd.read_csv('data//alexander_algoaddition_adddate.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parsing of a csv game tracking sheet of type 'X', saving data in consice and relevant manner."""
# Here comes your imports
import sys
import logging as log
import pandas as pd
# Here comes your (few) global variables
# Here comes your class definitions
# Here comes your function definitions
def parse_pre_shot_situation(data, out):
"""parse the situation leading to the shot"""
# (cycle / free-hit / develop / counter / turnover / rebound / penalty / others)
situation_labels = \
['Festsetzen', 'Freischlag', 'Auslösung', 'Konter', \
'Ballgewinn', 'Abpraller', 'Penalty', 'Sonstige']
situation_categories = \
['CYC', 'FHT', 'DVL', 'CNT', 'TNV', 'RBD', 'PNT', 'OTH']
shot_situations = data[situation_labels]
shot_situations.columns = situation_categories
situation_count = shot_situations.notna().sum(axis=1)
if (situation_count != 1).any():
log.warning('no pre shot situation:\n%s', shot_situations[situation_count < 1])
log.warning('multiple pre shot situations:\n%s', shot_situations[situation_count > 1])
situation = pd.Categorical([''] * len(shot_situations.index), categories=situation_categories)
for label, content in shot_situations.items():
situation[content.notna()] = label
log.debug(pd.Series(situation))
log.debug(pd.Series(situation).value_counts())
out['sh_situ'] = pd.Series(situation)
def parse_shot_type(data, out):
"""parse the type of the shot"""
# (wrist / chip / slap / backhand / one-timer / volley / tip / in-tight)
type_labels = \
['Gezogen', 'Chip', 'Slapshot', 'Backhand', 'Direkt', 'Volley', 'Ablenker', 'InTight']
type_categories = \
['WRS', 'CHP', 'SLP', 'BKH', 'ONT', 'VOL', 'TIP', 'INT']
shot_types = data[type_labels]
shot_types.columns = type_categories
type_count = shot_types.notna().sum(axis=1)
if (type_count != 1).any():
log.warning('no shot type:\n%s', shot_types[type_count < 1])
log.warning('multiple shot types:\n%s', shot_types[type_count > 1])
shot_type = pd.Categorical([''] * len(shot_types.index), categories=type_categories)
for label, content in shot_types.items():
shot_type[content.notna()] = label
log.debug(pd.Series(shot_type))
log.debug(pd.Series(shot_type).value_counts())
out['sh_type'] = pd.Series(shot_type)
def parse_shot_result(data, out):
"""parse the result (blocked / missed / on-goal / goal) of the event / shot"""
result_categories = ['BL', 'MI', 'SOG', 'G']
shot_results = data[result_categories]
log.debug(shot_results.info())
result_count = shot_results.notna().sum(axis=1)
if (result_count < 1).any():
log.warning('no shot result:\n%s', shot_results[result_count < 1])
if (result_count > 1).any():
log.debug('multiple shot results:\n%s', shot_results[result_count > 1])
result = pd.Categorical([''] * len(shot_results.index), categories=result_categories)
for label, content in shot_results.items():
result[content.notna()] = label
log.debug(pd.Series(result))
log.debug(pd.Series(result).value_counts())
out['sh_outc'] = pd.Series(result)
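# Minimal sketch of the expected input (hypothetical rows, not part of the
# original script): exactly one of the four result columns is set per shot.
# data = pd.DataFrame({'BL': [1, None], 'MI': [None, None],
#                      'SOG': [None, 1], 'G': [None, None]})
# out = {}
# parse_shot_result(data, out)   # out['sh_outc'] -> ['BL', 'SOG']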
def parse_involved_players_for(data, out):
"""parse the involved (on-field) players for"""
prefix = 'hm_'
players_goalies = data.filter(regex=("^g?[0-9]+$"))
numbers = pd.Series(list(players_goalies))
col = [prefix + str(i) for i in range(1, 7)]
players = pd.DataFrame('', index=players_goalies.index, columns=col)
import sys,os
#os.chdir("/Users/utkarshvirendranigam/Desktop/Homework/Project")
# required_packages=["PyQt5","re", "scipy","itertools","random","matplotlib","pandas","numpy","sklearn","pydotplus","collections","warnings","seaborn"]
#print(os.getcwd())
# for my_package in required_packages:
# try:
# command_string="conda install "+ my_package+ " --yes"
# os.system(command_string)
# except:
# count=1
from PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel,
QGridLayout, QCheckBox, QGroupBox, QVBoxLayout, QHBoxLayout, QLineEdit, QPlainTextEdit)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QRect
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import Qt
# from scipy import interp
from itertools import cycle, combinations
import random
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QSizePolicy, QFormLayout, QRadioButton, QScrollArea, QMessageBox
from PyQt5.QtGui import QPixmap
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pandas as pd
import numpy as np
import pickle
from numpy.polynomial.polynomial import polyfit
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.compose import make_column_transformer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc, log_loss, brier_score_loss
from sklearn.calibration import calibration_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import feature_selection
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import cross_val_predict
# Libraries to display decision tree
from pydotplus import graph_from_dot_data
import collections
from sklearn.tree import export_graphviz
import webbrowser
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from Preprocessing import PreProcessing
import random
import seaborn as sns
#%%-----------------------------------------------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:\\Program Files (x86)\\graphviz-2.38\\release\\bin'
#%%-----------------------------------------------------------------------
#::--------------------------------
# Deafault font size for all the windows
#::--------------------------------
font_size_window = 'font-size:18px'
class DecisionTree(QMainWindow):
#::--------------------------------------------------------------------------------
# Implementation of Random Forest Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
# update : populates the elements of the canvas base on the parametes
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(DecisionTree, self).__init__()
self.Title = "Decision Tree Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
        # Create the canvas and all the elements to create a dashboard with
        # all the necessary elements to present the results from the algorithm
        # The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Decision Tree Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
        self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
self.btnDTFigure = QPushButton("View Tree")
self.btnDTFigure.setGeometry(QRect(60, 500, 75, 23))
self.btnDTFigure.clicked.connect(self.view_tree)
        # Place the feature checkboxes and the remaining controls on the grid layout
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox1Layout.addWidget(self.btnDTFigure, 21, 1, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
        Decision Tree Classifier
        We populate the dashboard using the parameters chosen by the user.
        The parameters are processed to evaluate the scikit-learn Decision Tree model,
        then the results are presented in graphics and reports on the canvas.
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
            if len(self.list_corr_features) == 0:
                self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
vtest_per = float(self.txtPercentTest.text())
vmax_depth = float(self.txtMaxDepth.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
# -----------------------------------------------------------------------
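        # The classifier below was trained and saved offline as a pickled .sav
        # file; this window only loads and evaluates it. As written, the
        # test-percentage and max-depth inputs do not retrain the model, and the
        # feature checkboxes only affect the labels of the importance plot below.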
filename = 'dt_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
        X_test = X[features_list]
        # prediction on the test data using the entropy-based decision tree
y_pred_entropy = self.clf_entropy.predict(X_test)
        # confusion matrix for the Decision Tree classifier
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
        ## Graph 1 :
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
        # Probability scores are needed later for the ROC curves; compute them
        # once instead of inside the annotation loop below.
        y_pred_score = self.clf_entropy.predict_proba(X_test)

        # Annotate each cell of the confusion matrix with its count
        for i in range(conf_matrix.shape[0]):
            for j in range(conf_matrix.shape[1]):
                self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
        # From the scikit-learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
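        # fpr, tpr and roc_auc are dictionaries keyed by class index (0, 1, ...)
        # plus the key "micro" for the micro-averaged curve over all classes.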
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
        self.ax2.set_title('ROC Curve Decision Tree')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - 3 Feature Importances
#####################################
# get feature importances
importances = self.clf_entropy.feature_importances_
        # convert the importances into a pandas Series indexed by the corresponding column names
f_importances = pd.Series(importances, self.list_corr_features.columns)
# sort the array in descending order of the importances, only show the first 10
f_importances.sort_values(ascending=False, inplace=True)
f_importances = f_importances[0:10]
X_Features = f_importances.index
y_Importance = list(f_importances)
self.ax3.barh(X_Features, y_Importance)
self.ax3.set_aspect('auto')
# show the plot
self.fig3.tight_layout()
self.fig3.canvas.draw_idle()
#::-----------------------------------------------------
# Graph 4 - ROC Curve by Class
#::-----------------------------------------------------
str_classes = ['No','Yes']
colors = cycle(['magenta', 'darkorange'])
for i, color in zip(range(n_classes), colors):
self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (area = {1:0.2f})'
''.format(str_classes[i], roc_auc[i]))
self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)
self.ax4.set_xlim([0.0, 1.0])
self.ax4.set_ylim([0.0, 1.05])
self.ax4.set_xlabel('False Positive Rate')
self.ax4.set_ylabel('True Positive Rate')
self.ax4.set_title('ROC Curve by Class')
self.ax4.legend(loc="lower right")
# show the plot
self.fig4.tight_layout()
self.fig4.canvas.draw_idle()
#::-----------------------------------------------------
# Other Models Comparison
#::-----------------------------------------------------
filename2 = 'lr_finalized_model.sav'
self.other_clf_lr = pickle.load(open(filename2, 'rb'))
y_pred_lr = self.other_clf_lr.predict(X_test)
self.accuracy_lr = accuracy_score(y_test, y_pred_lr) * 100
self.txtAccuracy_lr.setText(str(self.accuracy_lr))
filename3 = 'rf_finalized_model.sav'
self.other_clf_rf = pickle.load(open(filename3, 'rb'))
y_pred_rf = self.other_clf_rf.predict(X_test)
self.accuracy_rf = accuracy_score(y_test, y_pred_rf) * 100
self.txtAccuracy_rf.setText(str(self.accuracy_rf))
filename4 = 'gb_finalized_model.sav'
self.other_clf_gb = pickle.load(open(filename4, 'rb'))
y_pred_gb = self.other_clf_gb.predict(X_test)
self.accuracy_gb = accuracy_score(y_test, y_pred_gb) * 100
self.txtAccuracy_gb.setText(str(self.accuracy_gb))
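        # The comparison models above (logistic regression, random forest and
        # gradient boosting) are likewise pre-trained pickles evaluated on the
        # same X_test / y_test as the decision tree.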
def view_tree(self):
'''
        Opens the graphviz-generated tree view of the decision tree,
        presenting the graphic in PDF format using webbrowser
:return:None
'''
webbrowser.open_new(r'decision_tree_entropy.pdf')
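    # Illustrative sketch (an assumption, not part of the original application):
    # one way the PDF opened above could have been produced from the loaded
    # classifier, using the export_graphviz / graph_from_dot_data imports at the
    # top of this module. The feature and class names reuse labels that appear
    # elsewhere in this window.
    def export_tree_pdf(self, filename='decision_tree_entropy.pdf'):
        dot_data = export_graphviz(self.clf_entropy, out_file=None,
                                   feature_names=features_list,
                                   class_names=['No', 'Yes'],
                                   filled=True, rounded=True)
        graph = graph_from_dot_data(dot_data)
        graph.write_pdf(filename)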
class RandomForest(QMainWindow):
#::--------------------------------------------------------------------------------
# Implementation of Random Forest Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(RandomForest, self).__init__()
self.Title = "Random Forest Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
        # Create the canvas and all the elements to create a dashboard with
        # all the necessary elements to present the results from the algorithm
        # The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Random Forest Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
        self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
        # Place the feature checkboxes and the remaining controls on the grid layout
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_dt = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.other_models.layout.addRow('Decision tree:', self.txtAccuracy_dt)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
Random Forest Classifier
        We populate the dashboard using the parameters chosen by the user.
        The parameters are processed to evaluate the scikit-learn Random Forest model,
        then the results are presented in graphics and reports on the canvas.
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
            if len(self.list_corr_features) == 0:
                self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = | pd.concat([self.list_corr_features, df[features_list[23]]],axis=1) | pandas.concat |