max_stars_repo_path
stringlengths 5
128
| max_stars_repo_name
stringlengths 8
105
| max_stars_count
int64 0
41.3k
| id
stringlengths 5
5
| content
stringlengths 19
155k
| content_cleaned
stringlengths 17
155k
| language
stringclasses 18
values | language_score
float64 0.05
1
| edu_score
float64 0.76
4.4
| edu_int_score
int64 1
4
|
---|---|---|---|---|---|---|---|---|---|
lmctl/project/mutate/base.py | manojn97/lmctl | 3 | 13500 | import abc
class Mutator(abc.ABC):
def apply(self, original_content):
return original_content
| import abc
class Mutator(abc.ABC):
def apply(self, original_content):
return original_content
| none | 1 | 3.110995 | 3 |
src/django_otp/conf.py | jaap3/django-otp | 318 | 13501 | <filename>src/django_otp/conf.py
import django.conf
class Settings:
"""
This is a simple class to take the place of the global settings object. An
instance will contain all of our settings as attributes, with default values
if they are not specified by the configuration.
"""
defaults = {
'OTP_LOGIN_URL': django.conf.settings.LOGIN_URL,
'OTP_ADMIN_HIDE_SENSITIVE_DATA': False,
}
def __getattr__(self, name):
if name in self.defaults:
return getattr(django.conf.settings, name, self.defaults[name])
else:
return getattr(django.conf.settings, name)
settings = Settings()
| <filename>src/django_otp/conf.py
import django.conf
class Settings:
"""
This is a simple class to take the place of the global settings object. An
instance will contain all of our settings as attributes, with default values
if they are not specified by the configuration.
"""
defaults = {
'OTP_LOGIN_URL': django.conf.settings.LOGIN_URL,
'OTP_ADMIN_HIDE_SENSITIVE_DATA': False,
}
def __getattr__(self, name):
if name in self.defaults:
return getattr(django.conf.settings, name, self.defaults[name])
else:
return getattr(django.conf.settings, name)
settings = Settings()
| pt | 0.144566 | 3.088048 | 3 |
Moderation/purge.py | DevFlock/Multis | 3 | 13502 | import asyncio
import discord
from discord.ext import commands
from discord.ext.commands.core import has_permissions
class cog(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=["clear"])
@has_permissions(ban_members=True)
async def purge(self, ctx, count):
await ctx.channel.purge(limit=count+1)
message = await ctx.send(f"Deleted {count} messages.")
asyncio.sleep(2)
await message.delete()
def setup(client):
client.add_cog(cog(client))
| import asyncio
import discord
from discord.ext import commands
from discord.ext.commands.core import has_permissions
class cog(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=["clear"])
@has_permissions(ban_members=True)
async def purge(self, ctx, count):
await ctx.channel.purge(limit=count+1)
message = await ctx.send(f"Deleted {count} messages.")
asyncio.sleep(2)
await message.delete()
def setup(client):
client.add_cog(cog(client))
| none | 1 | 2.432014 | 2 |
tensorboard/acceptance/__init__.py | DeepLearnI/atlas | 296 | 13503 | from .test_tensorboard_rest_api import TestTensorboardRestAPI
from .test_tensorboard_server import TestTensorboardServer
from .test_tensorboard_endpoints import TestTensorboardEndpoint | from .test_tensorboard_rest_api import TestTensorboardRestAPI
from .test_tensorboard_server import TestTensorboardServer
from .test_tensorboard_endpoints import TestTensorboardEndpoint | none | 1 | 0.991267 | 1 |
tests/store/test_fetch_purchases_to_ship.py | yuzi-ziyu/alphasea-agent | 1 | 13504 | from unittest import TestCase
from ..helpers import (
create_web3,
create_contract,
get_future_execution_start_at_timestamp,
proceed_time,
get_prediction_time_shift,
get_purchase_time_shift,
get_shipping_time_shift,
get_publication_time_shift,
get_tournament_id,
get_chain_id,
create_store,
generate_redis_namespace,
BaseHardhatTestCase
)
from src.web3 import get_account_address
execution_start_at = get_future_execution_start_at_timestamp()
content = 'abc'.encode()
model_id = 'model1'
model_id_other = 'model_other'
class TestStoreFetchPurchasesToShip(BaseHardhatTestCase):
def setUp(self):
super().setUp()
w3 = create_web3()
contract = create_contract(w3)
store = create_store(w3, contract)
self.store = store
self.w3 = w3
w3_other = create_web3(account_index=1)
contract_other = create_contract(w3_other)
store_other = create_store(w3_other, contract_other)
w3_purchaser = create_web3(account_index=2)
contract_purchaser = create_contract(w3_purchaser)
store_purchaser = create_store(w3_purchaser, contract_purchaser)
self.store_purchaser = store_purchaser
self.w3_purchaser = w3_purchaser
# predict
proceed_time(w3, execution_start_at + get_prediction_time_shift())
store.create_models_if_not_exist([dict(
model_id=model_id,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store.create_predictions([dict(
model_id=model_id,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# other predict
store_other.create_models_if_not_exist([dict(
model_id=model_id_other,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store_other.create_predictions([dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# purchase
proceed_time(w3, execution_start_at + get_purchase_time_shift())
store_purchaser.create_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
), dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
)])
def test_ok(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [{
**purchases[0],
'model_id': model_id,
'execution_start_at': execution_start_at,
'purchaser': get_account_address(self.w3_purchaser.eth.default_account),
}])
def test_different_tournament_id(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id='different',
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [])
def test_different_execution_start_at(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at + 1,
)
self.assertEqual(purchases, [])
def test_already_shipped(self):
store = self.store
# ship
proceed_time(self.w3, execution_start_at + get_shipping_time_shift())
store.ship_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
purchaser=get_account_address(self.w3_purchaser.eth.default_account),
)])
purchases = store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at,
)
self.assertEqual(purchases, [])
| from unittest import TestCase
from ..helpers import (
create_web3,
create_contract,
get_future_execution_start_at_timestamp,
proceed_time,
get_prediction_time_shift,
get_purchase_time_shift,
get_shipping_time_shift,
get_publication_time_shift,
get_tournament_id,
get_chain_id,
create_store,
generate_redis_namespace,
BaseHardhatTestCase
)
from src.web3 import get_account_address
execution_start_at = get_future_execution_start_at_timestamp()
content = 'abc'.encode()
model_id = 'model1'
model_id_other = 'model_other'
class TestStoreFetchPurchasesToShip(BaseHardhatTestCase):
def setUp(self):
super().setUp()
w3 = create_web3()
contract = create_contract(w3)
store = create_store(w3, contract)
self.store = store
self.w3 = w3
w3_other = create_web3(account_index=1)
contract_other = create_contract(w3_other)
store_other = create_store(w3_other, contract_other)
w3_purchaser = create_web3(account_index=2)
contract_purchaser = create_contract(w3_purchaser)
store_purchaser = create_store(w3_purchaser, contract_purchaser)
self.store_purchaser = store_purchaser
self.w3_purchaser = w3_purchaser
# predict
proceed_time(w3, execution_start_at + get_prediction_time_shift())
store.create_models_if_not_exist([dict(
model_id=model_id,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store.create_predictions([dict(
model_id=model_id,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# other predict
store_other.create_models_if_not_exist([dict(
model_id=model_id_other,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store_other.create_predictions([dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# purchase
proceed_time(w3, execution_start_at + get_purchase_time_shift())
store_purchaser.create_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
), dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
)])
def test_ok(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [{
**purchases[0],
'model_id': model_id,
'execution_start_at': execution_start_at,
'purchaser': get_account_address(self.w3_purchaser.eth.default_account),
}])
def test_different_tournament_id(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id='different',
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [])
def test_different_execution_start_at(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at + 1,
)
self.assertEqual(purchases, [])
def test_already_shipped(self):
store = self.store
# ship
proceed_time(self.w3, execution_start_at + get_shipping_time_shift())
store.ship_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
purchaser=get_account_address(self.w3_purchaser.eth.default_account),
)])
purchases = store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at,
)
self.assertEqual(purchases, [])
| it | 0.141563 | 2.148052 | 2 |
tests/test_add_option_backtrace.py | ponponon/loguru | 11,391 | 13505 | <reponame>ponponon/loguru<gh_stars>1000+
from loguru import logger
# See "test_catch_exceptions.py" for extended testing
def test_backtrace(writer):
logger.add(writer, format="{message}", backtrace=True)
try:
1 / 0
except Exception:
logger.exception("")
result_with = writer.read().strip()
logger.remove()
writer.clear()
logger.add(writer, format="{message}", backtrace=False)
try:
1 / 0
except Exception:
logger.exception("")
result_without = writer.read().strip()
assert len(result_with.splitlines()) > len(result_without.splitlines())
| from loguru import logger
# See "test_catch_exceptions.py" for extended testing
def test_backtrace(writer):
logger.add(writer, format="{message}", backtrace=True)
try:
1 / 0
except Exception:
logger.exception("")
result_with = writer.read().strip()
logger.remove()
writer.clear()
logger.add(writer, format="{message}", backtrace=False)
try:
1 / 0
except Exception:
logger.exception("")
result_without = writer.read().strip()
assert len(result_with.splitlines()) > len(result_without.splitlines()) | it | 0.24827 | 2.834776 | 3 |
BasicPythonPrograms/PythonDestructor.py | Pushkar745/PythonProgramming | 0 | 13506 | <reponame>Pushkar745/PythonProgramming<gh_stars>0
class Employee:
#Initializaing
def __init__(self):
print('Employee created ')
#Deleting (Calling destructor)
def __del__(self):
print('Destructor called,Employee deleted')
obj=Employee()
del obj | class Employee:
#Initializaing
def __init__(self):
print('Employee created ')
#Deleting (Calling destructor)
def __del__(self):
print('Destructor called,Employee deleted')
obj=Employee()
del obj | it | 0.099496 | 3.459575 | 3 |
envi/registers.py | ConfusedMoonbear/vivisect | 1 | 13507 | <reponame>ConfusedMoonbear/vivisect
"""
Similar to the memory subsystem, this is a unified way to
access information about objects which contain registers
"""
import envi.bits as e_bits
from envi.const import *
class InvalidRegisterName(Exception):
pass
class RegisterContext:
def __init__(self, regdef=(), metas=(), pcindex=None, spindex=None, srindex=None):
"""
Hand in a register definition which consists of
a list of (<name>, <width>) tuples.
"""
self.loadRegDef(regdef)
self.loadRegMetas(metas)
self.setRegisterIndexes(pcindex, spindex, srindex=srindex)
self._rctx_dirty = False
def getRegisterSnap(self):
"""
Use this to bulk save off the register state.
"""
return list(self._rctx_vals)
def setRegisterSnap(self, snap):
"""
Use this to bulk restore the register state.
NOTE: This may only be used under the assumption that the
RegisterContext has been initialized the same way
(like context switches in tracers, or emulaction snaps)
"""
self._rctx_vals = list(snap)
def isDirty(self):
"""
Returns true if registers in this context have been modififed
since their import.
"""
return self._rctx_dirty
def setIsDirty(self, bool):
self._rctx_dirty = bool
def setRegisterIndexes(self, pcindex, spindex, srindex=None):
self._rctx_pcindex = pcindex
self._rctx_spindex = spindex
self._rctx_srindex = srindex
def loadRegDef(self, regdef, defval=0):
"""
Load a register definition. A register definition consists
of a list of tuples with the following format:
(regname, regwidth)
NOTE: All widths in envi RegisterContexts are in bits.
"""
self._rctx_regdef = regdef # Save this for snaps etc..
self._rctx_names = {}
self._rctx_ids = {}
self._rctx_widths = []
self._rctx_vals = []
self._rctx_masks = []
for i, (name, width) in enumerate(regdef):
self._rctx_names[name] = i
self._rctx_ids[i] = name
self._rctx_widths.append(width)
self._rctx_masks.append((2**width)-1)
self._rctx_vals.append(defval)
def getRegDef(self):
return self._rctx_regdef
def loadRegMetas(self, metas, statmetas=None):
"""
Load a set of defined "meta" registers for this architecture. Meta
registers are defined as registers who exist as a subset of the bits
in some other "real" register. The argument metas is a list of tuples
with the following format:
(regname, regidx, reg_shift_offset, reg_width)
The given example is for the AX register in the i386 subsystem
regname: "ax"
reg_shift_offset: 0
reg_width: 16
Optionally a set of status meta registers can be loaded as well.
The argument is a list of tuples with the following format:
(regname, regidx, reg_shift_offset, reg_width, description)
"""
self._rctx_regmetas = metas
for name, idx, offset, width in metas:
self.addMetaRegister(name, idx, offset, width)
self._rctx_statmetas = statmetas
def addMetaRegister(self, name, idx, offset, width):
"""
Meta registers are registers which are really just directly
addressable parts of already existing registers (eax -> al).
To add a meta register, you give the name, the idx of the *real*
register, the width of the meta reg, and it's left shifted (in bits)
offset into the real register value. The RegisterContext will take
care of accesses after that.
"""
newidx = (offset << 24) + (width << 16) + idx
self._rctx_names[name] = newidx
self._rctx_ids[newidx] = name
def isMetaRegister(self, index):
return (index & 0xffff) != index
def _rctx_Import(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, populate our values from it.
NOTE: This also clears the dirty flag
"""
# On import from a structure, we are clean again.
self._rctx_dirty = False
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
x = getattr(sobj, name, None)
if x != None:
self._rctx_vals[idx] = x
def _rctx_Export(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, set the ones he has to match
our values.
"""
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
if hasattr(sobj, name):
setattr(sobj, name, self._rctx_vals[idx])
def getRegisterInfo(self, meta=False):
"""
Return an object which can be stored off, and restored
to re-initialize a register context. (much like snapshot
but it takes the definitions with it)
"""
regdef = self._rctx_regdef
regmeta = self._rctx_regmetas
pcindex = self._rctx_pcindex
spindex = self._rctx_spindex
snap = self.getRegisterSnap()
return (regdef, regmeta, pcindex, spindex, snap)
def setRegisterInfo(self, info):
regdef, regmeta, pcindex, spindex, snap = info
self.loadRegDef(regdef)
self.loadRegMetas(regmeta)
self.setRegisterIndexes(pcindex, spindex)
self.setRegisterSnap(snap)
def getRegisterName(self, index):
return self._rctx_ids.get(index,"REG%.8x" % index)
def getProgramCounter(self):
"""
Get the value of the program counter for this register context.
"""
return self.getRegister(self._rctx_pcindex)
def setProgramCounter(self, value):
"""
Set the value of the program counter for this register context.
"""
self.setRegister(self._rctx_pcindex, value)
def getStackCounter(self):
return self.getRegister(self._rctx_spindex)
def setStackCounter(self, value):
self.setRegister(self._rctx_spindex, value)
def hasStatusRegister(self):
'''
Returns True if this context is aware of a status register.
'''
if self._rctx_srindex == None:
return False
return True
def getStatusRegNameDesc(self):
'''
Return a list of status register names and descriptions.
'''
return [(name, desc) for name, idx, offset, width, desc in self._rctx_statmetas]
def getStatusRegister(self):
'''
Gets the status register for this register context.
'''
return self.getRegister(self._rctx_srindex)
def setStatusRegister(self, value):
'''
Sets the status register for this register context.
'''
self.setRegister(self._rctx_srindex, value)
def getStatusFlags(self):
'''
Return a dictionary of reg name and reg value for the meta registers
that are part of the status register.
'''
ret = {}
for name, idx, offset, width, desc in self._rctx_statmetas:
ret[name] = self.getRegisterByName(name)
return ret
def getRegisterByName(self, name):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
return self.getRegister(idx)
def setRegisterByName(self, name, value):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
self.setRegister(idx, value)
def getRegisterNames(self):
'''
Returns a list of the 'real' (non meta) registers.
'''
regs = [rname for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisterNameIndexes(self):
'''
Return a list of all the 'real' (non meta) registers and their indexes.
Example: for regname, regidx in x.getRegisterNameIndexes():
'''
regs = [(rname, ridx) for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisters(self):
"""
Get all the *real* registers from this context as a dictionary of name
value pairs.
"""
ret = {}
for name,idx in self._rctx_names.items():
if (idx & 0xffff) != idx:
continue
ret[name] = self.getRegister(idx)
return ret
def setRegisters(self, regdict):
"""
For any name value pairs in the specified dictionary, set the current
register values in this context.
"""
for name,value in regdict.items():
self.setRegisterByName(name, value)
def getRegisterIndex(self, name):
"""
Get a register index by name.
(faster to use the index multiple times)
"""
return self._rctx_names.get(name)
def getRegisterWidth(self, index):
"""
Return the width of the register which lives at the specified
index (width is always in bits).
"""
ridx = index & 0xffff
if ridx == index:
return self._rctx_widths[index]
width = (index >> 16) & 0xff
return width
def getRegister(self, index):
"""
Return the current value of the specified register index.
"""
ridx = index & 0xffff
value = self._rctx_vals[ridx]
if ridx != index:
value = self._xlateToMetaReg(index, value)
return value
def getMetaRegInfo(self, index):
'''
Return the appropriate realreg, shift, mask info
for the specified metareg idx (or None if it's not
meta).
Example:
real_reg, lshift, mask = r.getMetaRegInfo(x)
'''
ridx = index & 0xffff
if ridx == index:
return None
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
return ridx, offset, mask
def _xlateToMetaReg(self, index, value):
'''
Translate a register value to the meta register value
(used when getting a meta register)
'''
ridx = index & 0xffff
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
if offset != 0:
value >>= offset
return value & mask
def _xlateToNativeReg(self, index, value):
'''
Translate a register value to the native register value
(used when setting a meta register)
'''
ridx = index & 0xffff
width = (index >> 16) & 0xff
offset = (index >> 24) & 0xff
# FIXME is it faster to generate or look these up?
mask = (2 ** width) - 1
mask = mask << offset
# NOTE: basewidth is in *bits*
basewidth = self._rctx_widths[ridx]
basemask = (2 ** basewidth) - 1
# cut a whole in basemask at the size/offset of mask
finalmask = basemask ^ mask
curval = self._rctx_vals[ridx]
if offset:
value <<= offset
return value | (curval & finalmask)
def setRegister(self, index, value):
"""
Set a register value by index.
"""
self._rctx_dirty = True
ridx = index & 0xffff
# If it's a meta register index, lets mask it into
# the real thing...
if ridx != index:
value = self._xlateToNativeReg(index, value)
self._rctx_vals[ridx] = (value & self._rctx_masks[ridx])
def getRealRegisterNameByIdx(self, regidx):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
(by Index)
"""
return self.getRegisterName(regidx& RMETA_NMASK)
def getRealRegisterName(self, regname):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
"""
ridx = self.getRegisterIndex(regname)
if ridx != None:
return self.getRegisterName(ridx & RMETA_NMASK)
return regname
def addLocalEnums(l, regdef):
"""
Update a dictionary (or module locals) with REG_FOO index
values for all the base registers defined in regdef.
"""
for i,(rname,width) in enumerate(regdef):
l["REG_%s" % rname.upper()] = i
def addLocalStatusMetas(l, metas, statmetas, regname):
'''
Dynamically create data based on the status register meta register
definition.
Adds new meta registers and bitmask constants.
'''
for metaname, idx, offset, width, desc in statmetas:
# create meta registers
metas.append( (metaname, idx, offset, width) )
# create local bitmask constants (EFLAGS_%)
l['%s_%s' % (regname, metaname)] = 1 << offset # TODO: fix for arbitrary width
def addLocalMetas(l, metas):
"""
Update a dictionary (or module locals) with REG_FOO index
values for all meta registers defined in metas.
"""
for name, idx, offset, width in metas:
l["REG_%s" % name.upper()] = (offset << 24) | (width << 16) | idx
| """
Similar to the memory subsystem, this is a unified way to
access information about objects which contain registers
"""
import envi.bits as e_bits
from envi.const import *
class InvalidRegisterName(Exception):
pass
class RegisterContext:
def __init__(self, regdef=(), metas=(), pcindex=None, spindex=None, srindex=None):
"""
Hand in a register definition which consists of
a list of (<name>, <width>) tuples.
"""
self.loadRegDef(regdef)
self.loadRegMetas(metas)
self.setRegisterIndexes(pcindex, spindex, srindex=srindex)
self._rctx_dirty = False
def getRegisterSnap(self):
"""
Use this to bulk save off the register state.
"""
return list(self._rctx_vals)
def setRegisterSnap(self, snap):
"""
Use this to bulk restore the register state.
NOTE: This may only be used under the assumption that the
RegisterContext has been initialized the same way
(like context switches in tracers, or emulaction snaps)
"""
self._rctx_vals = list(snap)
def isDirty(self):
"""
Returns true if registers in this context have been modififed
since their import.
"""
return self._rctx_dirty
def setIsDirty(self, bool):
self._rctx_dirty = bool
def setRegisterIndexes(self, pcindex, spindex, srindex=None):
self._rctx_pcindex = pcindex
self._rctx_spindex = spindex
self._rctx_srindex = srindex
def loadRegDef(self, regdef, defval=0):
"""
Load a register definition. A register definition consists
of a list of tuples with the following format:
(regname, regwidth)
NOTE: All widths in envi RegisterContexts are in bits.
"""
self._rctx_regdef = regdef # Save this for snaps etc..
self._rctx_names = {}
self._rctx_ids = {}
self._rctx_widths = []
self._rctx_vals = []
self._rctx_masks = []
for i, (name, width) in enumerate(regdef):
self._rctx_names[name] = i
self._rctx_ids[i] = name
self._rctx_widths.append(width)
self._rctx_masks.append((2**width)-1)
self._rctx_vals.append(defval)
def getRegDef(self):
return self._rctx_regdef
def loadRegMetas(self, metas, statmetas=None):
"""
Load a set of defined "meta" registers for this architecture. Meta
registers are defined as registers who exist as a subset of the bits
in some other "real" register. The argument metas is a list of tuples
with the following format:
(regname, regidx, reg_shift_offset, reg_width)
The given example is for the AX register in the i386 subsystem
regname: "ax"
reg_shift_offset: 0
reg_width: 16
Optionally a set of status meta registers can be loaded as well.
The argument is a list of tuples with the following format:
(regname, regidx, reg_shift_offset, reg_width, description)
"""
self._rctx_regmetas = metas
for name, idx, offset, width in metas:
self.addMetaRegister(name, idx, offset, width)
self._rctx_statmetas = statmetas
def addMetaRegister(self, name, idx, offset, width):
"""
Meta registers are registers which are really just directly
addressable parts of already existing registers (eax -> al).
To add a meta register, you give the name, the idx of the *real*
register, the width of the meta reg, and it's left shifted (in bits)
offset into the real register value. The RegisterContext will take
care of accesses after that.
"""
newidx = (offset << 24) + (width << 16) + idx
self._rctx_names[name] = newidx
self._rctx_ids[newidx] = name
def isMetaRegister(self, index):
return (index & 0xffff) != index
def _rctx_Import(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, populate our values from it.
NOTE: This also clears the dirty flag
"""
# On import from a structure, we are clean again.
self._rctx_dirty = False
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
x = getattr(sobj, name, None)
if x != None:
self._rctx_vals[idx] = x
def _rctx_Export(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, set the ones he has to match
our values.
"""
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
if hasattr(sobj, name):
setattr(sobj, name, self._rctx_vals[idx])
def getRegisterInfo(self, meta=False):
"""
Return an object which can be stored off, and restored
to re-initialize a register context. (much like snapshot
but it takes the definitions with it)
"""
regdef = self._rctx_regdef
regmeta = self._rctx_regmetas
pcindex = self._rctx_pcindex
spindex = self._rctx_spindex
snap = self.getRegisterSnap()
return (regdef, regmeta, pcindex, spindex, snap)
def setRegisterInfo(self, info):
regdef, regmeta, pcindex, spindex, snap = info
self.loadRegDef(regdef)
self.loadRegMetas(regmeta)
self.setRegisterIndexes(pcindex, spindex)
self.setRegisterSnap(snap)
def getRegisterName(self, index):
return self._rctx_ids.get(index,"REG%.8x" % index)
def getProgramCounter(self):
"""
Get the value of the program counter for this register context.
"""
return self.getRegister(self._rctx_pcindex)
def setProgramCounter(self, value):
"""
Set the value of the program counter for this register context.
"""
self.setRegister(self._rctx_pcindex, value)
def getStackCounter(self):
return self.getRegister(self._rctx_spindex)
def setStackCounter(self, value):
self.setRegister(self._rctx_spindex, value)
def hasStatusRegister(self):
'''
Returns True if this context is aware of a status register.
'''
if self._rctx_srindex == None:
return False
return True
def getStatusRegNameDesc(self):
'''
Return a list of status register names and descriptions.
'''
return [(name, desc) for name, idx, offset, width, desc in self._rctx_statmetas]
def getStatusRegister(self):
'''
Gets the status register for this register context.
'''
return self.getRegister(self._rctx_srindex)
def setStatusRegister(self, value):
'''
Sets the status register for this register context.
'''
self.setRegister(self._rctx_srindex, value)
def getStatusFlags(self):
'''
Return a dictionary of reg name and reg value for the meta registers
that are part of the status register.
'''
ret = {}
for name, idx, offset, width, desc in self._rctx_statmetas:
ret[name] = self.getRegisterByName(name)
return ret
def getRegisterByName(self, name):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
return self.getRegister(idx)
def setRegisterByName(self, name, value):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
self.setRegister(idx, value)
def getRegisterNames(self):
'''
Returns a list of the 'real' (non meta) registers.
'''
regs = [rname for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisterNameIndexes(self):
'''
Return a list of all the 'real' (non meta) registers and their indexes.
Example: for regname, regidx in x.getRegisterNameIndexes():
'''
regs = [(rname, ridx) for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisters(self):
"""
Get all the *real* registers from this context as a dictionary of name
value pairs.
"""
ret = {}
for name,idx in self._rctx_names.items():
if (idx & 0xffff) != idx:
continue
ret[name] = self.getRegister(idx)
return ret
def setRegisters(self, regdict):
"""
For any name value pairs in the specified dictionary, set the current
register values in this context.
"""
for name,value in regdict.items():
self.setRegisterByName(name, value)
def getRegisterIndex(self, name):
"""
Get a register index by name.
(faster to use the index multiple times)
"""
return self._rctx_names.get(name)
def getRegisterWidth(self, index):
"""
Return the width of the register which lives at the specified
index (width is always in bits).
"""
ridx = index & 0xffff
if ridx == index:
return self._rctx_widths[index]
width = (index >> 16) & 0xff
return width
def getRegister(self, index):
"""
Return the current value of the specified register index.
"""
ridx = index & 0xffff
value = self._rctx_vals[ridx]
if ridx != index:
value = self._xlateToMetaReg(index, value)
return value
def getMetaRegInfo(self, index):
'''
Return the appropriate realreg, shift, mask info
for the specified metareg idx (or None if it's not
meta).
Example:
real_reg, lshift, mask = r.getMetaRegInfo(x)
'''
ridx = index & 0xffff
if ridx == index:
return None
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
return ridx, offset, mask
def _xlateToMetaReg(self, index, value):
'''
Translate a register value to the meta register value
(used when getting a meta register)
'''
ridx = index & 0xffff
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
if offset != 0:
value >>= offset
return value & mask
def _xlateToNativeReg(self, index, value):
'''
Translate a register value to the native register value
(used when setting a meta register)
'''
ridx = index & 0xffff
width = (index >> 16) & 0xff
offset = (index >> 24) & 0xff
# FIXME is it faster to generate or look these up?
mask = (2 ** width) - 1
mask = mask << offset
# NOTE: basewidth is in *bits*
basewidth = self._rctx_widths[ridx]
basemask = (2 ** basewidth) - 1
# cut a whole in basemask at the size/offset of mask
finalmask = basemask ^ mask
curval = self._rctx_vals[ridx]
if offset:
value <<= offset
return value | (curval & finalmask)
def setRegister(self, index, value):
"""
Set a register value by index.
"""
self._rctx_dirty = True
ridx = index & 0xffff
# If it's a meta register index, lets mask it into
# the real thing...
if ridx != index:
value = self._xlateToNativeReg(index, value)
self._rctx_vals[ridx] = (value & self._rctx_masks[ridx])
def getRealRegisterNameByIdx(self, regidx):
    """
    Returns the Name of the Containing register (in the case
    of meta-registers) or the name of the register.
    (by Index)
    """
    base_idx = regidx & RMETA_NMASK
    return self.getRegisterName(base_idx)
def getRealRegisterName(self, regname):
    """
    Returns the Name of the Containing register (in the case
    of meta-registers) or the name of the register.

    Unknown register names are returned unchanged.
    """
    ridx = self.getRegisterIndex(regname)
    # PEP 8: compare against None with identity, not equality.
    if ridx is not None:
        return self.getRegisterName(ridx & RMETA_NMASK)
    return regname
def addLocalEnums(l, regdef):
    """
    Update a dictionary (or module locals) with REG_FOO index
    values for all the base registers defined in regdef.
    """
    for reg_index, (reg_name, _width) in enumerate(regdef):
        l["REG_%s" % reg_name.upper()] = reg_index
def addLocalStatusMetas(l, metas, statmetas, regname):
    '''
    Dynamically create data based on the status register meta register
    definition.

    Adds new meta registers and bitmask constants.
    '''
    for metaname, idx, offset, width, desc in statmetas:
        # Register the meta-register definition (description is dropped).
        metas.append((metaname, idx, offset, width))
        # Expose a local bitmask constant (e.g. EFLAGS_CF).
        l["%s_%s" % (regname, metaname)] = 1 << offset  # TODO: fix for arbitrary width
def addLocalMetas(l, metas):
    """
    Update a dictionary (or module locals) with REG_FOO index
    values for all meta registers defined in metas.
    """
    for name, idx, offset, width in metas:
        # Pack offset/width/real-index into a single meta-register index.
        packed = (offset << 24) | (width << 16) | idx
        l["REG_%s" % name.upper()] = packed
services/nris-api/backend/app/extensions.py | parc-jason/mds | 0 | 13508 | <reponame>parc-jason/mds
from flask_caching import Cache
from flask_jwt_oidc import JwtManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask import current_app
from elasticapm.contrib.flask import ElasticAPM
from .config import Config
from .helper import Api
# Shared Flask extension instances, created unbound here; presumably attached
# to the app elsewhere via each extension's init_app() — confirm in the
# application factory.
apm = ElasticAPM()    # Elastic APM instrumentation (elasticapm.contrib.flask)
db = SQLAlchemy()     # SQLAlchemy database/ORM handle
migrate = Migrate()   # flask_migrate migration helper
jwt = JwtManager()    # flask_jwt_oidc JWT/OIDC manager
cache = Cache()       # flask_caching cache
# REST API root mounted under Config.BASE_PATH with default namespace
# 'nris_api' (see .helper.Api).
api = Api(
    prefix=f'{Config.BASE_PATH}',
    doc=f'{Config.BASE_PATH}/',
    default='nris_api',
    default_label='NRIS related operations')
| from flask_caching import Cache
from flask_jwt_oidc import JwtManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask import current_app
from elasticapm.contrib.flask import ElasticAPM
from .config import Config
from .helper import Api
# Shared Flask extension instances, created unbound here; presumably attached
# to the app elsewhere via each extension's init_app() — confirm in the
# application factory.
apm = ElasticAPM()    # Elastic APM instrumentation (elasticapm.contrib.flask)
db = SQLAlchemy()     # SQLAlchemy database/ORM handle
migrate = Migrate()   # flask_migrate migration helper
jwt = JwtManager()    # flask_jwt_oidc JWT/OIDC manager
cache = Cache()       # flask_caching cache
# REST API root mounted under Config.BASE_PATH with default namespace
# 'nris_api' (see .helper.Api).
api = Api(
    prefix=f'{Config.BASE_PATH}',
    doc=f'{Config.BASE_PATH}/',
    default='nris_api',
    default_label='NRIS related operations')
tests/test_mqtt_async.py | mpi-sws-rse/antevents-python | 7 | 13509 | <gh_stars>1-10
# Copyright 2017 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Test async version of mqtt libraries. Depends on hbmqtt
(https://github.com/beerfactory/hbmqtt)
"""
import unittest
import sys
import asyncio
import string
from random import choice, seed
from antevents.base import Scheduler, SensorPub, SensorEvent
import antevents.linq.output
import antevents.linq.combinators
import antevents.linq.select
from antevents.adapters.mqtt_async import QueueWriter, QueueReader
from antevents.linq.transducer import PeriodicMedianTransducer
from utils import ValueListSensor, ValidateAndStopSubscriber
seed()
try:
import hbmqtt
HBMQTT_AVAILABLE = True
except ImportError:
HBMQTT_AVAILABLE = False
URL = "mqtt://localhost:1883"
VALUES = [
1.0,
2.5,
3.7,
4.1,
8.1,
0.5,
6.5,
4.5,
3.9,
6.5
]
EXPECTED = [
2.5,
4.1,
4.5,
6.5
]
def msg_to_event(msg):
    """Convert a raw (sensor_id, ts, val) message tuple into a SensorEvent."""
    sensor_id, ts, val = msg
    return SensorEvent(sensor_id=sensor_id, ts=ts, val=val)
CHARS=string.ascii_letters+string.digits
def get_topic_name(test_class):
    """Build a unique MQTT topic: test class name plus 5 random characters."""
    suffix = ''.join(choice(CHARS) for _ in range(5))
    return test_class.__class__.__name__ + suffix
@unittest.skipUnless(HBMQTT_AVAILABLE,
                     "HBMQTT library not installed for python at %s" %
                     sys.executable)
class TestCase(unittest.TestCase):
    """Round-trip tests for the async MQTT adapters (QueueWriter/QueueReader).

    Requires a broker reachable at ``URL`` (mqtt://localhost:1883); skipped
    entirely when hbmqtt is not importable.
    """

    def setUp(self):
        # Creating a new event loop each test case does not seem to work.
        # I think it is due to hbmqtt not cleaning up some state in the asyncio
        # layer.
        #self.loop = asyncio.new_event_loop()
        self.loop = asyncio.get_event_loop()
        self.sched = Scheduler(self.loop)

    def tearDown(self):
        # Intentionally a no-op: see setUp() — the shared loop is reused
        # across cases rather than stopped/closed.
        pass
        #self.loop.stop()
        #self.loop.close()

    def test_client_only(self):
        """Publish periodic medians through QueueWriter with no reader."""
        SENSOR_ID='sensor-1'
        TOPIC=get_topic_name(self)
        sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
        td = sensor.transduce(PeriodicMedianTransducer(period=3))
        qw = QueueWriter(td, URL, TOPIC, self.sched)
        qw.output()
        self.sched.schedule_periodic(sensor, 0.5)
        self.sched.run_forever()
        self.assertFalse(qw.has_pending_requests(),
                         "QueueWriter has pending requests: %s" % qw.dump_state())
        print("test_client_only completed")

    def send_and_recv_body(self, sleep_timeout):
        """Writer→broker→reader round trip validating the EXPECTED medians.

        ``sleep_timeout`` is the QueueReader poll timeout under test; the
        reader is stopped from the ValidateAndStopSubscriber callback.
        """
        SENSOR_ID='sensor-1'
        TOPIC=get_topic_name(self)
        sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
        td = sensor.transduce(PeriodicMedianTransducer(period=3))
        qw = QueueWriter(td, URL, TOPIC, self.sched)
        qw.output()
        qr = QueueReader(URL, TOPIC, self.sched, timeout=sleep_timeout)
        self.sched.schedule_periodic(sensor, 0.5)
        stop_qr = self.sched.schedule_on_main_event_loop(qr)
        vs = ValidateAndStopSubscriber(EXPECTED, self, stop_qr)
        qr.select(msg_to_event).subscribe(vs)
        self.sched.run_forever()
        self.assertFalse(qw.has_pending_requests(),
                         "QueueWriter has pending requests: %s" % qw.dump_state())
        self.assertEqual(qr.state, QueueReader.FINAL_STATE)
        self.assertEqual(vs.next_idx, len(EXPECTED))
        print("send_and_recv_bod(%s) completed" % sleep_timeout)

    def test_short_timeout(self):
        self.send_and_recv_body(0.1)

    def test_long_timeout(self):
        self.send_and_recv_body(3.0)
if __name__ == '__main__':
unittest.main()
| # Copyright 2017 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Test async version of mqtt libraries. Depends on hbmqtt
(https://github.com/beerfactory/hbmqtt)
"""
import unittest
import sys
import asyncio
import string
from random import choice, seed
from antevents.base import Scheduler, SensorPub, SensorEvent
import antevents.linq.output
import antevents.linq.combinators
import antevents.linq.select
from antevents.adapters.mqtt_async import QueueWriter, QueueReader
from antevents.linq.transducer import PeriodicMedianTransducer
from utils import ValueListSensor, ValidateAndStopSubscriber
seed()
try:
import hbmqtt
HBMQTT_AVAILABLE = True
except ImportError:
HBMQTT_AVAILABLE = False
URL = "mqtt://localhost:1883"
VALUES = [
1.0,
2.5,
3.7,
4.1,
8.1,
0.5,
6.5,
4.5,
3.9,
6.5
]
EXPECTED = [
2.5,
4.1,
4.5,
6.5
]
def msg_to_event(msg):
    """Convert a raw (sensor_id, ts, val) message tuple into a SensorEvent."""
    sensor_id, ts, val = msg
    return SensorEvent(sensor_id=sensor_id, ts=ts, val=val)
CHARS=string.ascii_letters+string.digits
def get_topic_name(test_class):
    """Build a unique MQTT topic: test class name plus 5 random characters."""
    suffix = ''.join(choice(CHARS) for _ in range(5))
    return test_class.__class__.__name__ + suffix
@unittest.skipUnless(HBMQTT_AVAILABLE,
                     "HBMQTT library not installed for python at %s" %
                     sys.executable)
class TestCase(unittest.TestCase):
    """Round-trip tests for the async MQTT adapters (QueueWriter/QueueReader).

    Requires a broker reachable at ``URL`` (mqtt://localhost:1883); skipped
    entirely when hbmqtt is not importable.
    """

    def setUp(self):
        # Creating a new event loop each test case does not seem to work.
        # I think it is due to hbmqtt not cleaning up some state in the asyncio
        # layer.
        #self.loop = asyncio.new_event_loop()
        self.loop = asyncio.get_event_loop()
        self.sched = Scheduler(self.loop)

    def tearDown(self):
        # Intentionally a no-op: see setUp() — the shared loop is reused
        # across cases rather than stopped/closed.
        pass
        #self.loop.stop()
        #self.loop.close()

    def test_client_only(self):
        """Publish periodic medians through QueueWriter with no reader."""
        SENSOR_ID='sensor-1'
        TOPIC=get_topic_name(self)
        sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
        td = sensor.transduce(PeriodicMedianTransducer(period=3))
        qw = QueueWriter(td, URL, TOPIC, self.sched)
        qw.output()
        self.sched.schedule_periodic(sensor, 0.5)
        self.sched.run_forever()
        self.assertFalse(qw.has_pending_requests(),
                         "QueueWriter has pending requests: %s" % qw.dump_state())
        print("test_client_only completed")

    def send_and_recv_body(self, sleep_timeout):
        """Writer→broker→reader round trip validating the EXPECTED medians.

        ``sleep_timeout`` is the QueueReader poll timeout under test; the
        reader is stopped from the ValidateAndStopSubscriber callback.
        """
        SENSOR_ID='sensor-1'
        TOPIC=get_topic_name(self)
        sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
        td = sensor.transduce(PeriodicMedianTransducer(period=3))
        qw = QueueWriter(td, URL, TOPIC, self.sched)
        qw.output()
        qr = QueueReader(URL, TOPIC, self.sched, timeout=sleep_timeout)
        self.sched.schedule_periodic(sensor, 0.5)
        stop_qr = self.sched.schedule_on_main_event_loop(qr)
        vs = ValidateAndStopSubscriber(EXPECTED, self, stop_qr)
        qr.select(msg_to_event).subscribe(vs)
        self.sched.run_forever()
        self.assertFalse(qw.has_pending_requests(),
                         "QueueWriter has pending requests: %s" % qw.dump_state())
        self.assertEqual(qr.state, QueueReader.FINAL_STATE)
        self.assertEqual(vs.next_idx, len(EXPECTED))
        print("send_and_recv_bod(%s) completed" % sleep_timeout)

    def test_short_timeout(self):
        self.send_and_recv_body(0.1)

    def test_long_timeout(self):
        self.send_and_recv_body(3.0)
if __name__ == '__main__':
unittest.main() | pt | 0.169016 | 2.116433 | 2 |
edit/main.py | team-alpha-kr/Partner-pyWeb | 0 | 13510 | <filename>edit/main.py
# -*- coding: utf8 -*-
import os
from flask import Flask, request, render_template, request, redirect, url_for, jsonify
from flask_discord import DiscordOAuth2Session, requires_authorization
from discord import Webhook, RequestsWebhookAdapter
webhook = Webhook.partial(814742019489660939, "rvSBVHtGPflSASjeGEEKdZxC5Z_w1UM_ovc_xD0ZPcFy1UeUybFM4ClGANu6CEWTQame", adapter=RequestsWebhookAdapter())
run_webhook = Webhook.partial(804602090537091072, "6ZMww14Nh7OVeeHUt5bWeixreoWQmSzPVfFmIpU3BEr8OYLGqickY1VyoqH2IeMs1Kd8", adapter=RequestsWebhookAdapter())
app = Flask(__name__)
app.secret_key = b"<KEY>"
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "false"
app.config["DISCORD_CLIENT_ID"] = "801279922722045962"
app.config["DISCORD_CLIENT_SECRET"] = "<KEY>" # Discord client secret.
# app.config["DISCORD_REDIRECT_URI"] = "http://localhost:3333/callback" # URL to your callback endpoint.
app.config["DISCORD_REDIRECT_URI"] = "https://partner-e.alphakr.xyz/callback" # URL to your callback endpoint.
app.config["DISCORD_BOT_TOKEN"] = "<KEY>"
discord = DiscordOAuth2Session(app)
def on_json_loading_failed_return_dict(e):
    # Fallback handler for failed request-JSON parsing: instead of raising,
    # return the Korean string for "none"/"nothing". (Runtime string kept
    # as-is; it is user-visible data, not a comment.)
    return '없음'
@app.route('/', methods=['GET','POST'])
def index():
    """Landing page: render step 1 of the partner form."""
    template_name = 'form/1.html'
    return render_template(template_name)
@app.route("/login", methods=["GET"])
def login():
if not discord.authorized:
return discord.create_session(scope=['guilds', 'email', 'identify'])
else:
return render_template("login.html")
@app.route("/callback", methods=["GET", "POST"])
def callback():
data = discord.callback()
redirect_to = data.get("redirect", "/form/1")
return redirect(redirect_to)
@app.route("/logout", methods=['GET', 'POST'])
def logout():
if discord.authorized:
discord.revoke()
return redirect(url_for("index"))
else:
return redirect(url_for("index"))
@app.route('/form/1', methods=['GET','POST'])
def form1():
    """Step 1 of the partner-edit form.

    GET renders the page for an authorized Discord user; POST is not a
    legitimate entry point here, so it is reported to the ops webhook and
    the visitor is bounced back to the landing page.
    """
    if request.method == 'GET':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            return render_template('form/1.html', user=user)
        else: # not logged in?
            return redirect(url_for("login"))
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            # Alert ops: authorized user hit step 1 with an unexpected method.
            run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 1단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
        else: # not logged in?
            ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            # Alert ops: anonymous visitor (identified by IP) hit step 1 abnormally.
            run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 1단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/2', methods=['GET','POST'])
def form2():
    """Step 2 of the partner-edit form.

    POST (from step 1) carries ``code`` and ``nickname`` into the step-2
    template; GET is an abnormal access and is reported to the ops webhook.
    """
    if request.method == 'POST':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            code = request.form['code']
            nickname = request.form['nickname']
            return render_template('form/2.html', code=code, nickname=nickname)
        else: # not logged in?
            return redirect(url_for("login"))
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            # Alert ops: authorized user hit step 2 with an unexpected method.
            run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 2단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
        else: # not logged in?
            ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            # Alert ops: anonymous visitor (identified by IP) hit step 2 abnormally.
            run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 2단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/3', methods=['GET','POST'])
def form3():
    """Step 3 of the partner-edit form.

    POST carries the step-2 fields forward into the step-3 template;
    GET is an abnormal access and is bounced to the landing page.
    """
    if request.method == 'POST':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            code = request.form['code']
            nickname = request.form['nickname']
            server = request.form['server']
            member = request.form['member']
            category = request.form['category']
            etc_text = request.form['etc_text']
            return render_template('form/3.html', code=code, nickname=nickname, server=server, member=member, category=category, etc_text=etc_text)
        else: # not logged in?
            return redirect(url_for("login"))
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            # NOTE(review): unlike steps 1-2, no webhook report is sent here.
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
        else: # not logged in?
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/action', methods=['GET','POST'])
def action():
    """Final step: forward the completed partner-edit request to Discord.

    POST posts the collected form fields to the staff webhook and renders
    the confirmation page; GET is an abnormal access and is reported.
    """
    if request.method == 'GET':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            # Alert ops: authorized user hit the submit endpoint with GET.
            run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 결과 전송 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
        else: # not logged in?
            ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            # Alert ops: anonymous visitor (identified by IP) hit the submit endpoint.
            run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 결과 전송 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            code = request.form['code']
            nickname = request.form['nickname']
            server = request.form['server']
            member = request.form['member']
            category = request.form['category']
            etc_text = request.form['etc_text']
            message = request.form['message']
            image = request.form['image']
            video = request.form['video']
            if etc_text == '':
                etc_text = 'Unknown'
            # Forward the full application to the staff channel webhook.
            webhook.send(f"<@<PASSWORD>56043785>\n✅ 파트너 수정 신청이 도착했습니다.\n\n파트너 코드: {code}\n신청자: {nickname}\n서버(초대 링크): {server}\n멤버 수: {member}\n카테고리 정보: {category} ({etc_text})\n홍보지: {message}\n이미지: {image}\n영상: {video}")
            return render_template('form/action.html', code = code)
        else: # not logged in?
            return redirect(url_for("index"))
@app.route('/guide/<id>', methods=['GET', 'POST'])
def guide(id):
    """Client-side redirect to the hosted partner guide page for *id*."""
    target = f"https://team-alpha-kr.github.io/Partner-Guide/{id}.html"
    return f"<script>location.replace('{target}');</script>"
# S: 2021 partner website revamp code
# S: 2021-02-10 notices
@app.route('/notice/<id>', methods=['GET', 'POST'])
def notice(id):
    """Render the notice page with the given id (2021 site revamp)."""
    template_name = f"2021temp/notice/{id}.html"
    return render_template(template_name)
# E: 2021-02-10 notices
# E: 2021 partner website revamp code
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page."""
    template_name = "error/404.html"
    return render_template(template_name)
@app.errorhandler(500)
def servererror(error):
    """Report an internal server error to the ops webhook, then render the
    custom 500 page."""
    # Plain string: the original f-string had no placeholders.
    run_webhook.send("<@673776952578146315> ⛔ [ 500 ERROR ] 서버에 오류가 발생했습니다.")
    return render_template("error/500.html")
@app.errorhandler(400)
def badrequest(error):
    """Report a bad request to the ops webhook, then render the custom 400
    page."""
    # Plain string: the original f-string had no placeholders.
    run_webhook.send("<@673776952578146315> ⛔ [ 400 ERROR ] 서버에 오류가 발생했습니다.")
    return render_template("error/400.html")
run_webhook.send("✅ 파트너 정보 수정 - 웹사이트가 실행이 되었습니다!")
app.run(host='0.0.0.0', port=3333, debug=False) | <filename>edit/main.py
# -*- coding: utf8 -*-
import os
from flask import Flask, request, render_template, request, redirect, url_for, jsonify
from flask_discord import DiscordOAuth2Session, requires_authorization
from discord import Webhook, RequestsWebhookAdapter
webhook = Webhook.partial(814742019489660939, "rvSBVHtGPflSASjeGEEKdZxC5Z_w1UM_ovc_xD0ZPcFy1UeUybFM4ClGANu6CEWTQame", adapter=RequestsWebhookAdapter())
run_webhook = Webhook.partial(804602090537091072, "6ZMww14Nh7OVeeHUt5bWeixreoWQmSzPVfFmIpU3BEr8OYLGqickY1VyoqH2IeMs1Kd8", adapter=RequestsWebhookAdapter())
app = Flask(__name__)
app.secret_key = b"<KEY>"
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "false"
app.config["DISCORD_CLIENT_ID"] = "801279922722045962"
app.config["DISCORD_CLIENT_SECRET"] = "<KEY>" # Discord client secret.
# app.config["DISCORD_REDIRECT_URI"] = "http://localhost:3333/callback" # URL to your callback endpoint.
app.config["DISCORD_REDIRECT_URI"] = "https://partner-e.alphakr.xyz/callback" # URL to your callback endpoint.
app.config["DISCORD_BOT_TOKEN"] = "<KEY>"
discord = DiscordOAuth2Session(app)
def on_json_loading_failed_return_dict(e):
    # Fallback handler for failed request-JSON parsing: instead of raising,
    # return the Korean string for "none"/"nothing". (Runtime string kept
    # as-is; it is user-visible data, not a comment.)
    return '없음'
@app.route('/', methods=['GET','POST'])
def index():
    """Landing page: render step 1 of the partner form."""
    template_name = 'form/1.html'
    return render_template(template_name)
@app.route("/login", methods=["GET"])
def login():
if not discord.authorized:
return discord.create_session(scope=['guilds', 'email', 'identify'])
else:
return render_template("login.html")
@app.route("/callback", methods=["GET", "POST"])
def callback():
data = discord.callback()
redirect_to = data.get("redirect", "/form/1")
return redirect(redirect_to)
@app.route("/logout", methods=['GET', 'POST'])
def logout():
if discord.authorized:
discord.revoke()
return redirect(url_for("index"))
else:
return redirect(url_for("index"))
@app.route('/form/1', methods=['GET','POST'])
def form1():
    """Step 1 of the partner-edit form.

    GET renders the page for an authorized Discord user; POST is not a
    legitimate entry point here, so it is reported to the ops webhook and
    the visitor is bounced back to the landing page.
    """
    if request.method == 'GET':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            return render_template('form/1.html', user=user)
        else: # not logged in?
            return redirect(url_for("login"))
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            # Alert ops: authorized user hit step 1 with an unexpected method.
            run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 1단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
        else: # not logged in?
            ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            # Alert ops: anonymous visitor (identified by IP) hit step 1 abnormally.
            run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 1단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/2', methods=['GET','POST'])
def form2():
    """Step 2 of the partner-edit form.

    POST (from step 1) carries ``code`` and ``nickname`` into the step-2
    template; GET is an abnormal access and is reported to the ops webhook.
    """
    if request.method == 'POST':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            code = request.form['code']
            nickname = request.form['nickname']
            return render_template('form/2.html', code=code, nickname=nickname)
        else: # not logged in?
            return redirect(url_for("login"))
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            # Alert ops: authorized user hit step 2 with an unexpected method.
            run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 2단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
        else: # not logged in?
            ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            # Alert ops: anonymous visitor (identified by IP) hit step 2 abnormally.
            run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 2단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/3', methods=['GET','POST'])
def form3():
    """Step 3 of the partner-edit form.

    POST carries the step-2 fields forward into the step-3 template;
    GET is an abnormal access and is bounced to the landing page.
    """
    if request.method == 'POST':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            code = request.form['code']
            nickname = request.form['nickname']
            server = request.form['server']
            member = request.form['member']
            category = request.form['category']
            etc_text = request.form['etc_text']
            return render_template('form/3.html', code=code, nickname=nickname, server=server, member=member, category=category, etc_text=etc_text)
        else: # not logged in?
            return redirect(url_for("login"))
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            # NOTE(review): unlike steps 1-2, no webhook report is sent here.
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
        else: # not logged in?
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/action', methods=['GET','POST'])
def action():
    """Final step: forward the completed partner-edit request to Discord.

    POST posts the collected form fields to the staff webhook and renders
    the confirmation page; GET is an abnormal access and is reported.
    """
    if request.method == 'GET':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            # Alert ops: authorized user hit the submit endpoint with GET.
            run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 결과 전송 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
        else: # not logged in?
            ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            # Alert ops: anonymous visitor (identified by IP) hit the submit endpoint.
            run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 결과 전송 페이지에 정상적이지 않은 접근을 시도 했습니다.")
            return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            code = request.form['code']
            nickname = request.form['nickname']
            server = request.form['server']
            member = request.form['member']
            category = request.form['category']
            etc_text = request.form['etc_text']
            message = request.form['message']
            image = request.form['image']
            video = request.form['video']
            if etc_text == '':
                etc_text = 'Unknown'
            # Forward the full application to the staff channel webhook.
            webhook.send(f"<@<PASSWORD>56043785>\n✅ 파트너 수정 신청이 도착했습니다.\n\n파트너 코드: {code}\n신청자: {nickname}\n서버(초대 링크): {server}\n멤버 수: {member}\n카테고리 정보: {category} ({etc_text})\n홍보지: {message}\n이미지: {image}\n영상: {video}")
            return render_template('form/action.html', code = code)
        else: # not logged in?
            return redirect(url_for("index"))
@app.route('/guide/<id>', methods=['GET', 'POST'])
def guide(id):
    """Client-side redirect to the hosted partner guide page for *id*."""
    target = f"https://team-alpha-kr.github.io/Partner-Guide/{id}.html"
    return f"<script>location.replace('{target}');</script>"
# S: 2021 partner website revamp code
# S: 2021-02-10 notices
@app.route('/notice/<id>', methods=['GET', 'POST'])
def notice(id):
    """Render the notice page with the given id (2021 site revamp)."""
    template_name = f"2021temp/notice/{id}.html"
    return render_template(template_name)
# E: 2021-02-10 notices
# E: 2021 partner website revamp code
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page."""
    template_name = "error/404.html"
    return render_template(template_name)
@app.errorhandler(500)
def servererror(error):
    """Report an internal server error to the ops webhook, then render the
    custom 500 page."""
    # Plain string: the original f-string had no placeholders.
    run_webhook.send("<@673776952578146315> ⛔ [ 500 ERROR ] 서버에 오류가 발생했습니다.")
    return render_template("error/500.html")
@app.errorhandler(400)
def badrequest(error):
    """Report a bad request to the ops webhook, then render the custom 400
    page."""
    # Plain string: the original f-string had no placeholders.
    run_webhook.send("<@673776952578146315> ⛔ [ 400 ERROR ] 서버에 오류가 발생했습니다.")
    return render_template("error/400.html")
run_webhook.send("✅ 파트너 정보 수정 - 웹사이트가 실행이 되었습니다!")
app.run(host='0.0.0.0', port=3333, debug=False) | ko | 1.00007 | 2.744826 | 3 |
protocols/tpkt.py | dparnishchev/s7scan | 98 | 13511 | from scapy.fields import ByteField, ShortField
from scapy.packet import Packet
class TPKT(Packet):
    """Scapy layer for a TPKT header (ISO transport service on TCP, RFC 1006).

    Fields: protocol version (defaults to 3), a reserved byte, and a
    16-bit length field (defaults to 0).
    """
    name = "TPKT"
    fields_desc = [ByteField("version", 3),
                   ByteField("reserved", 0),
                   ShortField("length", 0x0000)]
| from scapy.fields import ByteField, ShortField
from scapy.packet import Packet
class TPKT(Packet):
    """Scapy layer for a TPKT header (ISO transport service on TCP, RFC 1006).

    Fields: protocol version (defaults to 3), a reserved byte, and a
    16-bit length field (defaults to 0).
    """
    name = "TPKT"
    fields_desc = [ByteField("version", 3),
                   ByteField("reserved", 0),
                   ShortField("length", 0x0000)]
| none | 1 | 2.327009 | 2 |
pylbm_ui/widgets/message.py | pylbm/pylbm_ui | 3 | 13512 | <reponame>pylbm/pylbm_ui
import ipyvuetify as v
class Message(v.Container):
def __init__(self, message):
self.message = v.Alert(
children=[f'{message}...'],
class_='primary--text'
)
super().__init__(
children=[
v.Row(
children=[
v.ProgressCircular(
indeterminate=True,
color='primary',
size=70,
width=4
)
],
justify='center'
),
v.Row(
children=[
self.message,
],
justify='center'
)
]
)
def update(self, new_message):
self.message.children = [f'{new_message}...'] | import ipyvuetify as v
class Message(v.Container):
    """Centered busy indicator: a spinner above an alert showing *message*."""

    def __init__(self, message):
        # Keep a handle on the alert so update() can swap its text later.
        self.message = v.Alert(
            children=[f'{message}...'],
            class_='primary--text',
        )

        spinner_row = v.Row(
            children=[
                v.ProgressCircular(
                    indeterminate=True,
                    color='primary',
                    size=70,
                    width=4,
                ),
            ],
            justify='center',
        )
        text_row = v.Row(
            children=[self.message],
            justify='center',
        )
        super().__init__(children=[spinner_row, text_row])

    def update(self, new_message):
        """Replace the displayed text with *new_message*."""
        self.message.children = [f'{new_message}...']
args_parser.py | vmartinv/capital_gains_calculator | 0 | 13513 | import argparse
import datetime
def get_last_elapsed_tax_year() -> int:
    """Return the first year of the most recent tax year that has ended.

    The tax year starts on 6 April; before that date the previous tax
    year is still the last *elapsed* one.
    """
    today = datetime.datetime.now()
    tax_year_start = datetime.date(today.year, 4, 6)
    if today.date() >= tax_year_start:
        return today.year - 1
    return today.year - 2
def create_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the capital gains calculator.

    Defaults point at the bundled data files and the last elapsed tax year.
    """
    # Schwab transactions
    # Monthly GBP/USD history from
    # https://www.gov.uk/government/collections/exchange-rates-for-customs-and-vat
    default_gbp_history_file = "GBP_USD_monthly_history.csv"
    # Initial vesting and spin-off prices
    default_initial_prices_file = "initial_prices.csv"
    default_pdf_report = "calculations.pdf"
    parser = argparse.ArgumentParser(
        description="Calculate capital gains from stock transactions.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--tax_year",
        type=int,
        default=get_last_elapsed_tax_year(),
        nargs="?",
        help="First year of the tax year to calculate gains on",
    )
    parser.add_argument(
        "--schwab",
        type=str,
        nargs="?",
        help="file containing the exported transactions from Schwab",
    )
    parser.add_argument(
        "--trading212",
        type=str,
        nargs="?",
        help="folder containing the exported transaction files from Trading212",
    )
    parser.add_argument(
        "--gbp_history",
        type=str,
        default=default_gbp_history_file,
        nargs="?",
        help="monthly GBP/USD prices from HMRC",
    )
    parser.add_argument(
        "--initial_prices",
        type=str,
        default=default_initial_prices_file,
        nargs="?",
        # Typo fix: "cointaining" -> "containing".
        help="file containing stock prices in USD at the moment of vesting, split, etc.",
    )
    parser.add_argument(
        "--report",
        type=str,
        default=default_pdf_report,
        nargs="?",
        help="where to save the generated pdf report",
    )
    return parser
| import argparse
import datetime
def get_last_elapsed_tax_year() -> int:
    """Return the first year of the most recent tax year that has ended.

    The tax year starts on 6 April; before that date the previous tax
    year is still the last *elapsed* one.
    """
    today = datetime.datetime.now()
    tax_year_start = datetime.date(today.year, 4, 6)
    if today.date() >= tax_year_start:
        return today.year - 1
    return today.year - 2
def create_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the capital gains calculator.

    Defaults point at the bundled data files and the last elapsed tax year.
    """
    # Schwab transactions
    # Monthly GBP/USD history from
    # https://www.gov.uk/government/collections/exchange-rates-for-customs-and-vat
    default_gbp_history_file = "GBP_USD_monthly_history.csv"
    # Initial vesting and spin-off prices
    default_initial_prices_file = "initial_prices.csv"
    default_pdf_report = "calculations.pdf"
    parser = argparse.ArgumentParser(
        description="Calculate capital gains from stock transactions.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--tax_year",
        type=int,
        default=get_last_elapsed_tax_year(),
        nargs="?",
        help="First year of the tax year to calculate gains on",
    )
    parser.add_argument(
        "--schwab",
        type=str,
        nargs="?",
        help="file containing the exported transactions from Schwab",
    )
    parser.add_argument(
        "--trading212",
        type=str,
        nargs="?",
        help="folder containing the exported transaction files from Trading212",
    )
    parser.add_argument(
        "--gbp_history",
        type=str,
        default=default_gbp_history_file,
        nargs="?",
        help="monthly GBP/USD prices from HMRC",
    )
    parser.add_argument(
        "--initial_prices",
        type=str,
        default=default_initial_prices_file,
        nargs="?",
        # Typo fix: "cointaining" -> "containing".
        help="file containing stock prices in USD at the moment of vesting, split, etc.",
    )
    parser.add_argument(
        "--report",
        type=str,
        default=default_pdf_report,
        nargs="?",
        help="where to save the generated pdf report",
    )
    return parser
| es | 0.131111 | 3.130347 | 3 |
src/pydts/examples_utils/datasets.py | tomer1812/pydts | 0 | 13514 | import pandas as pd
from pydts.config import *
DATASETS_DIR = os.path.join(os.path.dirname((os.path.dirname(__file__))), 'datasets')
def load_LOS_simulated_data():
os.path.join(os.path.dirname(__file__))
return pd.read_csv(os.path.join(DATASETS_DIR, 'LOS_simulated_data.csv')) | import pandas as pd
from pydts.config import *
DATASETS_DIR = os.path.join(os.path.dirname((os.path.dirname(__file__))), 'datasets')
def load_LOS_simulated_data():
    """Load the simulated length-of-stay (LOS) dataset bundled with pydts.

    Returns:
        pandas.DataFrame read from ``datasets/LOS_simulated_data.csv``.
    """
    # Removed a bare ``os.path.join(os.path.dirname(__file__))`` statement
    # that computed a path and discarded it (dead code).
    return pd.read_csv(os.path.join(DATASETS_DIR, 'LOS_simulated_data.csv'))
busker/migrations/0013_auto_20200906_1933.py | tinpan-io/django-busker | 2 | 13515 | <gh_stars>1-10
# Generated by Django 3.1.1 on 2020-09-06 19:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.1).

    Sets a default ordering on DownloadCode and redefines the File.work
    foreign key.
    """

    dependencies = [
        ('busker', '0012_auto_20200905_2042'),
    ]

    operations = [
        # Deterministic default ordering for DownloadCode querysets.
        migrations.AlterModelOptions(
            name='downloadcode',
            options={'ordering': ['id']},
        ),
        # File.work FK: cascade delete, reverse accessor named 'files'.
        migrations.AlterField(
            model_name='file',
            name='work',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='busker.downloadablework'),
        ),
    ]
| # Generated by Django 3.1.1 on 2020-09-06 19:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('busker', '0012_auto_20200905_2042'),
]
operations = [
migrations.AlterModelOptions(
name='downloadcode',
options={'ordering': ['id']},
),
migrations.AlterField(
model_name='file',
name='work',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='busker.downloadablework'),
),
] | fr | 0.13841 | 1.372627 | 1 |
livy/cli/submit.py | tzing/python-livy | 1 | 13516 | """Submit a batch task to livy server."""
import argparse
import datetime
import importlib
import json
import logging
import re
import typing
import livy
import livy.cli.config
import livy.cli.logging
logger = logging.getLogger(__name__)
class PreSubmitArguments(argparse.Namespace):
    """Typed :py:class:`~argparse.Namespace` for arguments before task submission."""

    # task: what to run, its dependencies, and the Spark resources to request
    script: str
    args: typing.List[str]
    class_name: str
    jars: typing.List[str]
    py_files: typing.List[str]
    files: typing.List[str]
    archives: typing.List[str]
    queue_name: str
    session_name: str
    api_url: str
    driver_memory: str
    driver_cores: int
    executor_memory: str
    executor_cores: int
    num_executors: int
    spark_conf: typing.List[typing.Tuple[str, str]]

    # log: whether to watch the task log after submission
    watch_log: bool

    # time
    time_prog_start: datetime.datetime
    "Local time this script is called"
class TaskEndedArguments(PreSubmitArguments):
    """Typed :py:class:`~argparse.Namespace` for arguments when task is ended.

    It contains all attributes from :py:class:`~livy.cli.submit.PreSubmitArguments`.
    """

    # task: outcome reported by the Livy server
    batch_id: int
    "Batch ID response by livy server"
    state: str
    "Task ended state"

    # time: timestamps recorded around the task lifecycle
    time_task_submit: datetime.datetime
    "Local time before task is submitted"
    time_task_ended: datetime.datetime
    "Local time that detected task is ended"
def main(argv=None):
    """CLI entrypoint.

    Parses arguments, runs pre-submit hooks, submits the batch to the Livy
    server and (optionally) watches its logs until the task ends, then runs
    the matching task-ended hooks.  Returns a process exit code
    (0 = task success, 1 = any failure).
    """
    # parse argument
    cfg = livy.cli.config.load()
    parser = argparse.ArgumentParser(
        prog="livy submit",
        description=__doc__,
    )

    parser.add_argument(
        "script",
        help="Path to the script that contains the application to be executed",
    )
    parser.add_argument(
        "args",
        nargs="*",
        help="Arguments for the task script",
    )
    parser.add_argument(
        "--class-name",
        metavar="COM.EXAMPLE.FOO",
        help="Application Java/Spark main class (for Java/Scala task)",
    )
    parser.add_argument(
        "--jars",
        nargs="+",
        metavar="FOO.JAR",
        help="Java dependencies to be used in this batch",
    )
    parser.add_argument(
        "--py-files",
        nargs="+",
        metavar="FOO.ZIP",
        help="Python dependencies to be used in this batch",
    )
    parser.add_argument(
        "--files",
        nargs="+",
        metavar="FOO.TXT",
        help="Files to be used in this batch",
    )
    parser.add_argument(
        "--archives",
        nargs="+",
        metavar="FOO.TAR",
        help="Archives to be used in this batch",
    )
    parser.add_argument(
        "--queue-name",
        metavar="DEFAULT",
        help="The name of the YARN queue to which submitted",
    )
    parser.add_argument(
        "--session-name",
        metavar="HELLO",
        help="The session name to execute this batch",
    )

    group = parser.add_argument_group("pre-submit actions")
    group.add_argument(
        "--on-pre-submit",
        metavar="PLUG",
        nargs="+",
        default=cfg.submit.pre_submit,
        help="Run plugin(s) before submit",
    )

    group = parser.add_argument_group("livy server configuration")
    group.add_argument(
        "--api-url",
        required=cfg.root.api_url is None,
        default=cfg.root.api_url,
        help="Base-URL for Livy API server",
    )
    group.add_argument(
        "--driver-memory",
        metavar="10G",
        default=cfg.submit.driver_memory,
        type=argmem,
        help="Amount of memory to use for the driver process.",
    )
    group.add_argument(
        "--driver-cores",
        metavar="N",
        default=cfg.submit.driver_cores,
        type=int,
        help="Number of cores to use for the driver process.",
    )
    group.add_argument(
        "--executor-memory",
        metavar="10G",
        default=cfg.submit.executor_memory,
        type=argmem,
        help="Amount of memory to use for the executor process.",
    )
    group.add_argument(
        "--executor-cores",
        metavar="N",
        default=cfg.submit.executor_cores,
        type=int,
        help="Number of cores to use for each executor.",
    )
    group.add_argument(
        "--num-executors",
        metavar="N",
        default=cfg.submit.num_executors,
        type=int,
        help="Number of executors to launch for this batch.",
    )
    group.add_argument(
        "--spark-conf",
        metavar="CONF_NAME=VALUE",
        nargs="+",
        default=cfg.submit.spark_conf,
        type=argkvpair,
        help="Spark configuration properties.",
    )

    group = parser.add_argument_group("post-submit actions")
    g = group.add_mutually_exclusive_group()
    g.set_defaults(watch_log=cfg.submit.watch_log)
    g.add_argument(
        "--watch-log",
        dest="watch_log",
        action="store_true",
        help="Watching for logs until it is finished",
    )
    g.add_argument(
        "--no-watch-log",
        dest="watch_log",
        action="store_false",
        help="Not to watch for logs. Only submit the task and quit.",
    )

    group = parser.add_argument_group("after-task-finish actions")
    group.add_argument(
        "--on-task-success",
        metavar="PLUG",
        nargs="+",
        default=cfg.submit.task_success,
        help="Run plugin(s) on task is finished and success",
    )
    group.add_argument(
        "--on-task-failed",
        metavar="PLUG",
        nargs="+",
        default=cfg.submit.task_fail,
        help="Run plugin(s) on task is ended and failed",
    )
    group.add_argument(
        "--on-task-ended",
        metavar="PLUG",
        nargs="+",
        # NOTE(review): default reuses the ``task_fail`` config entry; looks
        # like it should be a dedicated ``task_ended`` setting -- confirm.
        default=cfg.submit.task_fail,
        help="Run plugin(s) on task is ended and ended and regardless to its state",
    )

    livy.cli.logging.setup_argparse(parser)
    args: PreSubmitArguments = parser.parse_args(argv)

    # time stamping
    tzlocal = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo

    def now() -> datetime.datetime:
        # timezone-aware "now" in the local timezone, for log timestamps
        return datetime.datetime.now().astimezone(tzlocal)

    args.time_prog_start = now()

    # setup logger
    livy.cli.logging.init(args)
    console = livy.cli.logging.get("livy-read-log.main")
    console.info("Submission task started")

    # run pre-submit actions (each hook may replace the namespace)
    args: TaskEndedArguments = run_hook(console, "PRE-SUBMIT", args, args.on_pre_submit)

    # check server state
    client = livy.LivyClient(url=args.api_url)
    try:
        client.check(False)
    except livy.RequestError as e:
        console.error("Failed to connect to server: %s", e)
        return 1

    # build request payload; falsy values (None, empty list/str, 0) are omitted
    submit_parameter = {}
    for key, value in [
        ("file", args.script),
        ("class_name", args.class_name),
        ("args", args.args),
        ("jars", args.jars),
        ("py_files", args.py_files),
        ("files", args.files),
        ("driver_memory", args.driver_memory),
        ("driver_cores", args.driver_cores),
        ("executor_memory", args.executor_memory),
        ("executor_cores", args.executor_cores),
        ("num_executors", args.num_executors),
        ("archives", args.archives),
        ("queue", args.queue_name),
        ("name", args.session_name),
        ("conf", {k: v for k, v in args.spark_conf}),
    ]:
        if value:
            submit_parameter[key] = value

    console.info(
        "Creating batch with parameters: %s",
        json.dumps(submit_parameter, indent=2),
    )

    # timing
    args.time_task_submit = now()
    console.debug("Batch submission time= %s", args.time_task_submit)

    # submit
    try:
        submit_resp = client.create_batch(**submit_parameter)
    except livy.RequestError as e:
        console.error("Failed to connect to server: %s", e)
        return 1

    console.info("Server response: %s", json.dumps(submit_resp, indent=2))

    args.batch_id = submit_resp.get("id", None)
    if not isinstance(args.batch_id, int) or args.batch_id < 0:
        console.error("Failed to get batch id. Something goes wrong.")
        return 1

    # watch log
    if not args.watch_log:
        console.info("Batch %d created.", args.batch_id)
        return 0

    console.info("Start reading logs of batch %d", args.batch_id)
    reader = livy.LivyBatchLogReader(client, args.batch_id)

    try:
        reader.read_until_finish()
    except livy.RequestError as e:
        console.error(
            "Error occurs during read log. HTTP code=%d, Reason=%s", e.code, e.reason
        )
        return 1
    except KeyboardInterrupt:
        msg_args = args.batch_id, args.api_url  # just for shorten
        console.warning("Keyboard interrupt. Local livy-submit process terminating.")
        console.warning("Your task might be still running on the server.")
        console.warning("For reading the logs, call:")
        console.warning(" livy read-log %d --api-url %s", *msg_args)
        console.warning("For stopping the task, call:")
        console.warning(" livy kill %d --api-url %s", *msg_args)
        return 1

    # timing
    args.time_task_ended = now()
    console.debug("Batch finishing time= %s", args.time_task_ended)

    # get ending state
    try:
        args.state = client.get_batch_state(args.batch_id)
    except livy.RequestError:
        console.error("Error during query batch ending state.")
        return 1

    if args.state == "success":
        exit_code = 0
        state_level = logging.INFO
    else:
        exit_code = 1
        state_level = logging.WARNING

    console.log(state_level, "Batch#%d ended with state= %s", args.batch_id, args.state)

    elapsed_time = args.time_task_ended - args.time_task_submit
    console.info(
        "Batch execution time: %dsec (%s)",
        elapsed_time.total_seconds(),
        human_readable_timeperiod(elapsed_time),
    )

    # run task-end actions; the unconditional "TASK" hooks run after the
    # state-specific ones
    if args.state == "success":
        args = run_hook(console, "TASK-SUCCESS", args, args.on_task_success)
    else:
        args = run_hook(console, "TASK-FAILED", args, args.on_task_failed)
    args = run_hook(console, "TASK", args, args.on_task_ended)

    return exit_code
def argmem(s: str):
    """Validate a memory-size CLI argument such as ``512mb``, ``10G`` or ``2gb``.

    Accepts digits followed by ``g`` or ``m`` with an optional trailing ``b``,
    case-insensitively.  Returns the string unchanged when valid; raises
    :py:exc:`argparse.ArgumentTypeError` otherwise so argparse reports it.
    """
    if not re.fullmatch(r"\d+[gm]b?", s, re.RegexFlag.IGNORECASE):
        # fixed user-facing message typo: "please specific" -> "please specify"
        raise argparse.ArgumentTypeError(
            "please specify memory size in format '1234mb'"
        )
    return s
def argkvpair(val):
    """Split a ``NAME=VALUE`` string on its first ``=`` into a 2-tuple.

    The value part may itself contain ``=``; a string without any ``=``
    raises :py:exc:`ValueError` (argparse turns that into a usage error).
    """
    name, value = val.split("=", 1)
    return name, value
def run_hook(
    logger: logging.Logger,
    identifier: str,
    args: argparse.Namespace,
    actions: typing.List[str],
) -> argparse.Namespace:
    """Run hook actions for one stage of the submit flow.

    Each action is a ``module:func`` string; the resolved callable is invoked
    as ``func(identifier, args)`` and must return an
    :py:class:`argparse.Namespace`, which replaces ``args`` for the next
    action.  Any failure aborts the whole process via ``exit(1)``.

    Args:
        logger: logger used to report progress and errors
        identifier: stage name, e.g. ``"PRE-SUBMIT"`` or ``"TASK-SUCCESS"``
        args: current argument namespace, threaded through the hooks
        actions: list of ``module:func`` plugin references

    Returns:
        The (possibly replaced) argument namespace.
    """
    for action_name in actions:
        logger.info("Run %s action %s", identifier.lower(), action_name)

        func = get_function(action_name)
        if not func:
            logger.warning("Failed to get action function instance. Stop process.")
            exit(1)

        try:
            args = func(identifier, args)
        except Exception:
            # was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt raised inside a hook; Exception is sufficient
            logger.exception(
                "Error occurs during %s action. Stop process.", identifier.lower()
            )
            exit(1)

        if not isinstance(args, argparse.Namespace):
            logger.error(
                "Expect namespace object from %s's return value. Got %s",
                action_name,
                type(args).__name__,
            )
            exit(1)

    return args
def get_function(name: str) -> typing.Callable:
    """Resolve a ``module:func`` string to the callable it names.

    Logs an error and returns ``None`` when the name is malformed, the
    module cannot be imported, or the attribute does not exist.
    """
    matched = re.fullmatch(r"([\w.]+):(\w+)", name, re.RegexFlag.I)
    if not matched:
        logger.error("Failed to resolve function name: %s", name)
        logger.error("Please specific it in module:func format")
        return

    module_name = matched.group(1)
    func_name = matched.group(2)

    try:
        module = importlib.import_module(module_name)
    except ImportError:
        logger.error("Failed to find module: %s", module_name)
        return

    try:
        return getattr(module, func_name)
    except AttributeError:
        logger.error("Failed to find function %s in %s", func_name, module_name)
        return
def human_readable_timeperiod(period: datetime.timedelta):
    """Convert a time period to a compact human readable string.

    Examples: ``1d 2h``, ``3m 5s``.  Units with a zero value are omitted,
    except that a zero-length period now yields ``"0s"`` instead of an
    empty string (the old behavior produced logs like ``0sec ()`` for
    sub-second tasks).
    """
    total_seconds = int(period.total_seconds())

    terms = []
    days, remainder = divmod(total_seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    for value, unit in ((days, "d"), (hours, "h"), (minutes, "m"), (seconds, "s")):
        if value:
            terms.append(f"{value}{unit}")

    if not terms:
        return "0s"
    return " ".join(terms)
# Allow direct ``python submit.py`` execution besides the console entry point.
if __name__ == "__main__":
    exit(main())
| """Submit a batch task to livy server."""
import argparse
import datetime
import importlib
import json
import logging
import re
import typing
import livy
import livy.cli.config
import livy.cli.logging
logger = logging.getLogger(__name__)
class PreSubmitArguments(argparse.Namespace):
"""Typed :py:class:`~argparse.Namespace` for arguments before task submission."""
# task
script: str
args: typing.List[str]
class_name: str
jars: typing.List[str]
py_files: typing.List[str]
files: typing.List[str]
archives: typing.List[str]
queue_name: str
session_name: str
api_url: str
driver_memory: str
driver_cores: int
executor_memory: str
executor_cores: int
num_executors: int
spark_conf: typing.List[typing.Tuple[str, str]]
# log
watch_log: bool
# time
time_prog_start: datetime.datetime
"Local time this script is called"
class TaskEndedArguments(PreSubmitArguments):
"""Typed :py:class:`~argparse.Namespace` for arguments when task is ended.
It contains all attributes from :py:class:`~livy.cli.submit.PreSubmitArguments`.
"""
# task
batch_id: int
"Batch ID response by livy server"
state: str
"Task ended state"
# time
time_task_submit: datetime.datetime
"Local time before task is submitted"
time_task_ended: datetime.datetime
"Local time that detected task is ended"
def main(argv=None):
"""CLI entrypoint"""
# parse argument
cfg = livy.cli.config.load()
parser = argparse.ArgumentParser(
prog="livy submit",
description=__doc__,
)
parser.add_argument(
"script",
help="Path to the script that contains the application to be executed",
)
parser.add_argument(
"args",
nargs="*",
help="Arguments for the task script",
)
parser.add_argument(
"--class-name",
metavar="COM.EXAMPLE.FOO",
help="Application Java/Spark main class (for Java/Scala task)",
)
parser.add_argument(
"--jars",
nargs="+",
metavar="FOO.JAR",
help="Java dependencies to be used in this batch",
)
parser.add_argument(
"--py-files",
nargs="+",
metavar="FOO.ZIP",
help="Python dependencies to be used in this batch",
)
parser.add_argument(
"--files",
nargs="+",
metavar="FOO.TXT",
help="Files to be used in this batch",
)
parser.add_argument(
"--archives",
nargs="+",
metavar="FOO.TAR",
help="Archives to be used in this batch",
)
parser.add_argument(
"--queue-name",
metavar="DEFAULT",
help="The name of the YARN queue to which submitted",
)
parser.add_argument(
"--session-name",
metavar="HELLO",
help="The session name to execute this batch",
)
group = parser.add_argument_group("pre-submit actions")
group.add_argument(
"--on-pre-submit",
metavar="PLUG",
nargs="+",
default=cfg.submit.pre_submit,
help="Run plugin(s) before submit",
)
group = parser.add_argument_group("livy server configuration")
group.add_argument(
"--api-url",
required=cfg.root.api_url is None,
default=cfg.root.api_url,
help="Base-URL for Livy API server",
)
group.add_argument(
"--driver-memory",
metavar="10G",
default=cfg.submit.driver_memory,
type=argmem,
help="Amount of memory to use for the driver process.",
)
group.add_argument(
"--driver-cores",
metavar="N",
default=cfg.submit.driver_cores,
type=int,
help="Number of cores to use for the driver process.",
)
group.add_argument(
"--executor-memory",
metavar="10G",
default=cfg.submit.executor_memory,
type=argmem,
help="Amount of memory to use for the executor process.",
)
group.add_argument(
"--executor-cores",
metavar="N",
default=cfg.submit.executor_cores,
type=int,
help="Number of cores to use for each executor.",
)
group.add_argument(
"--num-executors",
metavar="N",
default=cfg.submit.num_executors,
type=int,
help="Number of executors to launch for this batch.",
)
group.add_argument(
"--spark-conf",
metavar="CONF_NAME=VALUE",
nargs="+",
default=cfg.submit.spark_conf,
type=argkvpair,
help="Spark configuration properties.",
)
group = parser.add_argument_group("post-submit actions")
g = group.add_mutually_exclusive_group()
g.set_defaults(watch_log=cfg.submit.watch_log)
g.add_argument(
"--watch-log",
dest="watch_log",
action="store_true",
help="Watching for logs until it is finished",
)
g.add_argument(
"--no-watch-log",
dest="watch_log",
action="store_false",
help="Not to watch for logs. Only submit the task and quit.",
)
group = parser.add_argument_group("after-task-finish actions")
group.add_argument(
"--on-task-success",
metavar="PLUG",
nargs="+",
default=cfg.submit.task_success,
help="Run plugin(s) on task is finished and success",
)
group.add_argument(
"--on-task-failed",
metavar="PLUG",
nargs="+",
default=cfg.submit.task_fail,
help="Run plugin(s) on task is ended and failed",
)
group.add_argument(
"--on-task-ended",
metavar="PLUG",
nargs="+",
default=cfg.submit.task_fail,
help="Run plugin(s) on task is ended and ended and regardless to its state",
)
livy.cli.logging.setup_argparse(parser)
args: PreSubmitArguments = parser.parse_args(argv)
# time stamping
tzlocal = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
def now() -> datetime.datetime:
return datetime.datetime.now().astimezone(tzlocal)
args.time_prog_start = now()
# setup logger
livy.cli.logging.init(args)
console = livy.cli.logging.get("livy-read-log.main")
console.info("Submission task started")
# run pre-submit actions
args: TaskEndedArguments = run_hook(console, "PRE-SUBMIT", args, args.on_pre_submit)
# check server state
client = livy.LivyClient(url=args.api_url)
try:
client.check(False)
except livy.RequestError as e:
console.error("Failed to connect to server: %s", e)
return 1
# build request payload
submit_parameter = {}
for key, value in [
("file", args.script),
("class_name", args.class_name),
("args", args.args),
("jars", args.jars),
("py_files", args.py_files),
("files", args.files),
("driver_memory", args.driver_memory),
("driver_cores", args.driver_cores),
("executor_memory", args.executor_memory),
("executor_cores", args.executor_cores),
("num_executors", args.num_executors),
("archives", args.archives),
("queue", args.queue_name),
("name", args.session_name),
("conf", {k: v for k, v in args.spark_conf}),
]:
if value:
submit_parameter[key] = value
console.info(
"Creating batch with parameters: %s",
json.dumps(submit_parameter, indent=2),
)
# timing
args.time_task_submit = now()
console.debug("Batch submission time= %s", args.time_task_submit)
# submit
try:
submit_resp = client.create_batch(**submit_parameter)
except livy.RequestError as e:
console.error("Failed to connect to server: %s", e)
return 1
console.info("Server response: %s", json.dumps(submit_resp, indent=2))
args.batch_id = submit_resp.get("id", None)
if not isinstance(args.batch_id, int) or args.batch_id < 0:
console.error("Failed to get batch id. Something goes wrong.")
return 1
# watch log
if not args.watch_log:
console.info("Batch %d created.", args.batch_id)
return 0
console.info("Start reading logs of batch %d", args.batch_id)
reader = livy.LivyBatchLogReader(client, args.batch_id)
try:
reader.read_until_finish()
except livy.RequestError as e:
console.error(
"Error occurs during read log. HTTP code=%d, Reason=%s", e.code, e.reason
)
return 1
except KeyboardInterrupt:
msg_args = args.batch_id, args.api_url # just for shorten
console.warning("Keyboard interrupt. Local livy-submit process terminating.")
console.warning("Your task might be still running on the server.")
console.warning("For reading the logs, call:")
console.warning(" livy read-log %d --api-url %s", *msg_args)
console.warning("For stopping the task, call:")
console.warning(" livy kill %d --api-url %s", *msg_args)
return 1
# timing
args.time_task_ended = now()
console.debug("Batch finishing time= %s", args.time_task_ended)
# get ending state
try:
args.state = client.get_batch_state(args.batch_id)
except livy.RequestError:
console.error("Error during query batch ending state.")
return 1
if args.state == "success":
exit_code = 0
state_level = logging.INFO
else:
exit_code = 1
state_level = logging.WARNING
console.log(state_level, "Batch#%d ended with state= %s", args.batch_id, args.state)
elapsed_time = args.time_task_ended - args.time_task_submit
console.info(
"Batch execution time: %dsec (%s)",
elapsed_time.total_seconds(),
human_readable_timeperiod(elapsed_time),
)
# run task-end actions
if args.state == "success":
args = run_hook(console, "TASK-SUCCESS", args, args.on_task_success)
else:
args = run_hook(console, "TASK-FAILED", args, args.on_task_failed)
args = run_hook(console, "TASK", args, args.on_task_ended)
return exit_code
def argmem(s: str):
"""Validate input for memory size"""
if not re.fullmatch(r"\d+[gm]b?", s, re.RegexFlag.IGNORECASE):
raise argparse.ArgumentTypeError(
"please specific memory size in format '1234mb'"
)
return s
def argkvpair(val):
"""Splitting key value pair"""
k, v = val.split("=", 1)
return k, v
def run_hook(
logger: logging.Logger,
identifier: str,
args: argparse.Namespace,
actions: typing.List[str],
) -> argparse.Namespace:
"""Run hook actions"""
for action_name in actions:
logger.info("Run %s action %s", identifier.lower(), action_name)
func = get_function(action_name)
if not func:
logger.warning("Failed to get action function instance. Stop process.")
exit(1)
try:
args = func(identifier, args)
except:
logger.exception(
"Error occurs during %s action. Stop process.", identifier.lower()
)
exit(1)
if not isinstance(args, argparse.Namespace):
logger.error(
"Expect namespace object from %s's return value. Got %s",
action_name,
type(args).__name__,
)
exit(1)
return args
def get_function(name: str) -> typing.Callable:
"""Get function by module name"""
m = re.fullmatch(r"([\w.]+):(\w+)", name, re.RegexFlag.I)
if not m:
logger.error("Failed to resolve function name: %s", name)
logger.error("Please specific it in module:func format")
return
module_name, func_name = m.groups()
try:
module = importlib.import_module(module_name)
except ImportError:
logger.error("Failed to find module: %s", module_name)
return
try:
func = getattr(module, func_name)
except AttributeError:
logger.error("Failed to find function %s in %s", func_name, module_name)
return
return func
def human_readable_timeperiod(period: datetime.timedelta):
"""Convert time period to human readable format"""
total_seconds = int(period.total_seconds())
terms = []
days = total_seconds // 86400
if days:
terms.append(f"{days}d")
hours = total_seconds // 3600 % 24
if hours:
terms.append(f"{hours}h")
minutes = total_seconds // 60 % 60
if minutes:
terms.append(f"{minutes}m")
seconds = total_seconds % 60
if seconds:
terms.append(f"{seconds}s")
return " ".join(terms)
if __name__ == "__main__":
exit(main())
| pt | 0.112358 | 2.60752 | 3 |
setup.py | nickyfoto/premoji | 0 | 13517 | """Minimal setup file for learn project."""
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file (used verbatim as the PyPI long description)
README = (HERE / "README.md").read_text()

setup(
    name = 'premoji',
    version = '0.1.4',
    description = 'predict emoji on given text',
    long_description = README,
    long_description_content_type = "text/markdown",
    license = "MIT",
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = 'https://macworks.io',
    # NOTE(review): download_url still points at the v0.1.3-alpha tag while
    # ``version`` is 0.1.4 -- confirm the tag exists / update before release.
    download_url = 'https://github.com/nickyfoto/premoji/archive/v0.1.3-alpha.tar.gz',
    packages = find_packages(where='src'),
    package_dir = {'': 'src'},
    include_package_data=True,
    install_requires = [
        'numpy',
        'scikit-learn',
    ],
    classifiers = [
        'Development Status :: 3 - Alpha',      # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
        'Intended Audience :: Developers',      # Define that your audience are developers
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',   # Again, pick a license
        'Programming Language :: Python :: 3.7',
    ]
)
| """Minimal setup file for learn project."""
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
setup(
name = 'premoji',
version = '0.1.4',
description = 'predict emoji on given text',
long_description = README,
long_description_content_type = "text/markdown",
license = "MIT",
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://macworks.io',
download_url = 'https://github.com/nickyfoto/premoji/archive/v0.1.3-alpha.tar.gz',
packages = find_packages(where='src'),
package_dir = {'': 'src'},
include_package_data=True,
install_requires = [
'numpy',
'scikit-learn',
],
classifiers = [
'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3.7',
]
)
| pt | 0.250915 | 1.776083 | 2 |
02-current-time.py | KeithWilliamsGMIT/Emerging-Technologies-Python-Fundamentals | 0 | 13518 | <reponame>KeithWilliamsGMIT/Emerging-Technologies-Python-Fundamentals<filename>02-current-time.py
# Author: <NAME>
# Date: 21/09/2017
from time import strftime
# This line prints the current date and time to the console in the format 01-10-2017 13:15:30.
# strftime must be imported from the time package before being used.
print(strftime("%d-%m-%Y %H:%M:%S")) | # Author: <NAME>
# Date: 21/09/2017
from time import strftime
# This line prints the current date and time to the console in the format 01-10-2017 13:15:30.
# strftime must be imported from the time package before being used.
print(strftime("%d-%m-%Y %H:%M:%S")) | pt | 0.136429 | 3.701859 | 4 |
git_management/clone.py | afsantaliestra/scripts | 0 | 13519 | <reponame>afsantaliestra/scripts<gh_stars>0
import os
# Clone every repository URL listed (one per line) in list.txt.
filepath = 'list.txt'
with open(filepath) as fp:
    # ``:=`` keeps reading until readline() returns '' at EOF
    while line := fp.readline():
        line = line.strip()
        # NOTE(review): the line is interpolated into a shell command without
        # quoting/escaping; a crafted list.txt entry could inject arbitrary
        # shell -- confirm the file is trusted.
        os.system(f'git clone {line}')
| import os
filepath = 'list.txt'
with open(filepath) as fp:
while line := fp.readline():
line = line.strip()
os.system(f'git clone {line}') | none | 1 | 2.515881 | 3 |
train.py | amansoni/sequential-decision-problem-algorithms | 0 | 13520 | <reponame>amansoni/sequential-decision-problem-algorithms
import argparse
import os
import sys
# Command-line options for the tmux session builder below.
parser = argparse.ArgumentParser(description="Run commands")
parser.add_argument('-w', '--num-workers', default=1, type=int,
                    help="Number of workers")
parser.add_argument('-r', '--remotes', default=None,
                    help='The address of pre-existing VNC servers and '
                         'rewarders to use (e.g. -r vnc://localhost:5900+15900,vnc://localhost:5901+15901).')
parser.add_argument('-e', '--env-id', type=str, default="PongDeterministic-v3",
                    help="Environment id")
parser.add_argument('-l', '--log-dir', type=str, default="/tmp/pong",
                    help="Log directory path")
def new_tmux_cmd(session, name, cmd):
    """Build a ``(window_name, shell_command)`` pair that types *cmd* into
    the tmux window ``session:name``.

    *cmd* may be a string, or a list/tuple whose items are joined with
    spaces after ``str()`` conversion.
    """
    if isinstance(cmd, (list, tuple)):
        cmd = " ".join(str(part) for part in cmd)
    return name, f"tmux send-keys -t {session}:{name} '{cmd}' Enter"
def create_tmux_commands(session, num_workers, remotes, env_id, logdir, shell='sh'):
    """Build the shell command list that creates a tmux session running one
    parameter server, ``num_workers`` A3C workers, tensorboard and htop.

    ``remotes`` is either ``None`` (one local "1" remote per worker) or a
    comma-separated string with exactly one remote per worker.
    """
    # for launching the TF workers and for launching tensorboard
    base_cmd = [
        'CUDA_VISIBLE_DEVICES=', sys.executable, 'worker.py',
        '--log-dir', logdir, '--env-id', env_id,
        '--num-workers', str(num_workers)]

    if remotes is None:
        remotes = ["1"] * num_workers
    else:
        remotes = remotes.split(',')
        assert len(remotes) == num_workers

    # one (window-name, command) pair per tmux window
    cmds_map = [new_tmux_cmd(session, "ps", base_cmd + ["--job-name", "ps"])]
    for i in range(num_workers):
        cmds_map += [new_tmux_cmd(session,
                                  "w-%d" % i, base_cmd + ["--job-name", "worker", "--task", str(i), "--remotes", remotes[i]])]

    cmds_map += [new_tmux_cmd(session, "tb", ["tensorboard --logdir {} --port 12345".format(logdir)])]
    cmds_map += [new_tmux_cmd(session, "htop", ["htop"])]

    windows = [v[0] for v in cmds_map]

    # kill any previous session, recreate it, then add one window per entry
    cmds = [
        "mkdir -p {}".format(logdir),
        "tmux kill-session -t {}".format(session),
        "tmux new-session -s {} -n {} -d {}".format(session, windows[0], shell)
    ]
    for w in windows[1:]:
        cmds += ["tmux new-window -t {} -n {} {}".format(session, w, shell)]
    cmds += ["sleep 1"]
    for window, cmd in cmds_map:
        cmds += [cmd]

    return cmds
def run():
    """Parse CLI args, build the tmux command list, print it and execute it."""
    args = parser.parse_args()
    cmds = create_tmux_commands("a3c", args.num_workers, args.remotes, args.env_id, args.log_dir)
    print("\n".join(cmds))
    os.system("\n".join(cmds))


if __name__ == "__main__":
    run()
| import argparse
import os
import sys
parser = argparse.ArgumentParser(description="Run commands")
parser.add_argument('-w', '--num-workers', default=1, type=int,
help="Number of workers")
parser.add_argument('-r', '--remotes', default=None,
help='The address of pre-existing VNC servers and '
'rewarders to use (e.g. -r vnc://localhost:5900+15900,vnc://localhost:5901+15901).')
parser.add_argument('-e', '--env-id', type=str, default="PongDeterministic-v3",
help="Environment id")
parser.add_argument('-l', '--log-dir', type=str, default="/tmp/pong",
help="Log directory path")
def new_tmux_cmd(session, name, cmd):
if isinstance(cmd, (list, tuple)):
cmd = " ".join(str(v) for v in cmd)
return name, "tmux send-keys -t {}:{} '{}' Enter".format(session, name, cmd)
def create_tmux_commands(session, num_workers, remotes, env_id, logdir, shell='sh'):
# for launching the TF workers and for launching tensorboard
base_cmd = [
'CUDA_VISIBLE_DEVICES=', sys.executable, 'worker.py',
'--log-dir', logdir, '--env-id', env_id,
'--num-workers', str(num_workers)]
if remotes is None:
remotes = ["1"] * num_workers
else:
remotes = remotes.split(',')
assert len(remotes) == num_workers
cmds_map = [new_tmux_cmd(session, "ps", base_cmd + ["--job-name", "ps"])]
for i in range(num_workers):
cmds_map += [new_tmux_cmd(session,
"w-%d" % i, base_cmd + ["--job-name", "worker", "--task", str(i), "--remotes", remotes[i]])]
cmds_map += [new_tmux_cmd(session, "tb", ["tensorboard --logdir {} --port 12345".format(logdir)])]
cmds_map += [new_tmux_cmd(session, "htop", ["htop"])]
windows = [v[0] for v in cmds_map]
cmds = [
"mkdir -p {}".format(logdir),
"tmux kill-session -t {}".format(session),
"tmux new-session -s {} -n {} -d {}".format(session, windows[0], shell)
]
for w in windows[1:]:
cmds += ["tmux new-window -t {} -n {} {}".format(session, w, shell)]
cmds += ["sleep 1"]
for window, cmd in cmds_map:
cmds += [cmd]
return cmds
def run():
args = parser.parse_args()
cmds = create_tmux_commands("a3c", args.num_workers, args.remotes, args.env_id, args.log_dir)
print("\n".join(cmds))
os.system("\n".join(cmds))
if __name__ == "__main__":
run() | pt | 0.14048 | 2.441517 | 2 |
MoleculeACE/benchmark/evaluation/results.py | molML/MoleculeACE | 9 | 13521 | """
Class that holds the results: used for evaluating model performance on activity cliff compounds
<NAME>, Eindhoven University of Technology, March 2022
"""
import os
import numpy as np
from MoleculeACE.benchmark.utils.const import Algorithms
from .metrics import calc_rmse, calc_q2f3
class Results:
def __init__(self, predictions=None, reference=None, y_train=None, data=None,
tanimoto_cliff_compounds=None, scaffold_cliff_compounds=None, levenshtein_cliff_compounds=None,
soft_consensus_cliff_compounds=None):
self.predictions = predictions
self.reference = reference
self.y_train = y_train
self.tanimoto_cliff_compounds = tanimoto_cliff_compounds
self.scaffold_cliff_compounds = scaffold_cliff_compounds
self.levenshtein_cliff_compounds = levenshtein_cliff_compounds
self.soft_consensus_cliff_compounds = soft_consensus_cliff_compounds
self.data = data
self.rmse = np.inf
self.q2f3 = 0
self.tanimoto_cliff_rmse = np.inf
self.scaffold_cliff_rmse = np.inf
self.levenshtein_cliff_rmse = np.inf
self.soft_consensus_cliff_rmse = np.inf
def calc_rmse(self, reference=None, predictions=None):
""" Calculate the rmse from two lists of reference and predicted bioactivity"""
if reference is not None:
self.reference = reference
if predictions is not None:
self.predictions = predictions
# calculate the rmsd
self.rmse = calc_rmse(self.reference, self.predictions)
return self.rmse
def calc_q2f3(self, reference=None, predictions=None, y_train=None):
""" Calculates the Q2 F3 score (best according to Todeschini et al. 2016)
Args:
reference: (1d array-like shape) true test values (float)
predictions: (1d array-like shape) predicted test values (float)
y_train: (1d array-like shape) true train values (float)
Returns: Q2F3 score
"""
if reference is not None:
self.reference = reference
if predictions is not None:
self.predictions = predictions
if y_train is not None:
self.y_train = y_train
# calculate the q2f3
self.q2f3 = calc_q2f3(self.reference, self.predictions, self.y_train)
return self.q2f3
def calc_cliff_rmse(self, reference=None, predictions=None, tanimoto_cliff_compounds=None,
scaffold_cliff_compounds=None, levenshtein_cliff_compounds=None,
soft_consensus_cliff_compounds=None):
""" Calculate the rmse of only cliff compounds
Args:
levenshtein_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
tanimoto_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
scaffold_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
consensus_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
soft_consensus_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
reference: (lst) true bioactivity values
predictions: (lst) predicted bioactivity values
cliff_compounds: (lst) binary list describing if a compound is a cliff compound (1 == cliff, 0 == no cliff)
Returns: (float) rmse
"""
if reference is not None:
self.reference = reference
if predictions is not None:
self.predictions = predictions
if tanimoto_cliff_compounds is not None:
self.tanimoto_cliff_compounds = tanimoto_cliff_compounds
if scaffold_cliff_compounds is not None:
self.scaffold_cliff_compounds = scaffold_cliff_compounds
if levenshtein_cliff_compounds is not None:
self.levenshtein_cliff_compounds = levenshtein_cliff_compounds
if soft_consensus_cliff_compounds is not None:
self.soft_consensus_cliff_compounds = soft_consensus_cliff_compounds
if self.tanimoto_cliff_compounds is not None:
# Subset only reference and predicted values of the cliff compounds, then calculate cliff rmse
clf_ref = [self.reference[idx] for idx, clf in enumerate(self.tanimoto_cliff_compounds) if clf == 1]
clf_prd = [self.predictions[idx] for idx, clf in enumerate(self.tanimoto_cliff_compounds) if clf == 1]
self.tanimoto_cliff_rmse = calc_rmse(clf_ref, clf_prd)
if self.scaffold_cliff_compounds is not None:
# Subset only reference and predicted values of the cliff compounds, then calculate cliff rmse
clf_ref = [self.reference[idx] for idx, clf in enumerate(self.scaffold_cliff_compounds) if clf == 1]
clf_prd = [self.predictions[idx] for idx, clf in enumerate(self.scaffold_cliff_compounds) if clf == 1]
self.scaffold_cliff_rmse = calc_rmse(clf_ref, clf_prd)
if self.levenshtein_cliff_compounds is not None:
# Subset only reference and predicted values of the cliff compounds, then calculate cliff rmse
clf_ref = [self.reference[idx] for idx, clf in enumerate(self.levenshtein_cliff_compounds) if clf == 1]
clf_prd = [self.predictions[idx] for idx, clf in enumerate(self.levenshtein_cliff_compounds) if clf == 1]
self.levenshtein_cliff_rmse = calc_rmse(clf_ref, clf_prd)
if self.soft_consensus_cliff_compounds is not None:
# Subset only reference and predicted values of the cliff compounds, then calculate cliff rmse
clf_ref = [self.reference[idx] for idx, clf in enumerate(self.soft_consensus_cliff_compounds) if clf == 1]
clf_prd = [self.predictions[idx] for idx, clf in enumerate(self.soft_consensus_cliff_compounds) if clf == 1]
self.soft_consensus_cliff_rmse = calc_rmse(clf_ref, clf_prd)
return {'tanimoto_cliff_rmse': self.tanimoto_cliff_rmse, 'scaffold_cliff_rmse': self.scaffold_cliff_rmse,
'levenshtein_cliff_rmse': self.levenshtein_cliff_rmse,
'soft_consensus_cliff_rmse': self.soft_consensus_cliff_rmse}
def to_csv(self, filename, algorithm: Algorithms = None):
    """Append this result as one csv row, writing a header first if the file is new.

    Args:
        filename: (str) path of the csv file to create/append to
        algorithm: (Algorithms) algorithm that produced these results; written as
            an empty string when omitted

    Does nothing when no dataset (`self.data`) is attached to this result.
    """
    if self.data is None:
        return
    # `algorithm` defaults to None; guard the `.value` access so calling
    # to_csv() without an algorithm no longer raises AttributeError.
    algorithm_name = algorithm.value if algorithm is not None else ''
    # Create the output file with a header row if it doesn't exist yet.
    if not os.path.isfile(filename):
        with open(filename, 'w') as f:
            f.write('dataset,'
                    'algorithm,'
                    'descriptor,'
                    'augmentation,'
                    'rmse,'
                    'cliff_rmse,'
                    'n_compounds,'
                    'n_cliff_compounds,'
                    'n_compounds_train,'
                    'n_cliff_compounds_train,'
                    'n_compounds_test,'
                    'n_cliff_compounds_test\n')
    # Always append the data row after the header exists.
    with open(filename, 'a') as f:
        f.write(f'{self.data.name},'
                f'{algorithm_name},'
                f'{self.data.descriptor.value},'
                f'{self.data.augmentation},'
                f'{self.rmse},'
                f'{self.soft_consensus_cliff_rmse},'
                f'{self.data.cliffs.stats["n_compounds"]},'
                f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds"]},'
                f'{self.data.cliffs.stats["n_compounds_train"]},'
                f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds_train"]},'
                f'{self.data.cliffs.stats["n_compounds_test"]},'
                f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds_test"]}\n')
def __repr__(self):
    """Summarise the headline metrics: overall RMSE, Q2F3 and cliff RMSE."""
    parts = (
        f"RMSE: {self.rmse:.4f}",
        f"Q2F3: {self.q2f3:.4f}",
        f"AC-RMSE: {self.soft_consensus_cliff_rmse:.4f}",
    )
    return "\n".join(parts) + "\n"
| """
Class that holds the results: used for evaluating model performance on activity cliff compounds
<NAME>, Eindhoven University of Technology, March 2022
"""
import os
import numpy as np
from MoleculeACE.benchmark.utils.const import Algorithms
from .metrics import calc_rmse, calc_q2f3
class Results:
    """Container for regression predictions and activity-cliff evaluation metrics.

    Stores reference/predicted bioactivities together with binary masks flagging
    which test compounds lie on an activity cliff (one mask per cliff definition:
    Tanimoto, scaffold, Levenshtein, soft consensus), and computes the overall
    RMSE, the Q2F3 score, and cliff-only RMSEs.
    """

    def __init__(self, predictions=None, reference=None, y_train=None, data=None,
                 tanimoto_cliff_compounds=None, scaffold_cliff_compounds=None,
                 levenshtein_cliff_compounds=None, soft_consensus_cliff_compounds=None):
        # Predicted and true test bioactivities, plus train labels (needed for Q2F3).
        self.predictions = predictions
        self.reference = reference
        self.y_train = y_train
        # Binary masks (1 == cliff compound), each the same length as predictions.
        self.tanimoto_cliff_compounds = tanimoto_cliff_compounds
        self.scaffold_cliff_compounds = scaffold_cliff_compounds
        self.levenshtein_cliff_compounds = levenshtein_cliff_compounds
        self.soft_consensus_cliff_compounds = soft_consensus_cliff_compounds
        self.data = data

        # Metrics start at their worst values until the calc_* methods run.
        self.rmse = np.inf
        self.q2f3 = 0
        self.tanimoto_cliff_rmse = np.inf
        self.scaffold_cliff_rmse = np.inf
        self.levenshtein_cliff_rmse = np.inf
        self.soft_consensus_cliff_rmse = np.inf

    @staticmethod
    def _masked_rmse(reference, predictions, cliff_compounds):
        """RMSE over only the entries whose cliff flag equals 1.

        Args:
            reference: (lst) true bioactivity values
            predictions: (lst) predicted bioactivity values
            cliff_compounds: (lst) binary mask, 1 == cliff compound

        Returns: (float) rmse of the masked subset
        """
        ref = [reference[idx] for idx, clf in enumerate(cliff_compounds) if clf == 1]
        prd = [predictions[idx] for idx, clf in enumerate(cliff_compounds) if clf == 1]
        return calc_rmse(ref, prd)

    def calc_rmse(self, reference=None, predictions=None):
        """Calculate the rmse from two lists of reference and predicted bioactivity.

        Args:
            reference: (lst) true bioactivity values; overrides the stored ones if given
            predictions: (lst) predicted bioactivity values; overrides the stored ones if given

        Returns: (float) rmse
        """
        if reference is not None:
            self.reference = reference
        if predictions is not None:
            self.predictions = predictions
        # Delegate to the module-level metric function and cache the result.
        self.rmse = calc_rmse(self.reference, self.predictions)
        return self.rmse

    def calc_q2f3(self, reference=None, predictions=None, y_train=None):
        """Calculate the Q2 F3 score (best according to Todeschini et al. 2016).

        Args:
            reference: (1d array-like shape) true test values (float)
            predictions: (1d array-like shape) predicted test values (float)
            y_train: (1d array-like shape) true train values (float)

        Returns: Q2F3 score
        """
        if reference is not None:
            self.reference = reference
        if predictions is not None:
            self.predictions = predictions
        if y_train is not None:
            self.y_train = y_train
        # Q2F3 needs the training labels as its baseline, hence y_train.
        self.q2f3 = calc_q2f3(self.reference, self.predictions, self.y_train)
        return self.q2f3

    def calc_cliff_rmse(self, reference=None, predictions=None, tanimoto_cliff_compounds=None,
                        scaffold_cliff_compounds=None, levenshtein_cliff_compounds=None,
                        soft_consensus_cliff_compounds=None):
        """Calculate the rmse of only cliff compounds, per cliff definition.

        Args:
            reference: (lst) true bioactivity values
            predictions: (lst) predicted bioactivity values
            tanimoto_cliff_compounds: (lst) binary list of cliff compounds (same length as predictions)
            scaffold_cliff_compounds: (lst) binary list of cliff compounds (same length as predictions)
            levenshtein_cliff_compounds: (lst) binary list of cliff compounds (same length as predictions)
            soft_consensus_cliff_compounds: (lst) binary list of cliff compounds (same length as predictions)

        Returns: (dict) cliff rmse per cliff definition; np.inf for masks never supplied
        """
        if reference is not None:
            self.reference = reference
        if predictions is not None:
            self.predictions = predictions
        if tanimoto_cliff_compounds is not None:
            self.tanimoto_cliff_compounds = tanimoto_cliff_compounds
        if scaffold_cliff_compounds is not None:
            self.scaffold_cliff_compounds = scaffold_cliff_compounds
        if levenshtein_cliff_compounds is not None:
            self.levenshtein_cliff_compounds = levenshtein_cliff_compounds
        if soft_consensus_cliff_compounds is not None:
            self.soft_consensus_cliff_compounds = soft_consensus_cliff_compounds

        # Compute each cliff rmse only for masks that are available.
        if self.tanimoto_cliff_compounds is not None:
            self.tanimoto_cliff_rmse = self._masked_rmse(
                self.reference, self.predictions, self.tanimoto_cliff_compounds)
        if self.scaffold_cliff_compounds is not None:
            self.scaffold_cliff_rmse = self._masked_rmse(
                self.reference, self.predictions, self.scaffold_cliff_compounds)
        if self.levenshtein_cliff_compounds is not None:
            self.levenshtein_cliff_rmse = self._masked_rmse(
                self.reference, self.predictions, self.levenshtein_cliff_compounds)
        if self.soft_consensus_cliff_compounds is not None:
            self.soft_consensus_cliff_rmse = self._masked_rmse(
                self.reference, self.predictions, self.soft_consensus_cliff_compounds)

        return {'tanimoto_cliff_rmse': self.tanimoto_cliff_rmse,
                'scaffold_cliff_rmse': self.scaffold_cliff_rmse,
                'levenshtein_cliff_rmse': self.levenshtein_cliff_rmse,
                'soft_consensus_cliff_rmse': self.soft_consensus_cliff_rmse}

    def to_csv(self, filename, algorithm: Algorithms = None):
        """Append this result as one csv row, writing a header first if the file is new.

        Args:
            filename: (str) path of the csv file to create/append to
            algorithm: (Algorithms) algorithm that produced these results; written as
                an empty string when omitted

        Does nothing when no dataset (`self.data`) is attached to this result.
        """
        if self.data is None:
            return
        # `algorithm` defaults to None; guard the `.value` access so calling
        # to_csv() without an algorithm no longer raises AttributeError.
        algorithm_name = algorithm.value if algorithm is not None else ''
        # Create the output file with a header row if it doesn't exist yet.
        if not os.path.isfile(filename):
            with open(filename, 'w') as f:
                f.write('dataset,'
                        'algorithm,'
                        'descriptor,'
                        'augmentation,'
                        'rmse,'
                        'cliff_rmse,'
                        'n_compounds,'
                        'n_cliff_compounds,'
                        'n_compounds_train,'
                        'n_cliff_compounds_train,'
                        'n_compounds_test,'
                        'n_cliff_compounds_test\n')
        with open(filename, 'a') as f:
            f.write(f'{self.data.name},'
                    f'{algorithm_name},'
                    f'{self.data.descriptor.value},'
                    f'{self.data.augmentation},'
                    f'{self.rmse},'
                    f'{self.soft_consensus_cliff_rmse},'
                    f'{self.data.cliffs.stats["n_compounds"]},'
                    f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds"]},'
                    f'{self.data.cliffs.stats["n_compounds_train"]},'
                    f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds_train"]},'
                    f'{self.data.cliffs.stats["n_compounds_test"]},'
                    f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds_test"]}\n')

    def __repr__(self):
        # Human-readable summary of the headline metrics; inf until computed.
        return f"RMSE: {self.rmse:.4f}\n" \
               f"Q2F3: {self.q2f3:.4f}\n" \
               f"AC-RMSE: {self.soft_consensus_cliff_rmse:.4f}\n"
| pt | 0.111068 | 2.543025 | 3 |
checkout/orders/__init__.py | accelero-cloud/tutorials | 2 | 13522 | <reponame>accelero-cloud/tutorials
from checkout.orders.order_service import Order, AuthorisationRequest
| from checkout.orders.order_service import Order, AuthorisationRequest | none | 1 | 1.117923 | 1 |
hiisi/__init__.py | ritvje/hiisi | 0 | 13523 | from .hiisi import HiisiHDF
from .odim import OdimPVOL, OdimCOMP
__version__ = "0.0.6"
| from .hiisi import HiisiHDF
from .odim import OdimPVOL, OdimCOMP
__version__ = "0.0.6"
| none | 1 | 1.053539 | 1 |
End of preview. Expand
in Dataset Viewer.
The dataset card metadata (features, splits and sizes) is listed below:
dataset_info: features: - name: max_stars_repo_path dtype: string - name: max_stars_repo_name dtype: string - name: max_stars_count dtype: int64 - name: id dtype: string - name: content dtype: string - name: content_cleaned dtype: string - name: language dtype: string - name: language_score dtype: float64 - name: edu_score dtype: float64 - name: edu_int_score dtype: int64 splits: - name: train num_bytes: 1451315 num_examples: 100 download_size: 551098 dataset_size: 1451315 configs: - config_name: default data_files: - split: train path: data/train-* license: apache-2.0
- Downloads last month
- 2