Dataset schema (feature: dtype, observed value range):

hexsha: stringlengths (40 to 40)
repo: stringlengths (5 to 121)
path: stringlengths (4 to 227)
license: sequence
language: stringclasses (1 value)
identifier: stringlengths (1 to 107)
return_type: stringlengths (2 to 237)
original_string: stringlengths (75 to 13.4k)
original_docstring: stringlengths (13 to 12.9k)
docstring: stringlengths (13 to 2.57k)
docstring_tokens: sequence
code: stringlengths (23 to 1.88k)
code_tokens: sequence
short_docstring: stringlengths (1 to 1.32k)
short_docstring_tokens: sequence
comment: sequence
parameters: list
docstring_params: dict
code_with_imports: stringlengths (23 to 1.88k)
idxs: int64 (0 to 611k)
cluster: int64 (0 to 1.02k)
641dc17280318cc73531d6b0367fcc38ff54a6da
timolegros/TwitterAPI
main.py
[ "MIT" ]
Python
timeToSnowFlake
<not_specific>
def timeToSnowFlake(endDate):
    """
    Takes the UTC end date in '2020-07-02 21:58:00+00:00' format and converts it to a local time epoch then snowflake.
    Can also take UTC end date in '2020-07-02 21:58:00' format to convert to epoch and then snowflake
    :param endDate: UTC date in '2020-07-02 21:58:00+00:00' or '2020-07-02 21:58:00' format
    :return: snowflake converted from local timezone epoch -- this is so snowflake matches local time
    """
    timeStamp = endDate.timestamp()
    snowFlake = (int(round(timeStamp * 1000)) - 1288834974657) << 22
    return snowFlake
Takes the UTC end date in '2020-07-02 21:58:00+00:00' format and converts it to a local time epoch then snowflake.
Can also take UTC end date in '2020-07-02 21:58:00' format to convert to epoch and then snowflake
:param endDate: UTC date in '2020-07-02 21:58:00+00:00' or '2020-07-02 21:58:00' format
:return: snowflake converted from local timezone epoch -- this is so snowflake matches local time
Takes the UTC end date in '2020-07-02 21:58:00+00:00' format and converts it to a local time epoch then snowflake. Can also take UTC end date in '2020-07-02 21:58:00' format to convert to epoch and then snowflake
[ "Takes", "the", "UTC", "end", "date", "in", "'", "2020", "-", "07", "-", "02", "21", ":", "58", ":", "00", "+", "00", ":", "00", "'", "format", "and", "converts", "it", "to", "a", "local", "time", "epoch", "then", "snowflake", ".", "Can", "also", "take", "UTC", "end", "date", "in", "'", "2020", "-", "07", "-", "02", "21", ":", "58", ":", "00", "'", "format", "to", "convert", "to", "epoch", "and", "then", "snowflake" ]
def timeToSnowFlake(endDate):
    timeStamp = endDate.timestamp()
    snowFlake = (int(round(timeStamp * 1000)) - 1288834974657) << 22
    return snowFlake
[ "def", "timeToSnowFlake", "(", "endDate", ")", ":", "timeStamp", "=", "endDate", ".", "timestamp", "(", ")", "snowFlake", "=", "(", "int", "(", "round", "(", "timeStamp", "*", "1000", ")", ")", "-", "1288834974657", ")", "<<", "22", "return", "snowFlake" ]
Takes the UTC end date in '2020-07-02 21:58:00+00:00' format and converts it to a local time epoch then snowflake.
[ "Takes", "the", "UTC", "end", "date", "in", "'", "2020", "-", "07", "-", "02", "21", ":", "58", ":", "00", "+", "00", ":", "00", "'", "format", "and", "converts", "it", "to", "a", "local", "time", "epoch", "then", "snowflake", "." ]
[ "\"\"\"\n Takes the UTC end date in '2020-07-02 21:58:00+00:00' format and converts it to a local time epoch then snowflake.\n Can also take UTC end date in '2020-07-02 21:58:00' format to convert to epoch and then snowflake\n :param endDate: UTC date in '2020-07-02 21:58:00+00:00' or '2020-07-02 21:58:00' format\n :return: snowflake converted from local timezone epoch -- this is so snowflake matches local time\n \"\"\"" ]
[ { "param": "endDate", "type": null } ]
{ "returns": [ { "docstring": "snowflake converted from local timezone epoch -- this is so snowflake matches local time", "docstring_tokens": [ "snowflake", "converted", "from", "local", "timezone", "epoch", "--", "this", "is", "so", "snowflake", "matches", "local", "time" ], "type": null } ], "raises": [], "params": [ { "identifier": "endDate", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def timeToSnowFlake(endDate):
    timeStamp = endDate.timestamp()
    snowFlake = (int(round(timeStamp * 1000)) - 1288834974657) << 22
    return snowFlake
610,187
227
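A minimal usage sketch for the timeToSnowFlake row above; the example datetime is invented for illustration. 1288834974657 is the Twitter snowflake epoch in milliseconds, so the result is a snowflake-style ID whose timestamp bits encode the given instant.

from datetime import datetime, timezone

def timeToSnowFlake(endDate):
    timeStamp = endDate.timestamp()
    return (int(round(timeStamp * 1000)) - 1288834974657) << 22

# hypothetical input in the documented '2020-07-02 21:58:00+00:00' format
end = datetime(2020, 7, 2, 21, 58, 0, tzinfo=timezone.utc)
print(timeToSnowFlake(end))  # integer suitable as a since_id/max_id bound in Twitter queries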
b78df6f2c1e1f0f1911d7913e006c13f23092ac0
mhassan1900/MHut
mhut/datautils.py
[ "MIT" ]
Python
roundoff_df
<not_specific>
def roundoff_df(df, places=0, columns=None, indices=None):
    """Round off all entries in DataFrame. If no specific columns or
    indices are provided all DataFrame elements are rounded.
    Returns a DataFrame with rounding applied

    places : number of decimal places to round
    columns: None or list of columns to apply rounding
    indices: None or list of indices to apply rounding
    """
    tmp = df.copy()
    if columns is None and indices is None:  # round all
        for j in tmp.columns:
            tmp[j] = tmp[j].round(places)
    elif columns is not None and indices is None:  # round specific columns
        for j in columns:
            tmp[j] = tmp[j].round(places)
    elif columns is None and indices is not None:  # round specific rows
        for i in indices:
            # .ix was removed from pandas; .loc is the modern equivalent
            tmp.loc[i] = tmp.loc[i].round(places)
    else:  # specific rows & columns (slow at the moment)
        for i in indices:
            tmp.loc[i, columns] = tmp.loc[i, columns].round(places)
    return tmp
Round off all entries in DataFrame. If no specific columns or
indices are provided all DataFrame elements are rounded.
Returns a DataFrame with rounding applied

places : number of decimal places to round
columns: None or list of columns to apply rounding
indices: None or list of indices to apply rounding
Round off all entries in DataFrame. If no specific columns or
indices are provided all DataFrame elements are rounded.
Returns a DataFrame with rounding applied

places : number of decimal places to round
columns: None or list of columns to apply rounding
indices: None or list of indices to apply rounding
[ "Round", "off", "all", "entries", "in", "DataFrame", ".", "If", "no", "specific", "columns", "or", "indices", "are", "provided", "all", "DataFrame", "elements", "are", "rounded", ".", "Returns", "a", "DataFrame", "with", "rounding", "applied", "places", ":", "number", "of", "decimal", "places", "to", "round", "columns", ":", "None", "or", "list", "of", "columns", "to", "apply", "rounding", "indices", ":", "None", "or", "list", "of", "indices", "to", "apply", "rounding" ]
def roundoff_df(df, places=0, columns=None, indices=None):
    tmp = df.copy()
    if columns is None and indices is None:
        for j in tmp.columns:
            tmp[j] = tmp[j].round(places)
    elif columns is not None and indices is None:
        for j in columns:
            tmp[j] = tmp[j].round(places)
    elif columns is None and indices is not None:
        for i in indices:
            tmp.loc[i] = tmp.loc[i].round(places)  # .ix removed in modern pandas; .loc replaces it
    else:
        for i in indices:
            tmp.loc[i, columns] = tmp.loc[i, columns].round(places)
    return tmp
[ "def", "roundoff_df", "(", "df", ",", "places", "=", "0", ",", "columns", "=", "None", ",", "indices", "=", "None", ")", ":", "tmp", "=", "df", ".", "copy", "(", ")", "if", "columns", "==", "None", "and", "indices", "==", "None", ":", "for", "j", "in", "tmp", ".", "columns", ":", "tmp", "[", "j", "]", "=", "tmp", "[", "j", "]", ".", "round", "(", "places", ")", "elif", "columns", "!=", "None", "and", "indices", "==", "None", ":", "for", "j", "in", "columns", ":", "tmp", "[", "j", "]", "=", "tmp", "[", "j", "]", ".", "round", "(", "places", ")", "elif", "columns", "==", "None", "and", "indices", "!=", "None", ":", "for", "i", "in", "indices", ":", "tmp", ".", "ix", "[", "i", "]", "=", "tmp", ".", "ix", "[", "i", "]", ".", "round", "(", "places", ")", "else", ":", "for", "i", "in", "indices", ":", "tmp", ".", "ix", "[", "i", ",", "columns", "]", "=", "tmp", ".", "ix", "[", "i", ",", "columns", "]", ".", "round", "(", "places", ")", "return", "tmp" ]
Round off all entries in DataFrame.
[ "Round", "off", "all", "entries", "in", "DataFrame", "." ]
[ "\"\"\"Round off all entries in DataFrame. If no specific columns or\n indices are provided all DataFrame elements are rounded.\n Returns a DataFrame with rounding applied\n\n places : number of decimal places to round\n columns: None or list of columns to apply rounding\n indices: None or list of indices to apply rounding\n \"\"\"", "# round all", "# round specific columns", "# round specific rows", "# specific rows & columns (slow at the moment)" ]
[ { "param": "df", "type": null }, { "param": "places", "type": null }, { "param": "columns", "type": null }, { "param": "indices", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "places", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "columns", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "indices", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def roundoff_df(df, places=0, columns=None, indices=None):
    tmp = df.copy()
    if columns is None and indices is None:
        for j in tmp.columns:
            tmp[j] = tmp[j].round(places)
    elif columns is not None and indices is None:
        for j in columns:
            tmp[j] = tmp[j].round(places)
    elif columns is None and indices is not None:
        for i in indices:
            tmp.loc[i] = tmp.loc[i].round(places)  # .ix removed in modern pandas; .loc replaces it
    else:
        for i in indices:
            tmp.loc[i, columns] = tmp.loc[i, columns].round(places)
    return tmp
610,188
256
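A short usage sketch, assuming the corrected roundoff_df above is in scope; the sample frame is invented. Modern pandas can express the all-columns case directly as df.round(places), which is why only the row/column-restricted branches still earn their keep.

import pandas as pd

df = pd.DataFrame({"a": [1.234, 5.678], "b": [9.876, 3.210]})
print(roundoff_df(df, places=1, columns=["a"]))  # 'a' rounded to 1 decimal place, 'b' unchanged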
b0513098cb6c23d23e9ff30e1236f24a37cc3a58
NREL/disco
disco/extensions/upgrade_simulation/upgrades/voltage_upgrade_functions.py
[ "BSD-3-Clause" ]
Python
searchDictKey
<not_specific>
def searchDictKey(dct, value):
    """This function returns a list of dictionary keys that have a certain value

    Parameters
    ----------
    dct
    value

    Returns
    -------

    """
    return [key for key in dct if (dct[key] == value)]
This function returns a list of dictionary keys that have a certain value

Parameters
----------
dct
value

Returns
-------
This function returns a list of dictionary keys that have a certain value
[ "This", "function", "returns", "a", "list", "of", "dictionary", "keys", "that", "have", "a", "certain", "value" ]
def searchDictKey(dct, value):
    return [key for key in dct if (dct[key] == value)]
[ "def", "searchDictKey", "(", "dct", ",", "value", ")", ":", "return", "[", "key", "for", "key", "in", "dct", "if", "(", "dct", "[", "key", "]", "==", "value", ")", "]" ]
This function returns a list of dictionary keys that have a certain value
[ "This", "function", "returns", "a", "list", "of", "dictionary", "keys", "that", "have", "a", "certain", "value" ]
[ "\"\"\"This function returns a list of dictionary keys that have a certain value\r\n\r\n Parameters\r\n ----------\r\n dct\r\n value\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"" ]
[ { "param": "dct", "type": null }, { "param": "value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dct", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "dct\r", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "value\r", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
def searchDictKey(dct, value):
    return [key for key in dct if (dct[key] == value)]
610,189
622
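A quick self-contained check for searchDictKey; the sample dictionary is hypothetical:

def searchDictKey(dct, value):
    return [key for key in dct if (dct[key] == value)]

print(searchDictKey({"a": 1, "b": 2, "c": 1}, 1))  # ['a', 'c'] - every key mapped to 1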
4e9073f3337263e2d34e17eac44e73060132ecb6
ryanwang520/poi
poi/nodes.py
[ "MIT" ]
Python
flatten
null
def flatten(items):
    """Yield items from any nested iterable"""
    for x in items:
        if isinstance(x, abc.Iterable) and not isinstance(x, (str, bytes)):
            for sub_x in flatten(x):
                yield sub_x
        else:
            yield x
Yield items from any nested iterable
Yield items from any nested iterable
[ "Yield", "items", "from", "any", "nested", "iterable" ]
def flatten(items):
    for x in items:
        if isinstance(x, abc.Iterable) and not isinstance(x, (str, bytes)):
            for sub_x in flatten(x):
                yield sub_x
        else:
            yield x
[ "def", "flatten", "(", "items", ")", ":", "for", "x", "in", "items", ":", "if", "isinstance", "(", "x", ",", "abc", ".", "Iterable", ")", "and", "not", "isinstance", "(", "x", ",", "(", "str", ",", "bytes", ")", ")", ":", "for", "sub_x", "in", "flatten", "(", "x", ")", ":", "yield", "sub_x", "else", ":", "yield", "x" ]
Yield items from any nested iterable
[ "Yield", "items", "from", "any", "nested", "iterable" ]
[ "\"\"\"Yield items from any nested iterable\"\"\"" ]
[ { "param": "items", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "items", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from collections import abc  # fixed: the stdlib "abc" module has no Iterable; collections.abc does

def flatten(items):
    for x in items:
        if isinstance(x, abc.Iterable) and not isinstance(x, (str, bytes)):
            for sub_x in flatten(x):
                yield sub_x
        else:
            yield x
610,190
296
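A self-contained usage sketch for flatten, using the corrected collections.abc import; the nested input is invented:

from collections import abc

def flatten(items):
    for x in items:
        if isinstance(x, abc.Iterable) and not isinstance(x, (str, bytes)):
            for sub_x in flatten(x):
                yield sub_x
        else:
            yield x

print(list(flatten([1, [2, [3, "four"]], (5,)])))  # [1, 2, 3, 'four', 5] - strings stay whole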
5fc77d49704fe2bc976a1480249487d1069b3e9a
darenw/FRITEX
py/friterexp_sym.py
[ "MIT" ]
Python
LfromS
<not_specific>
def LfromS(seq):
    """
    Compute Schroder function value L from a given sequence.
    This performs the calculation by plodding through the algorithm given in
    the paper. Humans can easily recognize the relation between elements of S
    and length of runs of 0s or 1s in the binary representation of L. Knowing
    that, there probably is a slicker way to code LfromS().

    Args:
        S (list of int): Sequence - see paper for details

    Returns:
        value of L, real in range 0.0 .. 1.0

    Note that overflows are no problem - S may contain "large" values like ten.
    Note also there's no check on the length of S, or if it's empty.
    """
    Lambda = 1.0
    L = Lambda/2
    for c in reversed(seq):
        L = L/(2**c)
        L = Lambda - L
    L = Lambda-L
    return L
Compute Schroder function value L from a given sequence.
This performs the calculation by plodding through the algorithm given in
the paper. Humans can easily recognize the relation between elements of S
and length of runs of 0s or 1s in the binary representation of L. Knowing
that, there probably is a slicker way to code LfromS().

Args:
    S (list of int): Sequence - see paper for details

Returns:
    value of L, real in range 0.0 .. 1.0

Note that overflows are no problem - S may contain "large" values like ten.
Note also there's no check on the length of S, or if it's empty.
Compute Schroder function value L from a given sequence. This performs the calculation by plodding through the algorithm given in the paper. Humans can easily recognize the relation between elements of S and length of runs of 0s or 1s in the binary representation of L. Knowing that, there probably is a slicker way to code LfromS().
[ "Compute", "Schroder", "function", "value", "L", "from", "a", "given", "sequence", ".", "This", "performs", "the", "calculation", "by", "plodding", "through", "the", "algorithm", "given", "in", "the", "paper", ".", "Humans", "can", "easily", "recognize", "the", "relation", "between", "elements", "of", "S", "and", "length", "of", "runs", "of", "0s", "or", "1s", "in", "the", "binary", "representation", "of", "L", ".", "Knowing", "that", "there", "probably", "is", "a", "slicker", "way", "to", "code", "LfromS", "()", "." ]
def LfromS(seq):
    Lambda = 1.0
    L = Lambda/2
    for c in reversed(seq):
        L = L/(2**c)
        L = Lambda - L
    L = Lambda-L
    return L
[ "def", "LfromS", "(", "seq", ")", ":", "Lambda", "=", "1.0", "L", "=", "Lambda", "/", "2", "for", "c", "in", "reversed", "(", "seq", ")", ":", "L", "=", "L", "/", "(", "2", "**", "c", ")", "L", "=", "Lambda", "-", "L", "L", "=", "Lambda", "-", "L", "return", "L" ]
Compute Schroder function value L from a given sequence.
[ "Compute", "Schroder", "function", "value", "L", "from", "a", "given", "sequence", "." ]
[ "\"\"\"\n\t\tCompute Schroder function value L from a given sequence.\n\t\tThis performs the calculation by plodding through the algorithm given in\n\t\tthe paper. Humans can easily recognize the relation between elements of S\n\t\tand length of runs of 0s or 1s in the binary representation of L. Knowing\n\t\tthat, there probably is a slicker way to code LfromS().\n\t\t\n\t\tArgs:\n\t\t\tS (list of int): Sequence - see paper for details\n\t\t\t\n\t\tReturns:\n\t\t\tvalue of L, real in range 0.0 .. 1.0\n\t\t\t\n\t\tNote that overflows are no problem - S may contain \"large\" values like ten.\n\t\tNote also there's no check on the length of S, or if it's empty.\n\t\"\"\"" ]
[ { "param": "seq", "type": null } ]
{ "returns": [ { "docstring": "value of L, real in range 0.0 .. 1.0", "docstring_tokens": [ "value", "of", "L", "real", "in", "range", "0", ".", "0", "..", "1", ".", "0" ], "type": null } ], "raises": [], "params": [ { "identifier": "seq", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "S", "type": null, "docstring": "see paper for details", "docstring_tokens": [ "see", "paper", "for", "details" ], "default": null, "is_optional": false } ], "others": [] }
def LfromS(seq):
    Lambda = 1.0
    L = Lambda/2
    for c in reversed(seq):
        L = L/(2**c)
        L = Lambda - L
    L = Lambda-L
    return L
610,191
398
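A usage sketch for LfromS; the input sequence is a made-up example, not one from the paper:

def LfromS(seq):
    Lambda = 1.0
    L = Lambda/2
    for c in reversed(seq):
        L = L/(2**c)
        L = Lambda - L
    L = Lambda-L
    return L

print(LfromS([1, 1, 1]))  # 0.3125 - always lands in the documented range 0.0 .. 1.0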
5604d37f37aeac9fe279fc5b2086af959919f749
sdss/lvmspec
py/desispec/io/util.py
[ "BSD-3-Clause" ]
Python
native_endian
<not_specific>
def native_endian(data):
    """Convert numpy array data to native endianness if needed.

    Returns new array if endianness is swapped, otherwise returns input data

    Context:
        By default, FITS data from astropy.io.fits.getdata() are not Intel
        native endianness and scipy 0.14 sparse matrices have a bug with
        non-native endian data.
    """
    if data.dtype.isnative:
        return data
    else:
        return data.byteswap().newbyteorder()
Convert numpy array data to native endianness if needed.

Returns new array if endianness is swapped, otherwise returns input data

Context:
    By default, FITS data from astropy.io.fits.getdata() are not Intel
    native endianness and scipy 0.14 sparse matrices have a bug with
    non-native endian data.
Convert numpy array data to native endianness if needed. Returns new array if endianness is swapped, otherwise returns input data By default, FITS data from astropy.io.fits.getdata() are not Intel native endianness and scipy 0.14 sparse matrices have a bug with non-native endian data.
[ "Convert", "numpy", "array", "data", "to", "native", "endianness", "if", "needed", ".", "Returns", "new", "array", "if", "endianness", "is", "swapped", "otherwise", "returns", "input", "data", "By", "default", "FITS", "data", "from", "astropy", ".", "io", ".", "fits", ".", "getdata", "()", "are", "not", "Intel", "native", "endianness", "and", "scipy", "0", ".", "14", "sparse", "matrices", "have", "a", "bug", "with", "non", "-", "native", "endian", "data", "." ]
def native_endian(data):
    if data.dtype.isnative:
        return data
    else:
        return data.byteswap().newbyteorder()
[ "def", "native_endian", "(", "data", ")", ":", "if", "data", ".", "dtype", ".", "isnative", ":", "return", "data", "else", ":", "return", "data", ".", "byteswap", "(", ")", ".", "newbyteorder", "(", ")" ]
Convert numpy array data to native endianness if needed.
[ "Convert", "numpy", "array", "data", "to", "native", "endianness", "if", "needed", "." ]
[ "\"\"\"Convert numpy array data to native endianness if needed.\n\n Returns new array if endianness is swapped, otherwise returns input data\n\n Context:\n By default, FITS data from astropy.io.fits.getdata() are not Intel\n native endianness and scipy 0.14 sparse matrices have a bug with\n non-native endian data.\n \"\"\"" ]
[ { "param": "data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def native_endian(data):
    if data.dtype.isnative:
        return data
    else:
        return data.byteswap().newbyteorder()
610,192
787
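A demonstration sketch, assuming NumPy 1.x: ndarray.newbyteorder() was removed in NumPy 2.0, where data.byteswap().view(data.dtype.newbyteorder()) is the replacement.

import numpy as np

def native_endian(data):
    if data.dtype.isnative:
        return data
    return data.byteswap().newbyteorder()

big = np.arange(4, dtype='>i4')            # explicitly big-endian int32, as FITS data often is
print(big.dtype.isnative)                  # False on little-endian (Intel) hosts
print(native_endian(big).dtype.isnative)   # True - same values, native byte order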
4b2023b090e124ee6c65277901ffc2a4a41b36e4
zgeor/runner
Lagramge.Tools/plotter.py
[ "Apache-2.0" ]
Python
loadJsonData
<not_specific>
def loadJsonData(fileName):
    """
    Parses JSON file.

    Returns a dictionary.
    """
    with open(fileName) as json_data:
        data = json.load(json_data)
    return data
Parses JSON file. Returns a dictionary.
Parses JSON file. Returns a dictionary.
[ "Parses", "JSON", "file", ".", "Returns", "a", "dictionary", "." ]
def loadJsonData(fileName):
    with open(fileName) as json_data:
        data = json.load(json_data)
    return data
[ "def", "loadJsonData", "(", "fileName", ")", ":", "with", "open", "(", "fileName", ")", "as", "json_data", ":", "data", "=", "json", ".", "load", "(", "json_data", ")", "return", "data" ]
Parses JSON file.
[ "Parses", "JSON", "file", "." ]
[ "\"\"\"\n Parses JSON file.\n \n Returns a dictionary.\n \"\"\"" ]
[ { "param": "fileName", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "fileName", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import json

def loadJsonData(fileName):
    with open(fileName) as json_data:
        data = json.load(json_data)
    return data
610,193
515
a0dd264d0f649cb82335bcac8d603d2ae157cd45
digirati-co-uk/elife-poa-xml-generation
parsePoaXml.py
[ "MIT" ]
Python
remove_tag
<not_specific>
def remove_tag(tag_name, string):
    """
    Remove open and close tags - the tags themselves only - using
    a non-greedy angle bracket pattern match
    """
    if not string:
        return string
    p = re.compile('</?' + tag_name + '.*?>')
    string = p.sub('', string)
    return string
Remove open and close tags - the tags themselves only - using a non-greedy angle bracket pattern match
Remove open and close tags - the tags themselves only - using a non-greedy angle bracket pattern match
[ "Remove", "open", "and", "close", "tags", "-", "the", "tags", "themselves", "only", "-", "using", "a", "non", "-", "greedy", "angle", "bracket", "pattern", "match" ]
def remove_tag(tag_name, string):
    if not string:
        return string
    p = re.compile('</?' + tag_name + '.*?>')
    string = p.sub('', string)
    return string
[ "def", "remove_tag", "(", "tag_name", ",", "string", ")", ":", "if", "not", "string", ":", "return", "string", "p", "=", "re", ".", "compile", "(", "'</?'", "+", "tag_name", "+", "'.*?>'", ")", "string", "=", "p", ".", "sub", "(", "''", ",", "string", ")", "return", "string" ]
Remove open and close tags - the tags themselves only - using a non-greedy angle bracket pattern match
[ "Remove", "open", "and", "close", "tags", "-", "the", "tags", "themselves", "only", "-", "using", "a", "non", "-", "greedy", "angle", "bracket", "pattern", "match" ]
[ "\"\"\"\n Remove open and close tags - the tags themselves only - using\n a non-greedy angle bracket pattern match\n \"\"\"" ]
[ { "param": "tag_name", "type": null }, { "param": "string", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "tag_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re

def remove_tag(tag_name, string):
    if not string:
        return string
    p = re.compile('</?' + tag_name + '.*?>')
    string = p.sub('', string)
    return string
610,194
808
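A self-contained usage sketch for remove_tag; the sample markup is invented:

import re

def remove_tag(tag_name, string):
    if not string:
        return string
    p = re.compile('</?' + tag_name + '.*?>')
    return p.sub('', string)

print(remove_tag('i', 'A <i>very</i> nice result'))  # 'A very nice result' - tags gone, text kept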
fe30e46a4c6e5ba3d5c8fc5468d0febdf2284176
airtower-luna/mod_gnutls
test/tests/28_HTTP2_support/hooks.py
[ "Apache-2.0" ]
Python
run_connection
null
def run_connection(testname, conn_log, response_log):
    """Check if HTTP/2 connections using mod_gnutls and mod_http2 work."""
    url = f'https://{os.environ["TEST_HOST"]}:{os.environ["TEST_PORT"]}' \
        '/status?auto'
    command = [os.environ['HTTP_CLI'], '--http2', '--location', '--verbose',
               '--cacert', 'authority/x509.pem', url]
    proc = subprocess.run(command, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, text=True)
    print(proc.stderr)
    print(proc.stderr, file=conn_log)
    print(proc.stdout)
    print(proc.stdout, file=response_log)
    proc.check_returncode()
Check if HTTP/2 connections using mod_gnutls and mod_http2 work.
Check if HTTP/2 connections using mod_gnutls and mod_http2 work.
[ "Check", "if", "HTTP", "/", "2", "connections", "using", "mod_gnutls", "and", "mod_http2", "work", "." ]
def run_connection(testname, conn_log, response_log):
    url = f'https://{os.environ["TEST_HOST"]}:{os.environ["TEST_PORT"]}' \
        '/status?auto'
    command = [os.environ['HTTP_CLI'], '--http2', '--location', '--verbose',
               '--cacert', 'authority/x509.pem', url]
    proc = subprocess.run(command, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, text=True)
    print(proc.stderr)
    print(proc.stderr, file=conn_log)
    print(proc.stdout)
    print(proc.stdout, file=response_log)
    proc.check_returncode()
[ "def", "run_connection", "(", "testname", ",", "conn_log", ",", "response_log", ")", ":", "url", "=", "f'https://{os.environ[\"TEST_HOST\"]}:{os.environ[\"TEST_PORT\"]}'", "'/status?auto'", "command", "=", "[", "os", ".", "environ", "[", "'HTTP_CLI'", "]", ",", "'--http2'", ",", "'--location'", ",", "'--verbose'", ",", "'--cacert'", ",", "'authority/x509.pem'", ",", "url", "]", "proc", "=", "subprocess", ".", "run", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "text", "=", "True", ")", "print", "(", "proc", ".", "stderr", ")", "print", "(", "proc", ".", "stderr", ",", "file", "=", "conn_log", ")", "print", "(", "proc", ".", "stdout", ")", "print", "(", "proc", ".", "stdout", ",", "file", "=", "response_log", ")", "proc", ".", "check_returncode", "(", ")" ]
Check if HTTP/2 connections using mod_gnutls and mod_http2 work.
[ "Check", "if", "HTTP", "/", "2", "connections", "using", "mod_gnutls", "and", "mod_http2", "work", "." ]
[ "\"\"\"Check if HTTP/2 connections using mod_gnutls and mod_http2 work.\"\"\"" ]
[ { "param": "testname", "type": null }, { "param": "conn_log", "type": null }, { "param": "response_log", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "testname", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "conn_log", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "response_log", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess
import os

def run_connection(testname, conn_log, response_log):
    url = f'https://{os.environ["TEST_HOST"]}:{os.environ["TEST_PORT"]}' \
        '/status?auto'
    command = [os.environ['HTTP_CLI'], '--http2', '--location', '--verbose',
               '--cacert', 'authority/x509.pem', url]
    proc = subprocess.run(command, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, text=True)
    print(proc.stderr)
    print(proc.stderr, file=conn_log)
    print(proc.stdout)
    print(proc.stdout, file=response_log)
    proc.check_returncode()
610,195
539
ec75183dd6de8fb86ebaf446814b5dc685657735
UofT-EcoSystem/rlscope
docs/conf.py
[ "Apache-2.0" ]
Python
pprint_msg
<not_specific>
def pprint_msg(dic, prefix=' '):
    """
    Give logger.info a string for neatly printing a dictionary.

    Usage:
    logger.info(pprint_msg(arbitrary_object))
    """
    return "\n" + textwrap.indent(pprint.pformat(dic), prefix=prefix)
Give logger.info a string for neatly printing a dictionary.

Usage:
    logger.info(pprint_msg(arbitrary_object))
Give logger.info a string for neatly printing a dictionary.
[ "Give", "logger", ".", "info", "a", "string", "for", "neatly", "printing", "a", "dictionary", "." ]
def pprint_msg(dic, prefix=' '):
    return "\n" + textwrap.indent(pprint.pformat(dic), prefix=prefix)
[ "def", "pprint_msg", "(", "dic", ",", "prefix", "=", "' '", ")", ":", "return", "\"\\n\"", "+", "textwrap", ".", "indent", "(", "pprint", ".", "pformat", "(", "dic", ")", ",", "prefix", "=", "prefix", ")" ]
Give logger.info a string for neatly printing a dictionary.
[ "Give", "logger", ".", "info", "a", "string", "for", "neatly", "printing", "a", "dictionary", "." ]
[ "\"\"\"\n Give logger.info a string for neatly printing a dictionary.\n\n Usage:\n logger.info(pprint_msg(arbitrary_object))\n \"\"\"" ]
[ { "param": "dic", "type": null }, { "param": "prefix", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dic", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "prefix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import textwrap
import pprint

def pprint_msg(dic, prefix=' '):
    return "\n" + textwrap.indent(pprint.pformat(dic), prefix=prefix)
610,196
656
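A quick sketch of the output shape; the dictionary is invented:

import pprint
import textwrap

def pprint_msg(dic, prefix=' '):
    return "\n" + textwrap.indent(pprint.pformat(dic), prefix=prefix)

print(pprint_msg({"alpha": 1, "beta": [2, 3]}))  # leading newline, then an indented pretty-printed dict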
862b2c41f59186b78178aad88014d515c2f6d058
thereisnoaddress/hmphKeyboard
nltk/util.py
[ "Apache-2.0" ]
Python
tokenwrap
<not_specific>
def tokenwrap(tokens, separator=" ", width=70):
    """
    Pretty print a list of text tokens, breaking lines on whitespace

    :param tokens: the tokens to print
    :type tokens: list
    :param separator: the string to use to separate tokens
    :type separator: str
    :param width: the display width (default=70)
    :type width: int
    """
    return '\n'.join(textwrap.wrap(separator.join(tokens), width=width))
Pretty print a list of text tokens, breaking lines on whitespace

:param tokens: the tokens to print
:type tokens: list
:param separator: the string to use to separate tokens
:type separator: str
:param width: the display width (default=70)
:type width: int
Pretty print a list of text tokens, breaking lines on whitespace
[ "Pretty", "print", "a", "list", "of", "text", "tokens", "breaking", "lines", "on", "whitespace" ]
def tokenwrap(tokens, separator=" ", width=70):
    return '\n'.join(textwrap.wrap(separator.join(tokens), width=width))
[ "def", "tokenwrap", "(", "tokens", ",", "separator", "=", "\" \"", ",", "width", "=", "70", ")", ":", "return", "'\\n'", ".", "join", "(", "textwrap", ".", "wrap", "(", "separator", ".", "join", "(", "tokens", ")", ",", "width", "=", "width", ")", ")" ]
Pretty print a list of text tokens, breaking lines on whitespace
[ "Pretty", "print", "a", "list", "of", "text", "tokens", "breaking", "lines", "on", "whitespace" ]
[ "\"\"\"\n Pretty print a list of text tokens, breaking lines on whitespace\n\n :param tokens: the tokens to print\n :type tokens: list\n :param separator: the string to use to separate tokens\n :type separator: str\n :param width: the display width (default=70)\n :type width: int\n \"\"\"" ]
[ { "param": "tokens", "type": null }, { "param": "separator", "type": null }, { "param": "width", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "tokens", "type": null, "docstring": "the tokens to print", "docstring_tokens": [ "the", "tokens", "to", "print" ], "default": null, "is_optional": null }, { "identifier": "separator", "type": null, "docstring": "the string to use to separate tokens", "docstring_tokens": [ "the", "string", "to", "use", "to", "separate", "tokens" ], "default": null, "is_optional": null }, { "identifier": "width", "type": null, "docstring": "the display width (default=70)", "docstring_tokens": [ "the", "display", "width", "(", "default", "=", "70", ")" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import textwrap

def tokenwrap(tokens, separator=" ", width=70):
    return '\n'.join(textwrap.wrap(separator.join(tokens), width=width))
610,197
464
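A usage sketch for tokenwrap with an invented token list:

import textwrap

def tokenwrap(tokens, separator=" ", width=70):
    return '\n'.join(textwrap.wrap(separator.join(tokens), width=width))

print(tokenwrap(["the", "quick", "brown", "fox"] * 8, width=30))  # joined text re-wrapped at 30 columns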
bd824916f75f8d94cae9de8e873b3c3d07029ed6
garrison/qiskit-ignis
qiskit/ignis/verification/topological_codes/fitters.py
[ "Apache-2.0" ]
Python
lookuptable_decoding
<not_specific>
def lookuptable_decoding(training_results, real_results):
    """
    Calculates the logical error probability using postselection decoding.
    This postselects all results with trivial syndrome.

    Args:
        training_results (dict): A results dictionary, as produced by the
            ``process_results`` method of a code.
        real_results (dict): A results dictionary, as produced by the
            ``process_results`` method of a code.

    Returns:
        dict: Dictionary of logical error probabilities for
            each of the encoded logical states whose results were given in
            the input.

    Additional information:
        Given two dictionaries of results, as produced by a code object,
        the logical error probability is calculated for lookup table
        decoding. This is done using `training_results` as a guide to which
        syndrome is most probable for each logical value, and the
        probability is calculated for the results in `real_results`.
    """
    logical_prob = {}
    for log in real_results:
        shots = 0
        incorrect_shots = 0
        for string in real_results[log]:
            p = {}
            for testlog in ["0", "1"]:
                if string in training_results[testlog]:
                    p[testlog] = training_results[testlog][string]
                else:
                    p[testlog] = 0
            shots += real_results[log][string]
            if p["1" * (log == "0") + "0" * (log == "1")] > p[log]:
                incorrect_shots += real_results[log][string]
        logical_prob[log] = incorrect_shots / shots
    return logical_prob
Calculates the logical error probability using postselection decoding.
This postselects all results with trivial syndrome.

Args:
    training_results (dict): A results dictionary, as produced by the
        ``process_results`` method of a code.
    real_results (dict): A results dictionary, as produced by the
        ``process_results`` method of a code.

Returns:
    dict: Dictionary of logical error probabilities for
        each of the encoded logical states whose results were given in
        the input.

Additional information:
    Given two dictionaries of results, as produced by a code object,
    the logical error probability is calculated for lookup table
    decoding. This is done using `training_results` as a guide to which
    syndrome is most probable for each logical value, and the
    probability is calculated for the results in `real_results`.
Calculates the logical error probability using postselection decoding. This postselects all results with trivial syndrome.
[ "Calculates", "the", "logical", "error", "probability", "using", "postselection", "decoding", ".", "This", "postselects", "all", "results", "with", "trivial", "syndrome", "." ]
def lookuptable_decoding(training_results, real_results):
    logical_prob = {}
    for log in real_results:
        shots = 0
        incorrect_shots = 0
        for string in real_results[log]:
            p = {}
            for testlog in ["0", "1"]:
                if string in training_results[testlog]:
                    p[testlog] = training_results[testlog][string]
                else:
                    p[testlog] = 0
            shots += real_results[log][string]
            if p["1" * (log == "0") + "0" * (log == "1")] > p[log]:
                incorrect_shots += real_results[log][string]
        logical_prob[log] = incorrect_shots / shots
    return logical_prob
[ "def", "lookuptable_decoding", "(", "training_results", ",", "real_results", ")", ":", "logical_prob", "=", "{", "}", "for", "log", "in", "real_results", ":", "shots", "=", "0", "incorrect_shots", "=", "0", "for", "string", "in", "real_results", "[", "log", "]", ":", "p", "=", "{", "}", "for", "testlog", "in", "[", "\"0\"", ",", "\"1\"", "]", ":", "if", "string", "in", "training_results", "[", "testlog", "]", ":", "p", "[", "testlog", "]", "=", "training_results", "[", "testlog", "]", "[", "string", "]", "else", ":", "p", "[", "testlog", "]", "=", "0", "shots", "+=", "real_results", "[", "log", "]", "[", "string", "]", "if", "p", "[", "\"1\"", "*", "(", "log", "==", "\"0\"", ")", "+", "\"0\"", "*", "(", "log", "==", "\"1\"", ")", "]", ">", "p", "[", "log", "]", ":", "incorrect_shots", "+=", "real_results", "[", "log", "]", "[", "string", "]", "logical_prob", "[", "log", "]", "=", "incorrect_shots", "/", "shots", "return", "logical_prob" ]
Calculates the logical error probability using postselection decoding.
[ "Calculates", "the", "logical", "error", "probability", "using", "postselection", "decoding", "." ]
[ "\"\"\"\n Calculates the logical error probability using postselection decoding.\n This postselects all results with trivial syndrome.\n\n Args:\n training_results (dict): A results dictionary, as produced by the\n ``process_results`` method of a code.\n real_results (dict): A results dictionary, as produced by the\n ``process_results`` method of a code.\n\n Returns:\n dict: Dictionary of logical error probabilities for\n each of the encoded logical states whose results were given in\n the input.\n\n\n Additional information:\n Given a two dictionaries of results, as produced by a code object,\n thelogical error probability is calculated for lookup table\n decoding. This is done using `training_results` as a guide to which\n syndrome is most probable for each logical value, and the\n probability is calculated for the results in `real_results`.\n \"\"\"" ]
[ { "param": "training_results", "type": null }, { "param": "real_results", "type": null } ]
{ "returns": [ { "docstring": "Dictionary of logical error probabilities for\neach of the encoded logical states whose results were given in\nthe input.", "docstring_tokens": [ "Dictionary", "of", "logical", "error", "probabilities", "for", "each", "of", "the", "encoded", "logical", "states", "whose", "results", "were", "given", "in", "the", "input", "." ], "type": "dict" } ], "raises": [], "params": [ { "identifier": "training_results", "type": null, "docstring": "A results dictionary, as produced by the\n``process_results`` method of a code.", "docstring_tokens": [ "A", "results", "dictionary", "as", "produced", "by", "the", "`", "`", "process_results", "`", "`", "method", "of", "a", "code", "." ], "default": null, "is_optional": false }, { "identifier": "real_results", "type": null, "docstring": "A results dictionary, as produced by the\n``process_results`` method of a code.", "docstring_tokens": [ "A", "results", "dictionary", "as", "produced", "by", "the", "`", "`", "process_results", "`", "`", "method", "of", "a", "code", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def lookuptable_decoding(training_results, real_results):
    logical_prob = {}
    for log in real_results:
        shots = 0
        incorrect_shots = 0
        for string in real_results[log]:
            p = {}
            for testlog in ["0", "1"]:
                if string in training_results[testlog]:
                    p[testlog] = training_results[testlog][string]
                else:
                    p[testlog] = 0
            shots += real_results[log][string]
            if p["1" * (log == "0") + "0" * (log == "1")] > p[log]:
                incorrect_shots += real_results[log][string]
        logical_prob[log] = incorrect_shots / shots
    return logical_prob
610,198
527
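A small end-to-end sketch, assuming the lookuptable_decoding function from the row above is in scope; the syndrome-count dictionaries are fabricated for illustration:

training = {"0": {"00": 90, "11": 10}, "1": {"00": 5, "11": 95}}
real = {"0": {"00": 80, "11": 20}, "1": {"00": 10, "11": 90}}
# each logical value's shots are scored against whichever syndrome is more probable in training
print(lookuptable_decoding(training, real))  # {'0': 0.2, '1': 0.1}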
4352ea0e18b098dffae5afab468e7de9b764ca0b
rodionlim/portfolio-manager
utilfns/dump.py
[ "MIT" ]
Python
chunks
null
def chunks(lst: list, n):
    """Yield successive n-sized chunks from a list or set."""
    if isinstance(lst, set):
        lst = list(lst)
    for i in range(0, len(lst), n):
        yield lst[i:i + n]
Yield successive n-sized chunks from a list or set.
Yield successive n-sized chunks from a list or set.
[ "Yield", "successive", "n", "-", "sized", "chunks", "from", "a", "list", "or", "set", "." ]
def chunks(lst: list, n):
    if isinstance(lst, set):
        lst = list(lst)
    for i in range(0, len(lst), n):
        yield lst[i:i + n]
[ "def", "chunks", "(", "lst", ":", "list", ",", "n", ")", ":", "if", "isinstance", "(", "lst", ",", "set", ")", ":", "lst", "=", "list", "(", "lst", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "lst", ")", ",", "n", ")", ":", "yield", "lst", "[", "i", ":", "i", "+", "n", "]" ]
Yield successive n-sized chunks from a list or set.
[ "Yield", "successive", "n", "-", "sized", "chunks", "from", "a", "list", "or", "set", "." ]
[ "\"\"\"Yield successive n-sized chunks from a list or set.\"\"\"" ]
[ { "param": "lst", "type": "list" }, { "param": "n", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "lst", "type": "list", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "n", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def chunks(lst: list, n):
    if isinstance(lst, set):
        lst = list(lst)
    for i in range(0, len(lst), n):
        yield lst[i:i + n]
610,199
842
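A self-contained check for chunks:

def chunks(lst: list, n):
    if isinstance(lst, set):
        lst = list(lst)
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

print(list(chunks([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]] - last chunk may be short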
61989dfe28677fb0386d29309c71b7360f3b90f5
callmed/sensor-board
storage/database_workers.py
[ "Apache-2.0" ]
Python
database_query
<not_specific>
def database_query(conn):
    """ Return the average value of 'consider_last' available measurements
    in database.
    """
    query = f"""SELECT temperature, humidity
                FROM measurements ORDER by timestamp ASC"""
    result = conn.execute(query).all()
    return result
Return the average value of 'consider_last' available measurements in database.
Return the average value of 'consider_last' available measurements in database.
[ "Return", "the", "average", "value", "of", "'", "consider_last", "'", "available", "measurements", "in", "database", "." ]
def database_query(conn):
    query = f"""SELECT temperature, humidity
                FROM measurements ORDER by timestamp ASC"""
    result = conn.execute(query).all()
    return result
[ "def", "database_query", "(", "conn", ")", ":", "query", "=", "f\"\"\"SELECT temperature, humidity\n FROM measurements ORDER by timestamp ASC\"\"\"", "result", "=", "conn", ".", "execute", "(", "query", ")", ".", "all", "(", ")", "return", "result" ]
Return the average value of 'consider_last' available measurements in database.
[ "Return", "the", "average", "value", "of", "'", "consider_last", "'", "available", "measurements", "in", "database", "." ]
[ "\"\"\" Return the average value of 'consider_last' available measurements\n in database.\n \"\"\"" ]
[ { "param": "conn", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "conn", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def database_query(conn):
    query = f"""SELECT temperature, humidity
                FROM measurements ORDER by timestamp ASC"""
    result = conn.execute(query).all()
    return result
610,200
76
b0753ba6cdf5161f3096b59aa6535444ff8a9e92
estorrs/mgitools
mgitools/os_helpers.py
[ "MIT" ]
Python
listfiles
null
def listfiles(folder, regex=None):
    """Return all files with the given regex in the given folder structure"""
    for root, folders, files in os.walk(folder):
        for filename in folders + files:
            if regex is None:
                yield os.path.join(root, filename)
            elif re.findall(regex, os.path.join(root, filename)):
                yield os.path.join(root, filename)
Return all files with the given regex in the given folder structure
Return all files with the given regex in the given folder structure
[ "Return", "all", "files", "with", "the", "given", "regex", "in", "the", "given", "folder", "structure" ]
def listfiles(folder, regex=None):
    for root, folders, files in os.walk(folder):
        for filename in folders + files:
            if regex is None:
                yield os.path.join(root, filename)
            elif re.findall(regex, os.path.join(root, filename)):
                yield os.path.join(root, filename)
[ "def", "listfiles", "(", "folder", ",", "regex", "=", "None", ")", ":", "for", "root", ",", "folders", ",", "files", "in", "os", ".", "walk", "(", "folder", ")", ":", "for", "filename", "in", "folders", "+", "files", ":", "if", "regex", "is", "None", ":", "yield", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "elif", "re", ".", "findall", "(", "regex", ",", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")" ]
Return all files with the given regex in the given folder structure
[ "Return", "all", "files", "with", "the", "given", "regex", "in", "the", "given", "folder", "structure" ]
[ "\"\"\"Return all files with the given regex in the given folder structure\"\"\"" ]
[ { "param": "folder", "type": null }, { "param": "regex", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "folder", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "regex", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re
import os

def listfiles(folder, regex=None):
    for root, folders, files in os.walk(folder):
        for filename in folders + files:
            if regex is None:
                yield os.path.join(root, filename)
            elif re.findall(regex, os.path.join(root, filename)):
                yield os.path.join(root, filename)
610,201
931
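A usage sketch for listfiles; the search root and pattern are placeholders:

import os
import re

def listfiles(folder, regex=None):
    for root, folders, files in os.walk(folder):
        for filename in folders + files:
            if regex is None:
                yield os.path.join(root, filename)
            elif re.findall(regex, os.path.join(root, filename)):
                yield os.path.join(root, filename)

for path in listfiles(".", regex=r"\.py$"):  # every .py path under the current directory
    print(path)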
2b465564f1365be97c1226ea7047960410b4f4e6
a-wing/mavelous
mavproxy.py
[ "MIT" ]
Python
vcell_to_battery_percent
<not_specific>
def vcell_to_battery_percent(vcell):
    '''convert a cell voltage to a percentage battery level'''
    if vcell > 4.1:
        # above 4.1 is 100% battery
        return 100.0
    elif vcell > 3.81:
        # 3.81 is 17% remaining, from flight logs
        return 17.0 + 83.0 * (vcell - 3.81) / (4.1 - 3.81)
    elif vcell > 3.20:
        # below 3.2 it degrades fast. It's dead at 3.2
        # (original compared against 3.81 again here, leaving this branch unreachable)
        return 0.0 + 17.0 * (vcell - 3.20) / (3.81 - 3.20)
    # it's dead or disconnected
    return 0.0
convert a cell voltage to a percentage battery level
convert a cell voltage to a percentage battery level
[ "convert", "a", "cell", "voltage", "to", "a", "percentage", "battery", "level" ]
def vcell_to_battery_percent(vcell):
    if vcell > 4.1:
        return 100.0
    elif vcell > 3.81:
        return 17.0 + 83.0 * (vcell - 3.81) / (4.1 - 3.81)
    elif vcell > 3.20:  # fixed: original repeated "vcell > 3.81", making this branch dead
        return 0.0 + 17.0 * (vcell - 3.20) / (3.81 - 3.20)
    return 0.0
[ "def", "vcell_to_battery_percent", "(", "vcell", ")", ":", "if", "vcell", ">", "4.1", ":", "return", "100.0", "elif", "vcell", ">", "3.81", ":", "return", "17.0", "+", "83.0", "*", "(", "vcell", "-", "3.81", ")", "/", "(", "4.1", "-", "3.81", ")", "elif", "vcell", ">", "3.81", ":", "return", "0.0", "+", "17.0", "*", "(", "vcell", "-", "3.20", ")", "/", "(", "3.81", "-", "3.20", ")", "return", "0.0" ]
convert a cell voltage to a percentage battery level
[ "convert", "a", "cell", "voltage", "to", "a", "percentage", "battery", "level" ]
[ "'''convert a cell voltage to a percentage battery level'''", "# above 4.1 is 100% battery", "# 3.81 is 17% remaining, from flight logs", "# below 3.2 it degrades fast. It's dead at 3.2", "# it's dead or disconnected" ]
[ { "param": "vcell", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "vcell", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def vcell_to_battery_percent(vcell):
    if vcell > 4.1:
        return 100.0
    elif vcell > 3.81:
        return 17.0 + 83.0 * (vcell - 3.81) / (4.1 - 3.81)
    elif vcell > 3.20:  # fixed: original repeated "vcell > 3.81", making this branch dead
        return 0.0 + 17.0 * (vcell - 3.20) / (3.81 - 3.20)
    return 0.0
610,202
432
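A quick sanity check of the corrected thresholds; the voltages are illustrative:

def vcell_to_battery_percent(vcell):
    if vcell > 4.1:
        return 100.0
    elif vcell > 3.81:
        return 17.0 + 83.0 * (vcell - 3.81) / (4.1 - 3.81)
    elif vcell > 3.20:
        return 0.0 + 17.0 * (vcell - 3.20) / (3.81 - 3.20)
    return 0.0

print(vcell_to_battery_percent(4.2))   # 100.0
print(vcell_to_battery_percent(3.95))  # ~57.1, interpolated between 17% and 100%
print(vcell_to_battery_percent(3.5))   # ~8.4, interpolated between 0% and 17%
print(vcell_to_battery_percent(3.0))   # 0.0 - dead or disconnected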
63e9efee02c7afd3719faa7701d902488784b230
cphan-roblox/ZedThree-clang-tidy-review
review.py
[ "MIT" ]
Python
read_one_line
<not_specific>
def read_one_line(filename, line_offset):
    """Read a single line from a source file"""
    # Could cache the files instead of opening them each time?
    with open(filename, "r") as file:
        file.seek(line_offset)
        return file.readline().rstrip("\n")
Read a single line from a source file
Read a single line from a source file
[ "Read", "a", "single", "line", "from", "a", "source", "file" ]
def read_one_line(filename, line_offset):
    with open(filename, "r") as file:
        file.seek(line_offset)
        return file.readline().rstrip("\n")
[ "def", "read_one_line", "(", "filename", ",", "line_offset", ")", ":", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "file", ":", "file", ".", "seek", "(", "line_offset", ")", "return", "file", ".", "readline", "(", ")", ".", "rstrip", "(", "\"\\n\"", ")" ]
Read a single line from a source file
[ "Read", "a", "single", "line", "from", "a", "source", "file" ]
[ "\"\"\"Read a single line from a source file\"\"\"", "# Could cache the files instead of opening them each time?" ]
[ { "param": "filename", "type": null }, { "param": "line_offset", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "line_offset", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def read_one_line(filename, line_offset):
    with open(filename, "r") as file:
        file.seek(line_offset)
        return file.readline().rstrip("\n")
610,203
676
869b90c6ac458743b1cf893c6a70feec3a4df697
wolfy1339/Python-IRC-Bot
log.py
[ "MIT" ]
Python
exnToString
<not_specific>
def exnToString(exn):
    """Turns a simple exception instance into a string (better than str(e))"""
    strE = str(exn)
    if strE:
        return '{0!s}: {1!s}'.format(exn.__class__.__name__, strE)
    return exn.__class__.__name__
Turns a simple exception instance into a string (better than str(e))
Turns a simple exception instance into a string (better than str(e))
[ "Turns", "a", "simple", "exception", "instance", "into", "a", "string", "(", "better", "than", "str", "(", "e", "))" ]
def exnToString(exn):
    strE = str(exn)
    if strE:
        return '{0!s}: {1!s}'.format(exn.__class__.__name__, strE)
    return exn.__class__.__name__
[ "def", "exnToString", "(", "exn", ")", ":", "strE", "=", "str", "(", "exn", ")", "if", "strE", ":", "return", "'{0!s}: {1!s}'", ".", "format", "(", "exn", ".", "__class__", ".", "__name__", ",", "strE", ")", "return", "exn", ".", "__class__", ".", "__name__" ]
Turns a simple exception instance into a string (better than str(e))
[ "Turns", "a", "simple", "exception", "instance", "into", "a", "string", "(", "better", "than", "str", "(", "e", "))" ]
[ "\"\"\"Turns a simple exception instance into a string (better than str(e))\"\"\"" ]
[ { "param": "exn", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "exn", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def exnToString(exn):
    strE = str(exn)
    if strE:
        return '{0!s}: {1!s}'.format(exn.__class__.__name__, strE)
    return exn.__class__.__name__
610,204
300
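A self-contained sketch contrasting exnToString with plain str():

def exnToString(exn):
    strE = str(exn)
    if strE:
        return '{0!s}: {1!s}'.format(exn.__class__.__name__, strE)
    return exn.__class__.__name__

try:
    {}[0]
except KeyError as e:
    print(str(e))          # '0' - the exception class is lost
    print(exnToString(e))  # 'KeyError: 0' - class name plus message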
bea4c897eed90a5ccde061f1207d8125649454fe
ma-sadeghi/OpenPNM
openpnm/models/phases/diffusivity.py
[ "MIT" ]
Python
fuller_scaling
<not_specific>
def fuller_scaling(target, DABo, To, Po, temperature='pore.temperature',
                   pressure='pore.pressure'):
    r"""
    Uses Fuller model to adjust a diffusion coefficient for gases from
    reference conditions to conditions of interest

    Parameters
    ----------
    target : GenericPhase
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    DABo : float, array_like
        Diffusion coefficient at reference conditions
    Po, To : float, array_like
        Pressure & temperature at reference conditions, respectively
    pressure : str
        The dictionary key containing the pressure values in Pascals (Pa)
    temperature : str
        The dictionary key containing the temperature values in Kelvin (K)

    Returns
    -------
    value : ndarray
        Array containing scaled gas diffusion coefficient values [m2/s].

    """
    Ti = target[temperature]
    Pi = target[pressure]
    value = DABo*(Ti/To)**1.75*(Po/Pi)
    return value
Uses Fuller model to adjust a diffusion coefficient for gases from
reference conditions to conditions of interest

Parameters
----------
target : GenericPhase
    The object for which these values are being calculated. This
    controls the length of the calculated array, and also provides
    access to other necessary thermofluid properties.
DABo : float, array_like
    Diffusion coefficient at reference conditions
Po, To : float, array_like
    Pressure & temperature at reference conditions, respectively
pressure : str
    The dictionary key containing the pressure values in Pascals (Pa)
temperature : str
    The dictionary key containing the temperature values in Kelvin (K)

Returns
-------
value : ndarray
    Array containing scaled gas diffusion coefficient values [m2/s].
Uses Fuller model to adjust a diffusion coefficient for gases from
reference conditions to conditions of interest

Parameters
target : GenericPhase
    The object for which these values are being calculated. This
    controls the length of the calculated array, and also provides
    access to other necessary thermofluid properties.
DABo : float, array_like
    Diffusion coefficient at reference conditions
Po, To : float, array_like
    Pressure & temperature at reference conditions, respectively
pressure : str
    The dictionary key containing the pressure values in Pascals (Pa)
temperature : str
    The dictionary key containing the temperature values in Kelvin (K)

Returns
value : ndarray
    Array containing scaled gas diffusion coefficient values [m2/s].
[ "r", "\"", "\"", "\"", "Uses", "Fuller", "model", "to", "adjust", "a", "diffusion", "coefficient", "for", "gases", "from", "reference", "conditions", "to", "conditions", "of", "interest", "Parameters", "target", ":", "GenericPhase", "The", "object", "for", "which", "these", "values", "are", "being", "calculated", ".", "This", "controls", "the", "length", "of", "the", "calculated", "array", "and", "also", "provides", "access", "to", "other", "necessary", "thermofluid", "properties", ".", "DABo", ":", "float", "array_like", "Diffusion", "coefficient", "at", "reference", "conditions", "Po", "To", ":", "float", "array_like", "Pressure", "&", "temperature", "at", "reference", "conditions", "respectively", "pressure", ":", "str", "The", "dictionary", "key", "containing", "the", "pressure", "values", "in", "Pascals", "(", "Pa", ")", "temperature", ":", "str", "The", "dictionary", "key", "containing", "the", "temperature", "values", "in", "Kelvin", "(", "K", ")", "Returns", "value", ":", "ndarray", "Array", "containing", "scaled", "gas", "diffusion", "coefficient", "values", "[", "m2", "/", "s", "]", "." ]
def fuller_scaling(target, DABo, To, Po, temperature='pore.temperature',
                   pressure='pore.pressure'):
    Ti = target[temperature]
    Pi = target[pressure]
    value = DABo*(Ti/To)**1.75*(Po/Pi)
    return value
[ "def", "fuller_scaling", "(", "target", ",", "DABo", ",", "To", ",", "Po", ",", "temperature", "=", "'pore.temperature'", ",", "pressure", "=", "'pore.pressure'", ")", ":", "Ti", "=", "target", "[", "temperature", "]", "Pi", "=", "target", "[", "pressure", "]", "value", "=", "DABo", "*", "(", "Ti", "/", "To", ")", "**", "1.75", "*", "(", "Po", "/", "Pi", ")", "return", "value" ]
Uses Fuller model to adjust a diffusion coefficient for gases from reference conditions to conditions of interest
[ "r", "\"", "\"", "\"", "Uses", "Fuller", "model", "to", "adjust", "a", "diffusion", "coefficient", "for", "gases", "from", "reference", "conditions", "to", "conditions", "of", "interest" ]
[ "r\"\"\"\n Uses Fuller model to adjust a diffusion coefficient for gases from\n reference conditions to conditions of interest\n\n Parameters\n ----------\n target : GenericPhase\n The object for which these values are being calculated. This\n controls the length of the calculated array, and also provides\n access to other necessary thermofluid properties.\n DABo : float, array_like\n Diffusion coefficient at reference conditions\n Po, To : float, array_like\n Pressure & temperature at reference conditions, respectively\n pressure : str\n The dictionary key containing the pressure values in Pascals (Pa)\n temperature : str\n The dictionary key containing the temperature values in Kelvin (K)\n\n Returns\n -------\n value : ndarray\n Array containing scaled gas diffusion coefficient values [m2/s].\n\n \"\"\"" ]
[ { "param": "target", "type": null }, { "param": "DABo", "type": null }, { "param": "To", "type": null }, { "param": "Po", "type": null }, { "param": "temperature", "type": null }, { "param": "pressure", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "target", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "DABo", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "To", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "Po", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "temperature", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "pressure", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def fuller_scaling(target, DABo, To, Po, temperature='pore.temperature',
                   pressure='pore.pressure'):
    Ti = target[temperature]
    Pi = target[pressure]
    value = DABo*(Ti/To)**1.75*(Po/Pi)
    return value
610,205
331
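A usage sketch of the Fuller-style scaling DAB = DABo * (Ti/To)**1.75 * (Po/Pi), assuming the function from the row above is in scope; a plain dict stands in for the OpenPNM phase object, which is an assumption for illustration only:

target = {'pore.temperature': 353.0, 'pore.pressure': 101325.0}  # hypothetical conditions of interest
scaled = fuller_scaling(target, DABo=2.0e-5, To=298.0, Po=101325.0)
print(scaled)  # ~2.7e-5 m2/s - diffusivity grows as T**1.75 at constant pressure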
3b1ebb75313da6ba5b14537dc6bf608169a26404
CGATOxford/cgat
CGAT/CSV2DB.py
[ "BSD-2-Clause", "BSD-3-Clause" ]
Python
quoteRow
<not_specific>
def quoteRow(row, take, map_column2type, missing_values,
             null="NULL", string_value="%s"):
    """return a dictionary with properly quoted values."""

    # set empty values for int/float to NULL
    d = {}
    for t in take:
        v = row[t]
        if v == "":
            d[t] = null
        elif v in missing_values:
            d[t] = null
        elif map_column2type[t] in (int, float):
            d[t] = str(row[t])
        else:
            d[t] = string_value % row[t]
    return d
return a dictionary with properly quoted values.
return a dictionary with properly quoted values.
[ "return", "a", "dictionary", "with", "properly", "quoted", "values", "." ]
def quoteRow(row, take, map_column2type, missing_values,
             null="NULL", string_value="%s"):
    d = {}
    for t in take:
        v = row[t]
        if v == "":
            d[t] = null
        elif v in missing_values:
            d[t] = null
        elif map_column2type[t] in (int, float):
            d[t] = str(row[t])
        else:
            d[t] = string_value % row[t]
    return d
[ "def", "quoteRow", "(", "row", ",", "take", ",", "map_column2type", ",", "missing_values", ",", "null", "=", "\"NULL\"", ",", "string_value", "=", "\"%s\"", ")", ":", "d", "=", "{", "}", "for", "t", "in", "take", ":", "v", "=", "row", "[", "t", "]", "if", "v", "==", "\"\"", ":", "d", "[", "t", "]", "=", "null", "elif", "v", "in", "missing_values", ":", "d", "[", "t", "]", "=", "null", "elif", "map_column2type", "[", "t", "]", "in", "(", "int", ",", "float", ")", ":", "d", "[", "t", "]", "=", "str", "(", "row", "[", "t", "]", ")", "else", ":", "d", "[", "t", "]", "=", "string_value", "%", "row", "[", "t", "]", "return", "d" ]
return a dictionary with properly quoted values.
[ "return", "a", "dictionary", "with", "properly", "quoted", "values", "." ]
[ "\"\"\"return a dictionary with properly quoted values.\"\"\"", "# set empty values for int/float to NULL" ]
[ { "param": "row", "type": null }, { "param": "take", "type": null }, { "param": "map_column2type", "type": null }, { "param": "missing_values", "type": null }, { "param": "null", "type": null }, { "param": "string_value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "row", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "take", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "map_column2type", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "missing_values", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "null", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "string_value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def quoteRow(row, take, map_column2type, missing_values,
             null="NULL", string_value="%s"):
    d = {}
    for t in take:
        v = row[t]
        if v == "":
            d[t] = null
        elif v in missing_values:
            d[t] = null
        elif map_column2type[t] in (int, float):
            d[t] = str(row[t])
        else:
            d[t] = string_value % row[t]
    return d
610,206
575
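A small usage sketch for quoteRow (row, type map, and the quoting template are invented):

row = {"name": "alice", "age": "42", "score": ""}
types = {"name": str, "age": int, "score": float}
quoted = quoteRow(row, take=["name", "age", "score"],
                  map_column2type=types, missing_values=("na",),
                  string_value="'%s'")
print(quoted)  # {'name': "'alice'", 'age': '42', 'score': 'NULL'}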
d9f8c5d10302f7a2abf3838d9467f1e07f7604fb
laran/interview-questions
python/questions/spiral_array.py
[ "MIT" ]
Python
print_spiral
null
def print_spiral(a2d): """ Print a 2-dimensional array of integers :param a2d: :return: """ print('') # newline for clarity for y in range(len(a2d)): row = a2d[y] print(", ".join(str(x) for x in row))
Print a 2-dimensional array of integers :param a2d: :return:
Print a 2-dimensional array of integers
[ "Print", "a", "2", "-", "dimensional", "array", "of", "integers" ]
def print_spiral(a2d):
    print('')
    for y in range(len(a2d)):
        row = a2d[y]
        print(", ".join(str(x) for x in row))
[ "def", "print_spiral", "(", "a2d", ")", ":", "print", "(", "''", ")", "for", "y", "in", "range", "(", "len", "(", "a2d", ")", ")", ":", "row", "=", "a2d", "[", "y", "]", "print", "(", "\", \"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "row", ")", ")" ]
Print a 2-dimensional array of integers
[ "Print", "a", "2", "-", "dimensional", "array", "of", "integers" ]
[ "\"\"\"\n Print a 2-dimensional array of integers\n\n :param a2d:\n :return:\n \"\"\"", "# newline for clarity" ]
[ { "param": "a2d", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "a2d", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def print_spiral(a2d):
    print('')
    for y in range(len(a2d)):
        row = a2d[y]
        print(", ".join(str(x) for x in row))
610,209
903
3ee73a81552a3491b70425f79c950c4c108eb9a4
BartWojtowicz/cne
cne/data.py
[ "Apache-2.0" ]
Python
lorenz
<not_specific>
def lorenz(x, y, z, s=10, r=28, b=2.667):
    """
    Given:
       x, y, z: a point of interest in three dimensional space
       s, r, b: parameters defining the lorenz attractor
    Returns:
       x_dot, y_dot, z_dot: values of the lorenz attractor's partial
           derivatives at the point x, y, z
    """
    x_dot = s * (y - x)
    y_dot = r * x - y - x * z
    z_dot = x * y - b * z
    return x_dot, y_dot, z_dot
Given:
   x, y, z: a point of interest in three dimensional space
   s, r, b: parameters defining the lorenz attractor
Returns:
   x_dot, y_dot, z_dot: values of the lorenz attractor's partial
       derivatives at the point x, y, z
x, y, z: a point of interest in three dimensional space s, r, b: parameters defining the lorenz attractor
[ "x", "y", "z", ":", "a", "point", "of", "interest", "in", "three", "dimensional", "space", "s", "r", "b", ":", "parameters", "defining", "the", "lorenz", "attractor" ]
def lorenz(x, y, z, s=10, r=28, b=2.667):
    x_dot = s * (y - x)
    y_dot = r * x - y - x * z
    z_dot = x * y - b * z
    return x_dot, y_dot, z_dot
[ "def", "lorenz", "(", "x", ",", "y", ",", "z", ",", "s", "=", "10", ",", "r", "=", "28", ",", "b", "=", "2.667", ")", ":", "x_dot", "=", "s", "*", "(", "y", "-", "x", ")", "y_dot", "=", "r", "*", "x", "-", "y", "-", "x", "*", "z", "z_dot", "=", "x", "*", "y", "-", "b", "*", "z", "return", "x_dot", ",", "y_dot", ",", "z_dot" ]
Given: x, y, z: a point of interest in three dimensional space s, r, b: parameters defining the lorenz attractor
[ "Given", ":", "x", "y", "z", ":", "a", "point", "of", "interest", "in", "three", "dimensional", "space", "s", "r", "b", ":", "parameters", "defining", "the", "lorenz", "attractor" ]
[ "\"\"\"\n Given:\n x, y, z: a point of interest in three dimensional space\n s, r, b: parameters defining the lorenz attractor\n Returns:\n x_dot, y_dot, z_dot: values of the lorenz attractor's partial\n derivatives at the point x, y, z\n \"\"\"" ]
[ { "param": "x", "type": null }, { "param": "y", "type": null }, { "param": "z", "type": null }, { "param": "s", "type": null }, { "param": "r", "type": null }, { "param": "b", "type": null } ]
{ "returns": [ { "docstring": "x_dot, y_dot, z_dot: values of the lorenz attractor's partial\nderivatives at the point x, y, z", "docstring_tokens": [ "x_dot", "y_dot", "z_dot", ":", "values", "of", "the", "lorenz", "attractor", "'", "s", "partial", "derivatives", "at", "the", "point", "x", "y", "z" ], "type": null } ], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "y", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "z", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "s", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "r", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "b", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def lorenz(x, y, z, s=10, r=28, b=2.667):
    x_dot = s * (y - x)
    y_dot = r * x - y - x * z
    z_dot = x * y - b * z
    return x_dot, y_dot, z_dot
610,210
587
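The function only returns derivatives, so the caller must integrate them; a minimal forward-Euler sketch (step size and start point chosen arbitrarily):

dt = 0.01
x, y, z = 0.0, 1.0, 1.05
for _ in range(10000):
    x_dot, y_dot, z_dot = lorenz(x, y, z)
    x, y, z = x + x_dot * dt, y + y_dot * dt, z + z_dot * dt
print(x, y, z)  # a point on (a crude approximation of) the attractor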
06a30f6d4c466b3a6e7b8e9c0eb756e2cbdd5568
w2sv/Multi-Tool
src/video_lag_stripper/__init__.py
[ "MIT" ]
Python
new_fps
int
def new_fps(original_fps: int, n_frames_original: int, n_frames_new: int) -> int:
    """ Heuristically adjusts fps of delagged video to the new number of frames
    by reducing the original fps and thus leveling the increased speed of the
    filtered video being a side-effect of the frame discarding

    Returns:
        original video fps reduced by what corresponds to the
        percentual change between n_frames_original and n_frames_new """
    return int(original_fps - original_fps * n_frames_new / n_frames_original)
Heuristically adjusts fps of delagged video to the new number of frames by reducing the original fps and thus leveling the increased speed of the filtered video being a side-effect of the frame discarding Returns: original video fps reduced by what corresponds to the percentual change between n_frames_original and n_frames_new
Heuristically adjusts fps of delagged video to the new number of frames by reducing the original fps and thus leveling the increased speed of the filtered video being a side-effect of the frame discarding
[ "Heuristically", "adjusts", "fps", "of", "delagged", "video", "to", "the", "new", "number", "of", "frames", "by", "reducing", "the", "original", "fps", "and", "thus", "leveling", "the", "increased", "speed", "of", "the", "filtered", "video", "being", "a", "side", "-", "effect", "of", "the", "frame", "discarding" ]
def new_fps(original_fps: int, n_frames_original: int, n_frames_new: int) -> int:
    return int(original_fps - original_fps * n_frames_new / n_frames_original)
[ "def", "new_fps", "(", "original_fps", ":", "int", ",", "n_frames_original", ":", "int", ",", "n_frames_new", ":", "int", ")", "->", "int", ":", "return", "int", "(", "original_fps", "-", "original_fps", "*", "n_frames_new", "/", "n_frames_original", ")" ]
Heuristically adjusts fps of delagged video to the new number of frames by reducing the original fps and thus leveling the increased speed of the filtered video being a side-effect of the frame discarding
[ "Heuristically", "adjusts", "fps", "of", "delagged", "video", "to", "the", "new", "number", "of", "frames", "by", "reducing", "the", "original", "fps", "and", "thus", "leveling", "the", "increased", "speed", "of", "the", "filtered", "video", "being", "a", "side", "-", "effect", "of", "the", "frame", "discarding" ]
[ "\"\"\" Heuristically adjusts fps of delagged video to the new number of frames\n by reducing the original fps and thus leveling the increased speed of the\n filtered video being a side-effect of the frame discarding\n\n Returns:\n original video fps reduced by what corresponds to the\n percentual change between n_frames_original and n_frames_new \"\"\"" ]
[ { "param": "original_fps", "type": "int" }, { "param": "n_frames_original", "type": "int" }, { "param": "n_frames_new", "type": "int" } ]
{ "returns": [ { "docstring": "original video fps reduced by what corresponds to the\npercentual change between n_frames_original and n_frames_new", "docstring_tokens": [ "original", "video", "fps", "reduced", "by", "what", "corresponds", "to", "the", "percentual", "change", "between", "n_frames_original", "and", "n_frames_new" ], "type": null } ], "raises": [], "params": [ { "identifier": "original_fps", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "n_frames_original", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "n_frames_new", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def new_fps(original_fps: int, n_frames_original: int, n_frames_new: int) -> int:
    return int(original_fps - original_fps * n_frames_new / n_frames_original)
610,211
235
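A worked example of the arithmetic (frame counts invented); note the result scales with 1 - n_frames_new / n_frames_original, so the meaning of the second argument matters:

# 30 fps source where 240 of 300 frames survive the lag filter
print(new_fps(30, 300, 240))  # int(30 - 30 * 240 / 300) == 6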
8b595aad29480bcb8a5ba12cdd1f0cf2e6832aed
stefanhoelzl/fancy-dict
fancy_dict/conditions.py
[ "MIT" ]
Python
if_existing
<not_specific>
def if_existing(old_value, _new_value): """Value can be changed if value already exists Args: old_value: value to change _new_value: new value Returns: True if old_value exists """ if old_value is not None: return True return False
Value can be changed if value already exists

Args:
    old_value: value to change
    _new_value: new value

Returns:
    True if old_value exists
Value can be changed if value already exists
[ "Value", "can", "be", "changed", "if", "value", "already", "exists" ]
def if_existing(old_value, _new_value):
    if old_value is not None:
        return True
    return False
[ "def", "if_existing", "(", "old_value", ",", "_new_value", ")", ":", "if", "old_value", "is", "not", "None", ":", "return", "True", "return", "False" ]
Value can be changed if value already exists
[ "Value", "can", "be", "changed", "if", "value", "already", "exists" ]
[ "\"\"\"Value can be changed if value already exists\n\n Args:\n old_value: value to change\n _new_value: new value\n\n Returns:\n True if old_value exists\n \"\"\"" ]
[ { "param": "old_value", "type": null }, { "param": "_new_value", "type": null } ]
{ "returns": [ { "docstring": "True if old_value exists", "docstring_tokens": [ "True", "if", "old_value", "exists" ], "type": null } ], "raises": [], "params": [ { "identifier": "old_value", "type": null, "docstring": "value to change", "docstring_tokens": [ "value", "to", "change" ], "default": null, "is_optional": null }, { "identifier": "_new_value", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def if_existing(old_value, _new_value):
    if old_value is not None:
        return True
    return False
610,212
276
aa5b8cf0d5d5c5ede25c2138ec4954c3c2fe72a5
cookingcodewithme/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_utils.py
[ "BSD-3-Clause" ]
Python
raise_error_unsupported_option
null
def raise_error_unsupported_option(option, layer_type, layer_name):
    """
    Raise an error if an option is not supported.
    """
    raise RuntimeError(
        "Unsupported option =%s in layer %s(%s)" % (option, layer_type, layer_name)
    )
Raise an error if an option is not supported.
Raise an error if an option is not supported.
[ "Raise", "an", "error", "if", "an", "option", "is", "not", "supported", "." ]
def raise_error_unsupported_option(option, layer_type, layer_name):
    raise RuntimeError(
        "Unsupported option =%s in layer %s(%s)" % (option, layer_type, layer_name)
    )
[ "def", "raise_error_unsupported_option", "(", "option", ",", "layer_type", ",", "layer_name", ")", ":", "raise", "RuntimeError", "(", "\"Unsupported option =%s in layer %s(%s)\"", "%", "(", "option", ",", "layer_type", ",", "layer_name", ")", ")" ]
Raise an error if an option is not supported.
[ "Raise", "an", "error", "if", "an", "option", "is", "not", "supported", "." ]
[ "\"\"\"\n Raise an error if an option is not supported.\n \"\"\"" ]
[ { "param": "option", "type": null }, { "param": "layer_type", "type": null }, { "param": "layer_name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "option", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "layer_type", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "layer_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def raise_error_unsupported_option(option, layer_type, layer_name):
    raise RuntimeError(
        "Unsupported option =%s in layer %s(%s)" % (option, layer_type, layer_name)
    )
610,213
145
cd06604c7cb04a20c3934a3d73131e4b808bb1d6
CSIRT-MU/CRUSOE
crusoe_observe/flowmon-rest-client/flowmonclient/AbstractClient.py
[ "MIT" ]
Python
clean_dict
<not_specific>
def clean_dict(dictionary):
    """ Remove items with None value in the dictionary and recursively in other nested dictionaries. """
    if not isinstance(dictionary, dict):
        return dictionary
    return {k: clean_dict(v) for k, v in dictionary.items() if v is not None}
Remove items with None value in the dictionary and recursively in other nested dictionaries.
Remove items with None value in the dictionary and recursively in other nested dictionaries.
[ "Remove", "items", "with", "None", "value", "in", "the", "dictionary", "and", "recursively", "in", "other", "nested", "dictionaries", "." ]
def clean_dict(dictionary):
    if not isinstance(dictionary, dict):
        return dictionary
    return {k: clean_dict(v) for k, v in dictionary.items() if v is not None}
[ "def", "clean_dict", "(", "dictionary", ")", ":", "if", "not", "isinstance", "(", "dictionary", ",", "dict", ")", ":", "return", "dictionary", "return", "{", "k", ":", "clean_dict", "(", "v", ")", "for", "k", ",", "v", "in", "dictionary", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}" ]
Remove items with None value in the dictionary and recursively in other nested dictionaries.
[ "Remove", "items", "with", "None", "value", "in", "the", "dictionary", "and", "recursively", "in", "other", "nested", "dictionaries", "." ]
[ "\"\"\" Remove items with None value in the dictionary and recursively in other nested dictionaries. \"\"\"" ]
[ { "param": "dictionary", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dictionary", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def clean_dict(dictionary):
    if not isinstance(dictionary, dict):
        return dictionary
    return {k: clean_dict(v) for k, v in dictionary.items() if v is not None}
610,214
251
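Behavior on a nested structure (data invented):

cfg = {"host": "10.0.0.1", "port": None, "auth": {"user": "x", "token": None}}
print(clean_dict(cfg))  # {'host': '10.0.0.1', 'auth': {'user': 'x'}}
# 'port' is dropped at the top level, 'token' inside the nested dict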
1ab4053b625e6ca2e931d088343011bf230655cd
sapcc/nova
nova/db/sqlalchemy/api.py
[ "Apache-2.0" ]
Python
_db_connection_type
<not_specific>
def _db_connection_type(db_connection): """Returns a lowercase symbol for the db type. This is useful when we need to change what we are doing per DB (like handling regexes). In a CellsV2 world it probably needs to do something better than use the database configuration string. """ db_string = db_connection.split(':')[0].split('+')[0] return db_string.lower()
Returns a lowercase symbol for the db type. This is useful when we need to change what we are doing per DB (like handling regexes). In a CellsV2 world it probably needs to do something better than use the database configuration string.
Returns a lowercase symbol for the db type. This is useful when we need to change what we are doing per DB (like handling regexes). In a CellsV2 world it probably needs to do something better than use the database configuration string.
[ "Returns", "a", "lowercase", "symbol", "for", "the", "db", "type", ".", "This", "is", "useful", "when", "we", "need", "to", "change", "what", "we", "are", "doing", "per", "DB", "(", "like", "handling", "regexes", ")", ".", "In", "a", "CellsV2", "world", "it", "probably", "needs", "to", "do", "something", "better", "than", "use", "the", "database", "configuration", "string", "." ]
def _db_connection_type(db_connection):
    db_string = db_connection.split(':')[0].split('+')[0]
    return db_string.lower()
[ "def", "_db_connection_type", "(", "db_connection", ")", ":", "db_string", "=", "db_connection", ".", "split", "(", "':'", ")", "[", "0", "]", ".", "split", "(", "'+'", ")", "[", "0", "]", "return", "db_string", ".", "lower", "(", ")" ]
Returns a lowercase symbol for the db type.
[ "Returns", "a", "lowercase", "symbol", "for", "the", "db", "type", "." ]
[ "\"\"\"Returns a lowercase symbol for the db type.\n\n This is useful when we need to change what we are doing per DB\n (like handling regexes). In a CellsV2 world it probably needs to\n do something better than use the database configuration string.\n \"\"\"" ]
[ { "param": "db_connection", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "db_connection", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _db_connection_type(db_connection):
    db_string = db_connection.split(':')[0].split('+')[0]
    return db_string.lower()
610,215
212
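How the two splits behave on SQLAlchemy-style connection strings (URLs invented):

print(_db_connection_type("mysql+pymysql://nova:[email protected]/nova"))  # 'mysql'
print(_db_connection_type("sqlite:///nova.sqlite"))  # 'sqlite'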
0a802b10d779548a2da7e790e238b8f80908beb0
premm1983/Spinnaker
dev/buildtool/git.py
[ "Apache-2.0" ]
Python
__normalize_repo_url
<not_specific>
def __normalize_repo_url(url): """Normalize a repo url for purposes of checking equality. Returns: Either a tuple (HOST, OWNER, PATH) if url is a github-like URL assumed to be in the form <PROTOCOL://HOST/OWNER/PATH> where or ssh@<HOST>:<USER><REPO> in these cases, a '.git' REPO postfix is considered superfluous. Otherwise a string assuming the url is a local path where the string will be the absolute path. """ dot_git = '.git' gitless_url = (url[:-len(dot_git)] if url.endswith(dot_git) else url) # e.g. http://github.com/USER/REPO match = re.match(r'[a-z0-9]+://([^/]+)/([^/]+)/(.+)', gitless_url) if not match: # e.g. [email protected]:USER/REPO match = re.match(r'git@([^:]+):([^/]+)/(.+)', gitless_url) if match: return match.groups() return os.path.abspath(url)
Normalize a repo url for purposes of checking equality.

Returns:
  Either a tuple (HOST, OWNER, PATH) if url is a github-like URL
  assumed to be in the form <PROTOCOL://HOST/OWNER/PATH> where
  or ssh@<HOST>:<USER><REPO>
  in these cases, a '.git' REPO postfix is considered superfluous.

  Otherwise a string assuming the url is a local path
  where the string will be the absolute path.
Normalize a repo url for purposes of checking equality.
[ "Normalize", "a", "repo", "url", "for", "purposes", "of", "checking", "equality", "." ]
def __normalize_repo_url(url):
    dot_git = '.git'
    gitless_url = (url[:-len(dot_git)] if url.endswith(dot_git) else url)
    match = re.match(r'[a-z0-9]+://([^/]+)/([^/]+)/(.+)', gitless_url)
    if not match:
        match = re.match(r'git@([^:]+):([^/]+)/(.+)', gitless_url)
    if match:
        return match.groups()
    return os.path.abspath(url)
[ "def", "__normalize_repo_url", "(", "url", ")", ":", "dot_git", "=", "'.git'", "gitless_url", "=", "(", "url", "[", ":", "-", "len", "(", "dot_git", ")", "]", "if", "url", ".", "endswith", "(", "dot_git", ")", "else", "url", ")", "match", "=", "re", ".", "match", "(", "r'[a-z0-9]+://([^/]+)/([^/]+)/(.+)'", ",", "gitless_url", ")", "if", "not", "match", ":", "match", "=", "re", ".", "match", "(", "r'git@([^:]+):([^/]+)/(.+)'", ",", "gitless_url", ")", "if", "match", ":", "return", "match", ".", "groups", "(", ")", "return", "os", ".", "path", ".", "abspath", "(", "url", ")" ]
Normalize a repo url for purposes of checking equality.
[ "Normalize", "a", "repo", "url", "for", "purposes", "of", "checking", "equality", "." ]
[ "\"\"\"Normalize a repo url for purposes of checking equality.\n\n Returns:\n Either a tuple (HOST, OWNER, PATH) if url is a github-like URL\n assumed to be in the form <PROTOCOL://HOST/OWNER/PATH> where\n or ssh@<HOST>:<USER><REPO>\n in these cases, a '.git' REPO postfix is considered superfluous.\n\n Otherwise a string assuming the url is a local path\n where the string will be the absolute path.\n \"\"\"", "# e.g. http://github.com/USER/REPO", "# e.g. [email protected]:USER/REPO" ]
[ { "param": "url", "type": null } ]
{ "returns": [ { "docstring": "Either a tuple (HOST, OWNER, PATH) if url is a github-like URL\nassumed to be in the form where\nor ssh@:\nin these cases, a '.git' REPO postfix is considered superfluous.\n\nOtherwise a string assuming the url is a local path\nwhere the string will be the absolute path.", "docstring_tokens": [ "Either", "a", "tuple", "(", "HOST", "OWNER", "PATH", ")", "if", "url", "is", "a", "github", "-", "like", "URL", "assumed", "to", "be", "in", "the", "form", "where", "or", "ssh@", ":", "in", "these", "cases", "a", "'", ".", "git", "'", "REPO", "postfix", "is", "considered", "superfluous", ".", "Otherwise", "a", "string", "assuming", "the", "url", "is", "a", "local", "path", "where", "the", "string", "will", "be", "the", "absolute", "path", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "url", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re
import os


def __normalize_repo_url(url):
    dot_git = '.git'
    gitless_url = (url[:-len(dot_git)] if url.endswith(dot_git) else url)
    match = re.match(r'[a-z0-9]+://([^/]+)/([^/]+)/(.+)', gitless_url)
    if not match:
        match = re.match(r'git@([^:]+):([^/]+)/(.+)', gitless_url)
    if match:
        return match.groups()
    return os.path.abspath(url)
610,216
186
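The normalization makes textually different spellings of the same repo compare equal (repo names invented):

a = __normalize_repo_url("https://github.com/openstack/nova.git")
b = __normalize_repo_url("[email protected]:openstack/nova")
print(a == b)  # True: both become ('github.com', 'openstack', 'nova')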
67f64f22350dd01bbee8536969e6bf3e6e7d46d4
proy3189/coding_challenge
talpa/core/data_checks.py
[ "Apache-2.0" ]
Python
is_numeric
<not_specific>
def is_numeric(df):
    '''Checks if all columns in a dataframe are numeric.
    :param df: Dataframe of shape (n_samples, n_features)
        The input samples.
    :return:
    '''
    for dtype in df.dtypes:
        if dtype not in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
            return False
    # All numeric.
    return True
Checks if all columns in a dataframe are numeric. :param df: Dataframe of shape (n_samples, n_features) The input samples. :return:
Checks if all columns in a dataframe are numeric.
[ "Checks", "if", "all", "columns", "in", "a", "dataframe", "are", "numeric", "." ]
def is_numeric(df):
    for dtype in df.dtypes:
        if dtype not in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
            return False
    return True
[ "def", "is_numeric", "(", "df", ")", ":", "for", "dtype", "in", "df", ".", "dtypes", ":", "if", "dtype", "not", "in", "[", "'int16'", ",", "'int32'", ",", "'int64'", ",", "'float16'", ",", "'float32'", ",", "'float64'", "]", ":", "return", "False", "return", "True" ]
Checks if all columns in a dataframe are numeric.
[ "Checks", "if", "all", "columns", "in", "a", "dataframe", "are", "numeric", "." ]
[ "'''Checks if all columns in a dataframe are numeric.\n :param df: Dataframe of shape (n_samples, n_features)\n The input samples.\n :return:\n '''", "# All numeric." ]
[ { "param": "df", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "df", "type": null, "docstring": "Dataframe of shape (n_samples, n_features)\nThe input samples.", "docstring_tokens": [ "Dataframe", "of", "shape", "(", "n_samples", "n_features", ")", "The", "input", "samples", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_numeric(df):
    for dtype in df.dtypes:
        if dtype not in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
            return False
    return True
610,217
452
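With a pandas frame (requires pandas; note the dtype whitelist above omits e.g. unsigned integer types):

import pandas as pd

df_num = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})
df_mix = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
print(is_numeric(df_num))  # True
print(is_numeric(df_mix))  # False: the 'object' column fails the check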
d94c1afd96ff59bf08b5cc6cc00861791671864b
geoboxers/thatsDEM2
thatsDEM2/remote_files.py
[ "0BSD" ]
Python
is_remote
<not_specific>
def is_remote(path): """Determine whether a file is in a remote location (which can be handled) based on prefix of connection string.""" for token in ["s3://", "http://", "https://"]: # add if path.startswith(token): return True return False
Determine whether a file is in a remote location (which can be handled) based on prefix of connection string.
Determine whether a file is in a remote location (which can be handled) based on prefix of connection string.
[ "Determine", "whether", "a", "file", "is", "in", "a", "remote", "location", "(", "which", "can", "be", "handled", ")", "based", "on", "prefix", "of", "connection", "string", "." ]
def is_remote(path):
    for token in ["s3://", "http://", "https://"]:
        if path.startswith(token):
            return True
    return False
[ "def", "is_remote", "(", "path", ")", ":", "for", "token", "in", "[", "\"s3://\"", ",", "\"http://\"", ",", "\"https://\"", "]", ":", "if", "path", ".", "startswith", "(", "token", ")", ":", "return", "True", "return", "False" ]
Determine whether a file is in a remote location (which can be handled) based on prefix of connection string.
[ "Determine", "whether", "a", "file", "is", "in", "a", "remote", "location", "(", "which", "can", "be", "handled", ")", "based", "on", "prefix", "of", "connection", "string", "." ]
[ "\"\"\"Determine whether a file is in a remote location (which can be handled) based on prefix of connection string.\"\"\"", "# add" ]
[ { "param": "path", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_remote(path):
    for token in ["s3://", "http://", "https://"]:
        if path.startswith(token):
            return True
    return False
610,218
854
2ecc6e4ebe8b0d0d68011e770f537471422dcbed
rpa-tomorrow/substorm-nlp
lib/cli/config.py
[ "MIT" ]
Python
config_user_name
str
def config_user_name() -> str: """ Prompt the user about entering a name and return that name """ print("User not configured.\nPlease enter your name: ") name = sys.stdin.readline().strip() return name
Prompt the user about entering a name and return that name
Prompt the user about entering a name and return that name
[ "Prompt", "the", "user", "about", "entering", "a", "name", "and", "return", "that", "name" ]
def config_user_name() -> str:
    print("User not configured.\nPlease enter your name: ")
    name = sys.stdin.readline().strip()
    return name
[ "def", "config_user_name", "(", ")", "->", "str", ":", "print", "(", "\"User not configured.\\nPlease enter your name: \"", ")", "name", "=", "sys", ".", "stdin", ".", "readline", "(", ")", ".", "strip", "(", ")", "return", "name" ]
Prompt the user about entering a name and return that name
[ "Prompt", "the", "user", "about", "entering", "a", "name", "and", "return", "that", "name" ]
[ "\"\"\" Prompt the user about entering a name and return that name \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import sys


def config_user_name() -> str:
    print("User not configured.\nPlease enter your name: ")
    name = sys.stdin.readline().strip()
    return name
610,219
231
a8fbce130a3fd5c6c501e38a41f4e2ff2c644342
jonoco/fumblr
fumblr/models.py
[ "MIT" ]
Python
valid_email
<not_specific>
def valid_email(cls, email): """ Check if email conforms to a valid email pattern """ email_pattern = re.compile('^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$') return email_pattern.match(email)
Check if email conforms to a valid email pattern
Check if email conforms to a valid email pattern
[ "Check", "if", "email", "conforms", "to", "a", "valid", "email", "pattern" ]
def valid_email(cls, email):
    email_pattern = re.compile('^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$')
    return email_pattern.match(email)
[ "def", "valid_email", "(", "cls", ",", "email", ")", ":", "email_pattern", "=", "re", ".", "compile", "(", "'^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$'", ")", "return", "email_pattern", ".", "match", "(", "email", ")" ]
Check if email conforms to a valid email pattern
[ "Check", "if", "email", "conforms", "to", "a", "valid", "email", "pattern" ]
[ "\"\"\"\n Check if email conforms to a valid email pattern\n\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "email", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "email", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re


def valid_email(cls, email):
    email_pattern = re.compile('^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$')
    return email_pattern.match(email)
610,220
69
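The unused `cls` suggests a classmethod stripped of its class, but the pattern can still be exercised standalone (addresses invented):

print(bool(valid_email(None, "[email protected]")))  # True
print(bool(valid_email(None, "not-an-email")))  # False: match() returns None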
f8ca7a7efe29576cc0fdcf5103e8dd0ec147e809
vladimirnesterov/ten-little-algorithms
ten_little_algorithms.py
[ "MIT" ]
Python
welford
<not_specific>
def welford(x_array): """Welford's method. Mean and variance calculation using Welford's method, taken from part 3 of "Ten Little Algorithms" by Jason Sachs. Args: x_array (array): sample sequence. Returns: M, S: mean and variance of x_array. """ k = 0 M = 0 S = 0 for x in x_array: k += 1 Mnext = M + (x - M) / k S = S + (x - M)*(x - Mnext) M = Mnext return (M, S/(k-1))
Welford's method.

Mean and variance calculation using Welford's method,
taken from part 3 of "Ten Little Algorithms" by Jason Sachs.

Args:
    x_array (array): sample sequence.

Returns:
    M, S: mean and variance of x_array.
Welford's method. Mean and variance calculation using Welford's method, taken from part 3 of "Ten Little Algorithms" by Jason Sachs.
[ "Welford", "'", "s", "method", ".", "Mean", "and", "variance", "calculation", "using", "Welford", "'", "s", "method", "taken", "from", "part", "3", "of", "\"", "Ten", "Little", "Algorithms", "\"", "by", "Jason", "Sachs", "." ]
def welford(x_array):
    k = 0
    M = 0
    S = 0
    for x in x_array:
        k += 1
        Mnext = M + (x - M) / k
        S = S + (x - M)*(x - Mnext)
        M = Mnext
    return (M, S/(k-1))
[ "def", "welford", "(", "x_array", ")", ":", "k", "=", "0", "M", "=", "0", "S", "=", "0", "for", "x", "in", "x_array", ":", "k", "+=", "1", "Mnext", "=", "M", "+", "(", "x", "-", "M", ")", "/", "k", "S", "=", "S", "+", "(", "x", "-", "M", ")", "*", "(", "x", "-", "Mnext", ")", "M", "=", "Mnext", "return", "(", "M", ",", "S", "/", "(", "k", "-", "1", ")", ")" ]
Welford's method.
[ "Welford", "'", "s", "method", "." ]
[ "\"\"\"Welford's method.\n \n Mean and variance calculation using Welford's method, \n taken from part 3 of \"Ten Little Algorithms\" by Jason Sachs.\n\n Args:\n x_array (array): sample sequence.\n\n Returns:\n M, S: mean and variance of x_array.\n\n \"\"\"" ]
[ { "param": "x_array", "type": null } ]
{ "returns": [ { "docstring": "M, S: mean and variance of x_array.", "docstring_tokens": [ "M", "S", ":", "mean", "and", "variance", "of", "x_array", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "x_array", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def welford(x_array):
    k = 0
    M = 0
    S = 0
    for x in x_array:
        k += 1
        Mnext = M + (x - M) / k
        S = S + (x - M)*(x - Mnext)
        M = Mnext
    return (M, S/(k-1))
610,222
874
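Cross-checking the streaming result against the standard library (sample data invented; the S/(k-1) divisor makes this the sample variance):

import statistics

xs = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
m, v = welford(xs)
print(m, v)  # 5.0 4.571428571428571
print(statistics.mean(xs), statistics.variance(xs))  # same values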
7634a2f39bea5cc0131572faae31f2a3f3b6acb9
pjturcot/eternal
tests/test_ewc.py
[ "MIT" ]
Python
ewc_v2_siegesupplier
<not_specific>
def ewc_v2_siegesupplier(): """ 1x 10-362 Siege Supplier 2x 1-1 Fire Sigil """ url = "https://eternalwarcry.com/deck-builder?main=BKqLCBB" return url
1x 10-362 Siege Supplier
2x 1-1 Fire Sigil
1x 10-362 Siege Supplier 2x 1-1 Fire Sigil
[ "1x", "10", "-", "362", "Siege", "Supplier", "2x", "1", "-", "1", "Fire", "Sigil" ]
def ewc_v2_siegesupplier():
    url = "https://eternalwarcry.com/deck-builder?main=BKqLCBB"
    return url
[ "def", "ewc_v2_siegesupplier", "(", ")", ":", "url", "=", "\"https://eternalwarcry.com/deck-builder?main=BKqLCBB\"", "return", "url" ]
1x 10-362 Siege Supplier 2x 1-1 Fire Sigil
[ "1x", "10", "-", "362", "Siege", "Supplier", "2x", "1", "-", "1", "Fire", "Sigil" ]
[ "\"\"\"\n 1x 10-362 Siege Supplier\n 2x 1-1 Fire Sigil\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def ewc_v2_siegesupplier():
    url = "https://eternalwarcry.com/deck-builder?main=BKqLCBB"
    return url
610,223
801
bac5f653d34ef31005c7d75f63a6d1faf25e6550
kantai/passe-pypy-taint-tracking
_pytest/assertion/__init__.py
[ "MIT" ]
Python
_load_modules
null
def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret from _pytest.assertion import reinterpret if mode == "rewrite": from _pytest.assertion import rewrite
Lazily import assertion related code.
Lazily import assertion related code.
[ "Lazily", "import", "assertion", "related", "code", "." ]
def _load_modules(mode):
    global rewrite, reinterpret
    from _pytest.assertion import reinterpret
    if mode == "rewrite":
        from _pytest.assertion import rewrite
[ "def", "_load_modules", "(", "mode", ")", ":", "global", "rewrite", ",", "reinterpret", "from", "_pytest", ".", "assertion", "import", "reinterpret", "if", "mode", "==", "\"rewrite\"", ":", "from", "_pytest", ".", "assertion", "import", "rewrite" ]
Lazily import assertion related code.
[ "Lazily", "import", "assertion", "related", "code", "." ]
[ "\"\"\"Lazily import assertion related code.\"\"\"" ]
[ { "param": "mode", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "mode", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _load_modules(mode):
    global rewrite, reinterpret
    from _pytest.assertion import reinterpret
    if mode == "rewrite":
        from _pytest.assertion import rewrite
610,225
787
e5029c537feabfa64bf0149b86e167f4ef116f2c
AndreWohnsland/Cocktailmaker_AW
src/supporter.py
[ "MIT" ]
Python
plusminus
null
def plusminus(label, operator, minimal=0, maximal=1000, delta=10):
    """ increases or decreases the value by a given amount in the boundaries"""
    try:
        value_ = int(label.text())
        value_ = value_ + (delta if operator == "+" else -delta)
        value_ = min(maximal, max(minimal, (value_ // delta) * delta))
    except ValueError:
        value_ = maximal if operator == "+" else minimal
    label.setText(str(value_))
increases or decreases the value by a given amount in the boundaries
increases or decreases the value by a given amount in the boundaries
[ "increases", "or", "decreases", "the", "value", "by", "a", "given", "amount", "in", "the", "boundaries" ]
def plusminus(label, operator, minimal=0, maximal=1000, delta=10):
    try:
        value_ = int(label.text())
        value_ = value_ + (delta if operator == "+" else -delta)
        value_ = min(maximal, max(minimal, (value_ // delta) * delta))
    except ValueError:
        value_ = maximal if operator == "+" else minimal
    label.setText(str(value_))
[ "def", "plusminus", "(", "label", ",", "operator", ",", "minimal", "=", "0", ",", "maximal", "=", "1000", ",", "delta", "=", "10", ")", ":", "try", ":", "value_", "=", "int", "(", "label", ".", "text", "(", ")", ")", "value_", "=", "value_", "+", "(", "delta", "if", "operator", "==", "\"+\"", "else", "-", "delta", ")", "value_", "=", "min", "(", "maximal", ",", "max", "(", "minimal", ",", "(", "value_", "//", "delta", ")", "*", "delta", ")", ")", "except", "ValueError", ":", "value_", "=", "maximal", "if", "operator", "==", "\"+\"", "else", "minimal", "label", ".", "setText", "(", "str", "(", "value_", ")", ")" ]
increases or decreases the value by a given amount in the boundaries
[ "increases", "or", "decreases", "the", "value", "by", "a", "given", "amount", "in", "the", "boundaries" ]
[ "\"\"\" increases or decreases the value by a given amount in the boundaries\"\"\"" ]
[ { "param": "label", "type": null }, { "param": "operator", "type": null }, { "param": "minimal", "type": null }, { "param": "maximal", "type": null }, { "param": "delta", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "label", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "operator", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "minimal", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "maximal", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "delta", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def plusminus(label, operator, minimal=0, maximal=1000, delta=10):
    try:
        value_ = int(label.text())
        value_ = value_ + (delta if operator == "+" else -delta)
        value_ = min(maximal, max(minimal, (value_ // delta) * delta))
    except ValueError:
        value_ = maximal if operator == "+" else minimal
    label.setText(str(value_))
610,226
364
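The function only calls .text() and .setText(), so a tiny stand-in object (not the real Qt label) is enough to see the clamping and the snapping to multiples of delta:

class FakeLabel:
    def __init__(self, text):
        self._text = text
    def text(self):
        return self._text
    def setText(self, text):
        self._text = text

lbl = FakeLabel("995")
plusminus(lbl, "+")
print(lbl.text())  # '1000': 995 + 10 = 1005, clamped to maximal
plusminus(lbl, "-", delta=30)
print(lbl.text())  # '960': 1000 - 30 = 970, snapped down to a multiple of 30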
c41fb6e68040caafe97b84a8fcfa5b721b7db858
gfhuertac/poderopedia
utils.py
[ "Apache-2.0" ]
Python
xpath_value
<not_specific>
def xpath_value(root, path, logger=None): """ Method that goes to the defined path inside root and extract the text value of that element """ element = root.xpath(path) if len(element): element = element[0].text if logger is not None: logger.debug('Value: {}'.format(element)) return element return None
Method that goes to the defined path inside root and extract the text value of that element
Method that goes to the defined path inside root and extract the text value of that element
[ "Method", "that", "goes", "to", "the", "defined", "path", "inside", "root", "and", "extract", "the", "text", "value", "of", "that", "element" ]
def xpath_value(root, path, logger=None):
    element = root.xpath(path)
    if len(element):
        element = element[0].text
        if logger is not None:
            logger.debug('Value: {}'.format(element))
        return element
    return None
[ "def", "xpath_value", "(", "root", ",", "path", ",", "logger", "=", "None", ")", ":", "element", "=", "root", ".", "xpath", "(", "path", ")", "if", "len", "(", "element", ")", ":", "element", "=", "element", "[", "0", "]", ".", "text", "if", "logger", "is", "not", "None", ":", "logger", ".", "debug", "(", "'Value: {}'", ".", "format", "(", "element", ")", ")", "return", "element", "return", "None" ]
Method that goes to the defined path inside root and extract the text value of that element
[ "Method", "that", "goes", "to", "the", "defined", "path", "inside", "root", "and", "extract", "the", "text", "value", "of", "that", "element" ]
[ "\"\"\"\n Method that goes to the defined path inside root and extract\n the text value of that element\n \"\"\"" ]
[ { "param": "root", "type": null }, { "param": "path", "type": null }, { "param": "logger", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "root", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "logger", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def xpath_value(root, path, logger=None):
    element = root.xpath(path)
    if len(element):
        element = element[0].text
        if logger is not None:
            logger.debug('Value: {}'.format(element))
        return element
    return None
610,229
165
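With lxml (document invented):

from lxml import etree

root = etree.fromstring("<person><name>Juan</name></person>")
print(xpath_value(root, "name"))  # 'Juan'
print(xpath_value(root, "surname"))  # None: empty result list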
d45751fe0142e4d7dfe24a8fcfb32bc15bb912e5
Eli-Tarrago/radssh
radssh/star_commands.py
[ "BSD-3-Clause" ]
Python
star_shell
null
def star_shell(cluster, logdir, cmdline, *args):
    '''Drop into a local shell - exit subshell to return with session intact'''
    connections = []
    for k in cluster:
        if k not in cluster.disabled:
            connections.append(str(k))
    os.putenv('RADSSH_CONNECTIONS', ' '.join(connections))
    os.system('PS1="(RadSSH subshell) $ " bash')
Drop into a local shell - exit subshell to return with session intact
Drop into a local shell - exit subshell to return with session intact
[ "Drop", "into", "a", "local", "shell", "-", "exit", "subshell", "to", "return", "with", "session", "intact" ]
def star_shell(cluster, logdir, cmdline, *args):
    connections = []
    for k in cluster:
        if k not in cluster.disabled:
            connections.append(str(k))
    os.putenv('RADSSH_CONNECTIONS', ' '.join(connections))
    os.system('PS1="(RadSSH subshell) $ " bash')
[ "def", "star_shell", "(", "cluster", ",", "logdir", ",", "cmdline", ",", "*", "args", ")", ":", "connections", "=", "[", "]", "for", "k", "in", "cluster", ":", "if", "k", "not", "in", "cluster", ".", "disabled", ":", "connections", ".", "append", "(", "str", "(", "k", ")", ")", "os", ".", "putenv", "(", "'RADSSH_CONNECTIONS'", ",", "' '", ".", "join", "(", "connections", ")", ")", "os", ".", "system", "(", "'PS1=\"(RadSSH subshell) $ \" bash'", ")" ]
Drop into a local shell - exit subshell to return with session intact
[ "Drop", "into", "a", "local", "shell", "-", "exit", "subshell", "to", "return", "with", "session", "intact" ]
[ "'''Drop into a local shell - exit subshell to return with session intact'''" ]
[ { "param": "cluster", "type": null }, { "param": "logdir", "type": null }, { "param": "cmdline", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cluster", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "logdir", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cmdline", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os


def star_shell(cluster, logdir, cmdline, *args):
    connections = []
    for k in cluster:
        if k not in cluster.disabled:
            connections.append(str(k))
    os.putenv('RADSSH_CONNECTIONS', ' '.join(connections))
    os.system('PS1="(RadSSH subshell) $ " bash')
610,230
98
4e13f44e7c988320f68a913121ffa7fd74a67713
wallnerryan/flocker-profiles
flocker/provision/_install.py
[ "Apache-2.0" ]
Python
is_ubuntu
<not_specific>
def is_ubuntu(distribution): """ Determine whether the named distribution is a version of Ubuntu. :param bytes distribution: The name of the distribution to inspect. :return: ``True`` if the distribution named is a version of Ubuntu, ``False`` otherwise. """ return distribution.startswith("ubuntu-")
Determine whether the named distribution is a version of Ubuntu. :param bytes distribution: The name of the distribution to inspect. :return: ``True`` if the distribution named is a version of Ubuntu, ``False`` otherwise.
Determine whether the named distribution is a version of Ubuntu.
[ "Determine", "whether", "the", "named", "distribution", "is", "a", "version", "of", "Ubuntu", "." ]
def is_ubuntu(distribution):
    return distribution.startswith("ubuntu-")
[ "def", "is_ubuntu", "(", "distribution", ")", ":", "return", "distribution", ".", "startswith", "(", "\"ubuntu-\"", ")" ]
Determine whether the named distribution is a version of Ubuntu.
[ "Determine", "whether", "the", "named", "distribution", "is", "a", "version", "of", "Ubuntu", "." ]
[ "\"\"\"\n Determine whether the named distribution is a version of Ubuntu.\n\n :param bytes distribution: The name of the distribution to inspect.\n\n :return: ``True`` if the distribution named is a version of Ubuntu,\n ``False`` otherwise.\n \"\"\"" ]
[ { "param": "distribution", "type": null } ]
{ "returns": [ { "docstring": "``True`` if the distribution named is a version of Ubuntu,\n``False`` otherwise.", "docstring_tokens": [ "`", "`", "True", "`", "`", "if", "the", "distribution", "named", "is", "a", "version", "of", "Ubuntu", "`", "`", "False", "`", "`", "otherwise", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "distribution", "type": null, "docstring": "The name of the distribution to inspect.", "docstring_tokens": [ "The", "name", "of", "the", "distribution", "to", "inspect", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def is_ubuntu(distribution):
    return distribution.startswith("ubuntu-")
610,231
1,010
c0a408aee08e1fa48afb0b7c68205efd2b2aaeab
selasley/cauldron
cauldron/environ/systems.py
[ "MIT" ]
Python
remove
bool
def remove(path: str, max_retries: int = 3) -> bool: """ Removes the specified path from the local filesystem if it exists. Directories will be removed along with all files and folders within them as well as files. :param path: The location of the file or folder to remove. :param max_retries: The number of times to retry before giving up. :return: A boolean indicating whether or not the removal was successful. """ if not path: return False if not os.path.exists(path): return True remover = os.remove if os.path.isfile(path) else shutil.rmtree for attempt in range(max_retries): try: remover(path) return True except Exception: # Pause briefly in case there's a race condition on lock # for the target. time.sleep(0.02) return False
Removes the specified path from the local filesystem if it exists.
Directories will be removed along with all files and folders within
them as well as files.

:param path:
    The location of the file or folder to remove.
:param max_retries:
    The number of times to retry before giving up.
:return:
    A boolean indicating whether or not the removal was successful.
Removes the specified path from the local filesystem if it exists. Directories will be removed along with all files and folders within them as well as files.
[ "Removes", "the", "specified", "path", "from", "the", "local", "filesystem", "if", "it", "exists", ".", "Directories", "will", "be", "removed", "along", "with", "all", "files", "and", "folders", "within", "them", "as", "well", "as", "files", "." ]
def remove(path: str, max_retries: int = 3) -> bool:
    if not path:
        return False
    if not os.path.exists(path):
        return True
    remover = os.remove if os.path.isfile(path) else shutil.rmtree
    for attempt in range(max_retries):
        try:
            remover(path)
            return True
        except Exception:
            time.sleep(0.02)
    return False
[ "def", "remove", "(", "path", ":", "str", ",", "max_retries", ":", "int", "=", "3", ")", "->", "bool", ":", "if", "not", "path", ":", "return", "False", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "True", "remover", "=", "os", ".", "remove", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", "else", "shutil", ".", "rmtree", "for", "attempt", "in", "range", "(", "max_retries", ")", ":", "try", ":", "remover", "(", "path", ")", "return", "True", "except", "Exception", ":", "time", ".", "sleep", "(", "0.02", ")", "return", "False" ]
Removes the specified path from the local filesystem if it exists.
[ "Removes", "the", "specified", "path", "from", "the", "local", "filesystem", "if", "it", "exists", "." ]
[ "\"\"\"\n Removes the specified path from the local filesystem if it exists.\n Directories will be removed along with all files and folders within\n them as well as files.\n\n :param path:\n The location of the file or folder to remove.\n :param max_retries:\n The number of times to retry before giving up.\n :return:\n A boolean indicating whether or not the removal was successful.\n \"\"\"", "# Pause briefly in case there's a race condition on lock", "# for the target." ]
[ { "param": "path", "type": "str" }, { "param": "max_retries", "type": "int" } ]
{ "returns": [ { "docstring": "A boolean indicating whether or not the removal was successful.", "docstring_tokens": [ "A", "boolean", "indicating", "whether", "or", "not", "the", "removal", "was", "successful", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "path", "type": "str", "docstring": "The location of the file or folder to remove.", "docstring_tokens": [ "The", "location", "of", "the", "file", "or", "folder", "to", "remove", "." ], "default": null, "is_optional": null }, { "identifier": "max_retries", "type": "int", "docstring": "The number of times to retry before giving up.", "docstring_tokens": [ "The", "number", "of", "times", "to", "retry", "before", "giving", "up", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import shutil
import os
import time


def remove(path: str, max_retries: int = 3) -> bool:
    if not path:
        return False
    if not os.path.exists(path):
        return True
    remover = os.remove if os.path.isfile(path) else shutil.rmtree
    for attempt in range(max_retries):
        try:
            remover(path)
            return True
        except Exception:
            time.sleep(0.02)
    return False
610,232
150
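Exercising it on a throwaway directory (the tempfile scaffolding is my addition):

import os
import tempfile

d = tempfile.mkdtemp()
open(os.path.join(d, "f.txt"), "w").close()
print(remove(d))  # True: the tree is deleted via shutil.rmtree
print(remove(d))  # True again: a path that no longer exists counts as success
print(remove(""))  # False: empty paths are rejected up front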
a3b55d65ee49b4131d5c5ff1ecdfecce25521f9c
emc2norway/m2cpp
matlab2cpp/rules/_program.py
[ "BSD-3-Clause" ]
Python
strip
<not_specific>
def strip(text): """Remove trailing spaces and linefeeds. """ if not text: return text start = 0 while text[start] in "\n ": start += 1 end = 0 while text[end-1] in "\n ": end -= 1 if end: text = text[start:end] else: text = text[start:] return text
Remove trailing spaces and linefeeds.
Remove trailing spaces and linefeeds.
[ "Remove", "trailing", "spaces", "and", "linefeeds", "." ]
def strip(text):
    if not text:
        return text
    start = 0
    while text[start] in "\n ":
        start += 1
    end = 0
    while text[end-1] in "\n ":
        end -= 1
    if end:
        text = text[start:end]
    else:
        text = text[start:]
    return text
[ "def", "strip", "(", "text", ")", ":", "if", "not", "text", ":", "return", "text", "start", "=", "0", "while", "text", "[", "start", "]", "in", "\"\\n \"", ":", "start", "+=", "1", "end", "=", "0", "while", "text", "[", "end", "-", "1", "]", "in", "\"\\n \"", ":", "end", "-=", "1", "if", "end", ":", "text", "=", "text", "[", "start", ":", "end", "]", "else", ":", "text", "=", "text", "[", "start", ":", "]", "return", "text" ]
Remove trailing spaces and linefeeds.
[ "Remove", "trailing", "spaces", "and", "linefeeds", "." ]
[ "\"\"\"Remove trailing spaces and linefeeds.\n \"\"\"" ]
[ { "param": "text", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def strip(text):
    if not text:
        return text
    start = 0
    while text[start] in "\n ":
        start += 1
    end = 0
    while text[end-1] in "\n ":
        end -= 1
    if end:
        text = text[start:end]
    else:
        text = text[start:]
    return text
610,233
365
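A behavior sketch (inputs invented). One caveat: a string made only of spaces and linefeeds sends the first while loop off the end of the string; the record stores the code as-is:

print(repr(strip("\n  x = 1  \n")))  # 'x = 1'
print(repr(strip("")))  # '': falsy input is returned unchanged
# strip("   ") would raise IndexError, since the scan never meets a non-space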
dc580ac512cdf725b62857d4a35182bbb10682b9
gamescomputersplay/wordle
quordle-bot.py
[ "MIT" ]
Python
did_we_lose
<not_specific>
def did_we_lose(im, borders):
    ''' Check if we lost on the end game screen, by looking for red pixels
    (Because I can't read the answer to the 9th guess)
    '''
    red = (232, 18, 36)
    results_area_coords = ((borders[0] + borders[2]) // 2 - 50, borders[3],
                           (borders[0] + borders[2]) // 2 + 50, borders[3] + 100)
    results_area = im.crop(results_area_coords)
    results_area.save("results.png")
    px = results_area.load()
    for i in range(0, results_area.size[0], 10):
        for j in range(0, results_area.size[1], 10):
            if px[i, j] == red:
                return True
    return False
Check if we lost on the end game screen, by looking for red pixels (Because I can't read the answer to the 9th guess)
Check if we lost on the end game screen, by looking for red pixels (Because I can't read the answer to the 9th guess)
[ "Check", "if", "we", "lost", "on", "the", "end", "game", "screen", "by", "looking", "for", "red", "pixels", "(", "Because", "I", "can", "'", "t", "read", "the", "answer", "to", "the", "9th", "guess", ")" ]
def did_we_lose(im, borders):
    red = (232, 18, 36)
    results_area_coords = ((borders[0] + borders[2]) // 2 - 50, borders[3],
                           (borders[0] + borders[2]) // 2 + 50, borders[3] + 100)
    results_area = im.crop(results_area_coords)
    results_area.save("results.png")
    px = results_area.load()
    for i in range(0, results_area.size[0], 10):
        for j in range(0, results_area.size[1], 10):
            if px[i, j] == red:
                return True
    return False
[ "def", "did_we_lose", "(", "im", ",", "borders", ")", ":", "red", "=", "(", "232", ",", "18", ",", "36", ")", "results_area_coords", "=", "(", "(", "borders", "[", "0", "]", "+", "borders", "[", "2", "]", ")", "//", "2", "-", "50", ",", "borders", "[", "3", "]", ",", "(", "borders", "[", "0", "]", "+", "borders", "[", "2", "]", ")", "//", "2", "+", "50", ",", "borders", "[", "3", "]", "+", "100", ")", "results_area", "=", "im", ".", "crop", "(", "results_area_coords", ")", "results_area", ".", "save", "(", "\"results.png\"", ")", "px", "=", "results_area", ".", "load", "(", ")", "for", "i", "in", "range", "(", "0", ",", "results_area", ".", "size", "[", "0", "]", ",", "10", ")", ":", "for", "j", "in", "range", "(", "0", ",", "results_area", ".", "size", "[", "1", "]", ",", "10", ")", ":", "if", "px", "[", "i", ",", "j", "]", "==", "red", ":", "return", "True", "return", "False" ]
Check if we lost on the end game screen, by looking for red pixels (Because I can't read the answer to the 9th guess)
[ "Check", "if", "we", "lost", "on", "the", "end", "game", "screen", "by", "looking", "for", "red", "pixels", "(", "Because", "I", "can", "'", "t", "read", "the", "answer", "to", "the", "9th", "guess", ")" ]
[ "''' Check if we lost on the end game screen, by looking for red pixels\r\n (Because I can't read the answer to the 9th guess)\r\n '''" ]
[ { "param": "im", "type": null }, { "param": "borders", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "im", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "borders", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def did_we_lose(im, borders):
    red = (232, 18, 36)
    results_area_coords = ((borders[0] + borders[2]) // 2 - 50, borders[3],
                           (borders[0] + borders[2]) // 2 + 50, borders[3] + 100)
    results_area = im.crop(results_area_coords)
    results_area.save("results.png")
    px = results_area.load()
    for i in range(0, results_area.size[0], 10):
        for j in range(0, results_area.size[1], 10):
            if px[i, j] == red:
                return True
    return False
610,234
314
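A self-contained check with a synthetic screenshot (Pillow; sizes and borders invented). Note the call also writes results.png as a side effect:

from PIL import Image

im = Image.new("RGB", (800, 600), (255, 255, 255))
borders = (100, 50, 700, 400)
print(did_we_lose(im, borders))  # False: no red anywhere
im.putpixel((400, 450), (232, 18, 36))  # plant one "loss red" pixel in the area
print(did_we_lose(im, borders))  # True: the 10-px sampling grid hits (50, 50)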
35c83872236444ef574b6faabbca686bacaac3e3
epopisces/tools_user_io
user_io.py
[ "MIT" ]
Python
progress_bar
null
def progress_bar (
    iteration,
    total,
    prefix = '',
    suffix = '',
    decimals = 1,
    length = 100,
    fill = '█',
    printEnd = "\r\n"
):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)

    Example Usage:
        l = len(objs_list)
        printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50, printEnd="\r\n")
        for i, obj in enumerate(list):
            <do something with obj>
            time.sleep(0.01)
            printProgressBar(i + 1, l, prefix = "Progress:", suffix = "Complete", length = 50, printEnd="\r\n")

    Author: Greenstick @ https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
    """
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
    # Print New Line on Complete
    if iteration == total:
        print()
Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) length - Optional : character length of bar (Int) fill - Optional : bar fill character (Str) printEnd - Optional : end character (e.g. "\r", "\r\n") (Str) Example Usage: l = len(objs_list) printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50, printEnd="\r\n") for i, obj in enumerate(list): <do something with obj> time.sleep(0.01) printProgressBar(i + 1, l, prefix = "Progress:", suffix = "Complete", length = 50, printEnd="\r\n") Author: Greenstick @ https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
Call in a loop to create terminal progress bar
[ "Call", "in", "a", "loop", "to", "create", "terminal", "progress", "bar" ]
def progress_bar ( iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r\n" ): percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd) if iteration == total: print()
[ "def", "progress_bar", "(", "iteration", ",", "total", ",", "prefix", "=", "''", ",", "suffix", "=", "''", ",", "decimals", "=", "1", ",", "length", "=", "100", ",", "fill", "=", "'█',", "", "printEnd", "=", "\"\\r\\n\"", ")", ":", "percent", "=", "(", "\"{0:.\"", "+", "str", "(", "decimals", ")", "+", "\"f}\"", ")", ".", "format", "(", "100", "*", "(", "iteration", "/", "float", "(", "total", ")", ")", ")", "filledLength", "=", "int", "(", "length", "*", "iteration", "//", "total", ")", "bar", "=", "fill", "*", "filledLength", "+", "'-'", "*", "(", "length", "-", "filledLength", ")", "print", "(", "'\\r%s |%s| %s%% %s'", "%", "(", "prefix", ",", "bar", ",", "percent", ",", "suffix", ")", ",", "end", "=", "printEnd", ")", "if", "iteration", "==", "total", ":", "print", "(", ")" ]
Call in a loop to create terminal progress bar
[ "Call", "in", "a", "loop", "to", "create", "terminal", "progress", "bar" ]
[ "\"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n printEnd - Optional : end character (e.g. \"\\r\", \"\\r\\n\") (Str)\n \n Example Usage:\n l = len(objs_list)\n printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50, printEnd=\"\\r\\n\")\n for i, obj in enumerate(list):\n <do something with obj>\n time.sleep(0.01)\n printProgressBar(i + 1, l, prefix = \"Progress:\", suffix = \"Complete\", length = 50, printEnd=\"\\r\\n\")\n\n Author: Greenstick @ https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console\n \"\"\"", "# Print New Line on Complete" ]
[ { "param": "iteration", "type": null }, { "param": "total", "type": null }, { "param": "prefix", "type": null }, { "param": "suffix", "type": null }, { "param": "decimals", "type": null }, { "param": "length", "type": null }, { "param": "fill", "type": null }, { "param": "printEnd", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "iteration", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "total", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "prefix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "suffix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "decimals", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "length", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "fill", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "printEnd", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "params", "docstring": "iteration - Required : current iteration (Int)\ntotal - Required : total iterations (Int)\nprefix - Optional : prefix string (Str)\nsuffix - Optional : suffix string (Str)\ndecimals - Optional : positive number of decimals in percent complete (Int)\nlength - Optional : character length of bar (Int)\nfill - Optional : bar fill character (Str)\nprintEnd - Optional : end character (Str)\n\nExample Usage:\nl = len(objs_list)\nprintProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50, printEnd=\"\\r\\n\")\nfor i, obj in enumerate(list):\n\ntime.sleep(0.01)\nprintProgressBar(i + 1, l, prefix = \"Progress:\", suffix = \"Complete\", length = 50, printEnd=\"\\r\\n\")\n\n", "docstring_tokens": [ "iteration", "-", "Required", ":", "current", "iteration", "(", "Int", ")", "total", "-", "Required", ":", "total", "iterations", "(", "Int", ")", "prefix", "-", "Optional", ":", "prefix", "string", "(", "Str", ")", "suffix", "-", "Optional", ":", "suffix", "string", "(", "Str", ")", "decimals", "-", "Optional", ":", "positive", "number", "of", "decimals", "in", "percent", "complete", "(", "Int", ")", "length", "-", "Optional", ":", "character", "length", "of", "bar", "(", "Int", ")", "fill", "-", "Optional", ":", "bar", "fill", "character", "(", "Str", ")", "printEnd", "-", "Optional", ":", "end", "character", "(", "Str", ")", "Example", "Usage", ":", "l", "=", "len", "(", "objs_list", ")", "printProgressBar", "(", "0", "l", "prefix", "=", "'", "Progress", ":", "'", "suffix", "=", "'", "Complete", "'", "length", "=", "50", "printEnd", "=", "\"", "\\", "r", "\\", "n", "\"", ")", "for", "i", "obj", "in", "enumerate", "(", "list", ")", ":", "time", ".", "sleep", "(", "0", ".", "01", ")", "printProgressBar", "(", "i", "+", "1", "l", "prefix", "=", "\"", "Progress", ":", "\"", "suffix", "=", "\"", "Complete", "\"", "length", "=", "50", "printEnd", "=", "\"", "\\", "r", "\\", "n", "\"", ")" ] } ] }
def progress_bar ( iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r\n" ): percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd) if iteration == total: print()
610,235
664
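A short driver for the progress_bar record above — a sketch only, with a hypothetical workload. The default printEnd="\r\n" emits one bar per line; passing "\r" redraws the bar in place:

import time

work_items = list(range(20))            # hypothetical workload
total = len(work_items)
progress_bar(0, total, prefix="Progress:", suffix="Complete", length=40, printEnd="\r")
for i, _ in enumerate(work_items):
    time.sleep(0.01)                    # stand-in for real work
    progress_bar(i + 1, total, prefix="Progress:", suffix="Complete", length=40, printEnd="\r")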
acc744aa5f891609fb11118bde569e4a669512a2
tbuglioni/openfoodfactAPP
api/cleaner.py
[ "MIT" ]
Python
__cleaner_multiples_entry
<not_specific>
def __cleaner_multiples_entry(product_to_clean): """ clean x elements with split (list)""" mini_str = str(product_to_clean).lower() mini_list = mini_str.split(",") cleaned_list = [] for elt in mini_list: elt2 = elt.strip() cleaned_list.append(elt2) return cleaned_list
clean x elements with split (list)
clean x elements with split (list)
[ "clean", "x", "elements", "with", "split", "(", "list", ")" ]
def __cleaner_multiples_entry(product_to_clean): mini_str = str(product_to_clean).lower() mini_list = mini_str.split(",") cleaned_list = [] for elt in mini_list: elt2 = elt.strip() cleaned_list.append(elt2) return cleaned_list
[ "def", "__cleaner_multiples_entry", "(", "product_to_clean", ")", ":", "mini_str", "=", "str", "(", "product_to_clean", ")", ".", "lower", "(", ")", "mini_list", "=", "mini_str", ".", "split", "(", "\",\"", ")", "cleaned_list", "=", "[", "]", "for", "elt", "in", "mini_list", ":", "elt2", "=", "elt", ".", "strip", "(", ")", "cleaned_list", ".", "append", "(", "elt2", ")", "return", "cleaned_list" ]
clean x elements with split (list)
[ "clean", "x", "elements", "with", "split", "(", "list", ")" ]
[ "\"\"\" clean x elements with split (list)\"\"\"" ]
[ { "param": "product_to_clean", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "product_to_clean", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def __cleaner_multiples_entry(product_to_clean): mini_str = str(product_to_clean).lower() mini_list = mini_str.split(",") cleaned_list = [] for elt in mini_list: elt2 = elt.strip() cleaned_list.append(elt2) return cleaned_list
610,236
707
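Two quick behavioral checks for the cleaner record above (it lowercases, splits on commas, and strips whitespace); the double-underscore prefix only triggers name mangling inside a class, so module-level calls work as written:

assert __cleaner_multiples_entry("Milk, SUGAR ,  cocoa") == ["milk", "sugar", "cocoa"]
assert __cleaner_multiples_entry("Salt") == ["salt"]   # no comma: still a one-item list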
a84289422dc33db0c5a97e1774563f10a5555266
PingHuskar/hackerrank
mathematics/number-theory/cheese-and-random-toppings.py
[ "Unlicense" ]
Python
nCk
<not_specific>
def nCk(n, k): """ coefficient binomial (ou nombre de combinaisons) """ if n < k: return 0 return math.factorial(n) // math.factorial(k) // math.factorial(n - k)
coefficient binomial (ou nombre de combinaisons)
coefficient binomial (ou nombre de combinaisons)
[ "coefficient", "binomial", "(", "ou", "nombre", "de", "combinaisons", ")" ]
def nCk(n, k): if n < k: return 0 return math.factorial(n) // math.factorial(k) // math.factorial(n - k)
[ "def", "nCk", "(", "n", ",", "k", ")", ":", "if", "n", "<", "k", ":", "return", "0", "return", "math", ".", "factorial", "(", "n", ")", "//", "math", ".", "factorial", "(", "k", ")", "//", "math", ".", "factorial", "(", "n", "-", "k", ")" ]
coefficient binomial (ou nombre de combinaisons)
[ "coefficient", "binomial", "(", "ou", "nombre", "de", "combinaisons", ")" ]
[ "\"\"\" coefficient binomial (ou nombre de combinaisons) \"\"\"" ]
[ { "param": "n", "type": null }, { "param": "k", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "n", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "k", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def nCk(n, k): if n < k: return 0 return math.factorial(n) // math.factorial(k) // math.factorial(n - k)
610,237
384
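The nCk record above depends on import math (present in its code_with_imports field). A sanity check against math.comb, available from Python 3.8:

import math

assert nCk(5, 2) == 10
assert nCk(2, 5) == 0                     # the n < k guard returns 0 instead of raising
assert nCk(10, 3) == math.comb(10, 3)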
68869e4dfb7b3bbf6f80a874cc71671ee95f1872
giangnguyen2412/dissect_catastrophic_forgetting
prepro/statistics.py
[ "MIT" ]
Python
cal_num
<not_specific>
def cal_num(path): """ Calculate the number of each class: path - the directory where contains the images divided into classes return: a dictionary with key is name of class, value is the number of images of each class respectively """ stat = {} subdirs = os.listdir(path) for subdir in subdirs: p = os.path.join(path, subdir) if not os.path.isdir(p): continue stat[subdir] = len([img for img in os.listdir(p) if img.find('.jpg')]) return stat
Calculate the number of each class: path - the directory where contains the images divided into classes return: a dictionary with key is name of class, value is the number of images of each class respectively
Calculate the number of each class: path - the directory where contains the images divided into classes return: a dictionary with key is name of class, value is the number of images of each class respectively
[ "Calculate", "the", "number", "of", "each", "class", ":", "path", "-", "the", "directory", "where", "contains", "the", "images", "divided", "into", "classes", "return", ":", "a", "dictionary", "with", "key", "is", "name", "of", "class", "value", "is", "the", "number", "of", "images", "of", "each", "class", "respectively" ]
def cal_num(path): stat = {} subdirs = os.listdir(path) for subdir in subdirs: p = os.path.join(path, subdir) if not os.path.isdir(p): continue stat[subdir] = len([img for img in os.listdir(p) if img.find('.jpg')]) return stat
[ "def", "cal_num", "(", "path", ")", ":", "stat", "=", "{", "}", "subdirs", "=", "os", ".", "listdir", "(", "path", ")", "for", "subdir", "in", "subdirs", ":", "p", "=", "os", ".", "path", ".", "join", "(", "path", ",", "subdir", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "p", ")", ":", "continue", "stat", "[", "subdir", "]", "=", "len", "(", "[", "img", "for", "img", "in", "os", ".", "listdir", "(", "p", ")", "if", "img", ".", "find", "(", "'.jpg'", ")", "]", ")", "return", "stat" ]
Calculate the number of each class: path - the directory where contains the images divided into classes return: a dictionary with key is name of class, value is the number of images of each class respectively
[ "Calculate", "the", "number", "of", "each", "class", ":", "path", "-", "the", "directory", "where", "contains", "the", "images", "divided", "into", "classes", "return", ":", "a", "dictionary", "with", "key", "is", "name", "of", "class", "value", "is", "the", "number", "of", "images", "of", "each", "class", "respectively" ]
[ "\"\"\"\n Calculate the number of each class:\n path - the directory where contains the images divided into classes\n return: a dictionary with key is name of class, value is the number of images of each class respectively\n \"\"\"" ]
[ { "param": "path", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def cal_num(path): stat = {} subdirs = os.listdir(path) for subdir in subdirs: p = os.path.join(path, subdir) if not os.path.isdir(p): continue stat[subdir] = len([img for img in os.listdir(p) if img.find('.jpg')]) return stat
610,238
822
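One caveat on the cal_num record above: the filter if img.find('.jpg') is truthy whenever '.jpg' is absent (str.find returns -1) and falsy only when it sits at index 0, so non-JPEG files are counted too; img.endswith('.jpg') was probably intended. A sketch on a throwaway tree shows the effect:

import os
import tempfile

root = tempfile.mkdtemp()                          # throwaway dataset root
os.makedirs(os.path.join(root, "cats"))
for name in ("a.jpg", "b.jpg", "notes.txt"):       # hypothetical class-folder contents
    open(os.path.join(root, "cats", name), "w").close()
print(cal_num(root))                               # {'cats': 3} -- the .txt file is counted as well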
848fbcfbd996f24ac332bfad27929748b792b85b
andreww5au/PaSD-client
pasd/conversion.py
[ "MIT" ]
Python
scale_temp
<not_specific>
def scale_temp(value, reverse=False, pcb_version=0): """ Given a raw register value and the PCB version number, find out what scale and offset are needed, convert the raw value to deg C (if reverse=False), or convert a value in deg C to raw (if reverse=True). For now, raw values are hundredths of a deg C, as a signed 16-bit integer value :param value: raw register contents as a value from 0-65535, or a floating point temperature in degrees :param reverse: Boolean, True to perform physical->raw conversion instead of raw->physical :param pcb_version: integer PCB version number, 0-65535 :return: value in deg C (if reverse=False), or raw value as an unsigned 16 bit integer """ if reverse: if value < 0: return (int(value * 100) + 65536) & 0xFFFF else: return int(value * 100) & 0xFFFF else: if value >= 32768: value -= 65536 return value / 100.0 # raw_value is a signed 16-bit integer containing temp in 1/100th of a degree
Given a raw register value and the PCB version number, find out what scale and offset are needed, convert the raw value to deg C (if reverse=False), or convert a value in deg C to raw (if reverse=True). For now, raw values are hundredths of a deg C, as a signed 16-bit integer value :param value: raw register contents as a value from 0-65535, or a floating point temperature in degrees :param reverse: Boolean, True to perform physical->raw conversion instead of raw->physical :param pcb_version: integer PCB version number, 0-65535 :return: value in deg C (if reverse=False), or raw value as an unsigned 16 bit integer
Given a raw register value and the PCB version number, find out what scale and offset are needed, convert the raw value to deg C (if reverse=False), or convert a value in deg C to raw (if reverse=True). For now, raw values are hundredths of a deg C, as a signed 16-bit integer value
[ "Given", "a", "raw", "register", "value", "and", "the", "PCB", "version", "number", "find", "out", "what", "scale", "and", "offset", "are", "needed", "convert", "the", "raw", "value", "to", "deg", "C", "(", "if", "reverse", "=", "False", ")", "or", "convert", "a", "value", "in", "deg", "C", "to", "raw", "(", "if", "reverse", "=", "True", ")", ".", "For", "now", "raw", "values", "are", "hundredths", "of", "a", "deg", "C", "as", "a", "signed", "16", "-", "bit", "integer", "value" ]
def scale_temp(value, reverse=False, pcb_version=0): if reverse: if value < 0: return (int(value * 100) + 65536) & 0xFFFF else: return int(value * 100) & 0xFFFF else: if value >= 32768: value -= 65536 return value / 100.0
[ "def", "scale_temp", "(", "value", ",", "reverse", "=", "False", ",", "pcb_version", "=", "0", ")", ":", "if", "reverse", ":", "if", "value", "<", "0", ":", "return", "(", "int", "(", "value", "*", "100", ")", "+", "65536", ")", "&", "0xFFFF", "else", ":", "return", "int", "(", "value", "*", "100", ")", "&", "0xFFFF", "else", ":", "if", "value", ">=", "32768", ":", "value", "-=", "65536", "return", "value", "/", "100.0" ]
Given a raw register value and the PCB version number, find out what scale and offset are needed, convert the raw value to deg C (if reverse=False), or convert a value in deg C to raw (if reverse=True).
[ "Given", "a", "raw", "register", "value", "and", "the", "PCB", "version", "number", "find", "out", "what", "scale", "and", "offset", "are", "needed", "convert", "the", "raw", "value", "to", "deg", "C", "(", "if", "reverse", "=", "False", ")", "or", "convert", "a", "value", "in", "deg", "C", "to", "raw", "(", "if", "reverse", "=", "True", ")", "." ]
[ "\"\"\"\n Given a raw register value and the PCB version number, find out what scale and offset are needed, convert the raw\n value to deg C (if reverse=False), or convert a value in deg C to raw (if reverse=True).\n\n For now, raw values are hundredths of a deg C, as a signed 16-bit integer value\n\n :param value: raw register contents as a value from 0-65535, or a floating point temperature in degrees\n :param reverse: Boolean, True to perform physical->raw conversion instead of raw->physical\n :param pcb_version: integer PCB version number, 0-65535\n :return: value in deg C (if reverse=False), or raw value as an unsigned 16 bit integer\n \"\"\"", "# raw_value is a signed 16-bit integer containing temp in 1/100th of a degree" ]
[ { "param": "value", "type": null }, { "param": "reverse", "type": null }, { "param": "pcb_version", "type": null } ]
{ "returns": [ { "docstring": "value in deg C (if reverse=False), or raw value as an unsigned 16 bit integer", "docstring_tokens": [ "value", "in", "deg", "C", "(", "if", "reverse", "=", "False", ")", "or", "raw", "value", "as", "an", "unsigned", "16", "bit", "integer" ], "type": null } ], "raises": [], "params": [ { "identifier": "value", "type": null, "docstring": "raw register contents as a value from 0-65535, or a floating point temperature in degrees", "docstring_tokens": [ "raw", "register", "contents", "as", "a", "value", "from", "0", "-", "65535", "or", "a", "floating", "point", "temperature", "in", "degrees" ], "default": null, "is_optional": null }, { "identifier": "reverse", "type": null, "docstring": "Boolean, True to perform physical->raw conversion instead of raw->physical", "docstring_tokens": [ "Boolean", "True", "to", "perform", "physical", "-", ">", "raw", "conversion", "instead", "of", "raw", "-", ">", "physical" ], "default": null, "is_optional": null }, { "identifier": "pcb_version", "type": null, "docstring": "integer PCB version number, 0-65535", "docstring_tokens": [ "integer", "PCB", "version", "number", "0", "-", "65535" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def scale_temp(value, reverse=False, pcb_version=0): if reverse: if value < 0: return (int(value * 100) + 65536) & 0xFFFF else: return int(value * 100) & 0xFFFF else: if value >= 32768: value -= 65536 return value / 100.0
610,240
29
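A round-trip check for the scale_temp record above; pcb_version is accepted but unused in this version, and the sample values are exactly representable in binary so the assertions hold:

raw = scale_temp(-12.5, reverse=True)     # physical -> raw, two's-complement 16-bit
assert raw == 65536 - 1250                # -12.5 C stored as -1250 hundredths of a degree
assert scale_temp(raw) == -12.5           # raw -> physical inverts it
assert scale_temp(2500) == 25.0           # positive raw values convert directly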
1f8b2f5ee40b3c66a831a2e95f036b4a43917e26
saltudelft/type4py
type4py/utils.py
[ "Apache-2.0" ]
Python
filter_directory
str
def filter_directory(directory: str, extension: str = '.py') -> str: """ Delete all files within the given directory with filenames not ending in the given extension """ for root, dirs, files in os.walk(directory): [os.remove(os.path.join(root, fi)) for fi in files if not fi.endswith(extension)] return directory
Delete all files within the given directory with filenames not ending in the given extension
Delete all files within the given directory with filenames not ending in the given extension
[ "Delete", "all", "files", "within", "the", "given", "directory", "with", "filenames", "not", "ending", "in", "the", "given", "extension" ]
def filter_directory(directory: str, extension: str = '.py') -> str: for root, dirs, files in os.walk(directory): [os.remove(os.path.join(root, fi)) for fi in files if not fi.endswith(extension)] return directory
[ "def", "filter_directory", "(", "directory", ":", "str", ",", "extension", ":", "str", "=", "'.py'", ")", "->", "str", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "directory", ")", ":", "[", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "root", ",", "fi", ")", ")", "for", "fi", "in", "files", "if", "not", "fi", ".", "endswith", "(", "extension", ")", "]", "return", "directory" ]
Delete all files within the given directory with filenames not ending in the given extension
[ "Delete", "all", "files", "within", "the", "given", "directory", "with", "filenames", "not", "ending", "in", "the", "given", "extension" ]
[ "\"\"\"\n Delete all files within the given directory with filenames not ending in the given extension\n \"\"\"" ]
[ { "param": "directory", "type": "str" }, { "param": "extension", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "directory", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "extension", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def filter_directory(directory: str, extension: str = '.py') -> str: for root, dirs, files in os.walk(directory): [os.remove(os.path.join(root, fi)) for fi in files if not fi.endswith(extension)] return directory
610,241
822
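The filter_directory record above deletes files irreversibly, so this sketch exercises it on a temporary tree only:

import os
import tempfile

d = tempfile.mkdtemp()
open(os.path.join(d, "keep.py"), "w").close()
open(os.path.join(d, "drop.txt"), "w").close()
filter_directory(d)                        # default extension='.py' keeps only Python files
print(os.listdir(d))                       # ['keep.py']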
3bafca14ef915ff925b30e2466d8f98e9682a3f8
miyuchina/ipsum-lorem
dump.py
[ "MIT" ]
Python
dump_assets
null
def dump_assets(site_obj): """ Copy all the assets to the static folder. Args: site_obj: provides access to site-wide variables. """ src = site_obj.get_assets_dir() dst = site_obj.get_static_dir() + "assets/" # Remove previously cached assets if os.path.exists(dst): shutil.rmtree(dst) # Copy to static folder shutil.copytree(src, dst)
Copy all the assets to the static folder. Args: site_obj: provides access to site-wide variables.
Copy all the assets to the static folder.
[ "Copy", "all", "the", "assets", "to", "the", "static", "folder", "." ]
def dump_assets(site_obj): src = site_obj.get_assets_dir() dst = site_obj.get_static_dir() + "assets/" if os.path.exists(dst): shutil.rmtree(dst) shutil.copytree(src, dst)
[ "def", "dump_assets", "(", "site_obj", ")", ":", "src", "=", "site_obj", ".", "get_assets_dir", "(", ")", "dst", "=", "site_obj", ".", "get_static_dir", "(", ")", "+", "\"assets/\"", "if", "os", ".", "path", ".", "exists", "(", "dst", ")", ":", "shutil", ".", "rmtree", "(", "dst", ")", "shutil", ".", "copytree", "(", "src", ",", "dst", ")" ]
Copy all the assets to the static folder.
[ "Copy", "all", "the", "assets", "to", "the", "static", "folder", "." ]
[ "\"\"\"\n Copy all the assets to the static folder.\n\n Args:\n site_obj: provides access to site-wide variables.\n \"\"\"", "# Remove previously cached assets", "# Copy to static folder" ]
[ { "param": "site_obj", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "site_obj", "type": null, "docstring": "provides access to site-wide variables.", "docstring_tokens": [ "provides", "access", "to", "site", "-", "wide", "variables", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import shutil import os def dump_assets(site_obj): src = site_obj.get_assets_dir() dst = site_obj.get_static_dir() + "assets/" if os.path.exists(dst): shutil.rmtree(dst) shutil.copytree(src, dst)
610,242
830
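dump_assets only needs two accessors from site_obj, so a hypothetical stand-in suffices; since the code concatenates get_static_dir() + "assets/", the static path must end in a separator (POSIX-style paths assumed):

import os
import tempfile

class FakeSite:                            # hypothetical stand-in for the real site object
    def __init__(self, assets_dir, static_dir):
        self.assets_dir = assets_dir
        self.static_dir = static_dir
    def get_assets_dir(self):
        return self.assets_dir
    def get_static_dir(self):
        return self.static_dir             # must end with '/'

assets = tempfile.mkdtemp()
open(os.path.join(assets, "logo.png"), "w").close()
static = tempfile.mkdtemp() + "/"
dump_assets(FakeSite(assets, static))
print(os.listdir(static + "assets"))       # ['logo.png']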
91b95e7eb31f67443dba0276117775e473aeb7e9
seraconlp/gdmix
scripts/download_process_movieLens_data.py
[ "BSD-2-Clause" ]
Python
process_data
<not_specific>
def process_data(data, items, users): """Process data such as join features by id, normalize feature values, et.al. """ # normalized release_date by 2020 items['release_date'] = items['release_date'].apply(lambda x: float(str(x).split('-')[-1]) / 2000.0) # normalized the age: users['age'] = users['age'] / 100.0 # append uid to data data.insert(0, 'uid', range(len(data))) # binarize the score, use 3.0 as threshold. data['response'] = data['rating'].apply(lambda x: 1 if x > 3.0 else 0) data = data.drop('rating', 1) # create per_movie dataset, per-movie features are user features per_movie = data.join(users.set_index('user_id'), on='user_id', lsuffix='', rsuffix='_other') # create per_user dataset, per_user features are movie features per_user = data.join(items.set_index('movie_id'), 'movie_id', lsuffix='', rsuffix='_other') # join to create the global_data global_data = per_user.join(users.set_index('user_id'), 'user_id', lsuffix='', rsuffix='_other') global_data.sort_values(by=['uid'], inplace=True) per_movie.sort_values(by=['uid'], inplace=True) per_user.sort_values(by=['uid'], inplace=True) return global_data, per_movie, per_user
Process data such as join features by id, normalize feature values, et.al.
Process data such as join features by id, normalize feature values, et.al.
[ "Process", "data", "such", "as", "join", "features", "by", "id", "normalize", "feature", "values", "et", ".", "al", "." ]
def process_data(data, items, users): items['release_date'] = items['release_date'].apply(lambda x: float(str(x).split('-')[-1]) / 2000.0) users['age'] = users['age'] / 100.0 data.insert(0, 'uid', range(len(data))) data['response'] = data['rating'].apply(lambda x: 1 if x > 3.0 else 0) data = data.drop('rating', 1) per_movie = data.join(users.set_index('user_id'), on='user_id', lsuffix='', rsuffix='_other') per_user = data.join(items.set_index('movie_id'), 'movie_id', lsuffix='', rsuffix='_other') global_data = per_user.join(users.set_index('user_id'), 'user_id', lsuffix='', rsuffix='_other') global_data.sort_values(by=['uid'], inplace=True) per_movie.sort_values(by=['uid'], inplace=True) per_user.sort_values(by=['uid'], inplace=True) return global_data, per_movie, per_user
[ "def", "process_data", "(", "data", ",", "items", ",", "users", ")", ":", "items", "[", "'release_date'", "]", "=", "items", "[", "'release_date'", "]", ".", "apply", "(", "lambda", "x", ":", "float", "(", "str", "(", "x", ")", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", ")", "/", "2000.0", ")", "users", "[", "'age'", "]", "=", "users", "[", "'age'", "]", "/", "100.0", "data", ".", "insert", "(", "0", ",", "'uid'", ",", "range", "(", "len", "(", "data", ")", ")", ")", "data", "[", "'response'", "]", "=", "data", "[", "'rating'", "]", ".", "apply", "(", "lambda", "x", ":", "1", "if", "x", ">", "3.0", "else", "0", ")", "data", "=", "data", ".", "drop", "(", "'rating'", ",", "1", ")", "per_movie", "=", "data", ".", "join", "(", "users", ".", "set_index", "(", "'user_id'", ")", ",", "on", "=", "'user_id'", ",", "lsuffix", "=", "''", ",", "rsuffix", "=", "'_other'", ")", "per_user", "=", "data", ".", "join", "(", "items", ".", "set_index", "(", "'movie_id'", ")", ",", "'movie_id'", ",", "lsuffix", "=", "''", ",", "rsuffix", "=", "'_other'", ")", "global_data", "=", "per_user", ".", "join", "(", "users", ".", "set_index", "(", "'user_id'", ")", ",", "'user_id'", ",", "lsuffix", "=", "''", ",", "rsuffix", "=", "'_other'", ")", "global_data", ".", "sort_values", "(", "by", "=", "[", "'uid'", "]", ",", "inplace", "=", "True", ")", "per_movie", ".", "sort_values", "(", "by", "=", "[", "'uid'", "]", ",", "inplace", "=", "True", ")", "per_user", ".", "sort_values", "(", "by", "=", "[", "'uid'", "]", ",", "inplace", "=", "True", ")", "return", "global_data", ",", "per_movie", ",", "per_user" ]
Process data such as join features by id, normalize feature values, et.al.
[ "Process", "data", "such", "as", "join", "features", "by", "id", "normalize", "feature", "values", "et", ".", "al", "." ]
[ "\"\"\"Process data such as join features by id, normalize feature values, et.al. \"\"\"", "# normalized release_date by 2020", "# normalized the age:", "# append uid to data", "# binarize the score, use 3.0 as threshold.", "# create per_movie dataset, per-movie features are user features", "# create per_user dataset, per_user features are movie features", "# join to create the global_data" ]
[ { "param": "data", "type": null }, { "param": "items", "type": null }, { "param": "users", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "items", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "users", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def process_data(data, items, users): items['release_date'] = items['release_date'].apply(lambda x: float(str(x).split('-')[-1]) / 2000.0) users['age'] = users['age'] / 100.0 data.insert(0, 'uid', range(len(data))) data['response'] = data['rating'].apply(lambda x: 1 if x > 3.0 else 0) data = data.drop('rating', 1) per_movie = data.join(users.set_index('user_id'), on='user_id', lsuffix='', rsuffix='_other') per_user = data.join(items.set_index('movie_id'), 'movie_id', lsuffix='', rsuffix='_other') global_data = per_user.join(users.set_index('user_id'), 'user_id', lsuffix='', rsuffix='_other') global_data.sort_values(by=['uid'], inplace=True) per_movie.sort_values(by=['uid'], inplace=True) per_user.sort_values(by=['uid'], inplace=True) return global_data, per_movie, per_user
610,243
894
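A tiny pandas sketch for the process_data record above, with hypothetical two-row stand-ins for the MovieLens frames (column names follow what the function indexes). One portability note: data.drop('rating', 1) passes axis positionally, which pandas 2.x rejects; on such versions the call needs axis=1:

import pandas as pd

data = pd.DataFrame({"user_id": [1, 2], "movie_id": [10, 11], "rating": [4.0, 2.0]})
items = pd.DataFrame({"movie_id": [10, 11], "release_date": ["01-Jan-1995", "01-Jan-1998"]})
users = pd.DataFrame({"user_id": [1, 2], "age": [24, 53]})

global_data, per_movie, per_user = process_data(data, items, users)
print(global_data[["uid", "response", "age", "release_date"]])   # binarized ratings plus joined features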
27439a68082bb075152e2516951f0adea1fa8926
UW-GAC/pie
core/migrations/0007_add_studyresponse_groups_permissions.py
[ "MIT" ]
Python
delete_permissions_for_studyresponse
null
def delete_permissions_for_studyresponse(apps, schema_editor): """Delete permissions for the tags.StudyResponse model.""" Group = apps.get_model('auth', 'Group') Permission = apps.get_model('auth', 'Permission') study_response_permissions = Permission.objects.filter(content_type__app_label='tags', content_type__model='studyresponse') # Remove all permissions for the StudyResponse model for phenotype taggers. taggers = Group.objects.get(name='phenotype_taggers') taggers.permissions.remove(*Permission.objects.filter( content_type__app_label='tags', content_type__model='studyresponse', codename__startswith='add')) taggers.permissions.remove(*Permission.objects.filter( content_type__app_label='tags', content_type__model='studyresponse', codename__startswith='change'))
Delete permissions for the tags.StudyResponse model.
Delete permissions for the tags.StudyResponse model.
[ "Delete", "permissions", "for", "the", "tags", ".", "StudyResponse", "model", "." ]
def delete_permissions_for_studyresponse(apps, schema_editor): Group = apps.get_model('auth', 'Group') Permission = apps.get_model('auth', 'Permission') study_response_permissions = Permission.objects.filter(content_type__app_label='tags', content_type__model='studyresponse') taggers = Group.objects.get(name='phenotype_taggers') taggers.permissions.remove(*Permission.objects.filter( content_type__app_label='tags', content_type__model='studyresponse', codename__startswith='add')) taggers.permissions.remove(*Permission.objects.filter( content_type__app_label='tags', content_type__model='studyresponse', codename__startswith='change'))
[ "def", "delete_permissions_for_studyresponse", "(", "apps", ",", "schema_editor", ")", ":", "Group", "=", "apps", ".", "get_model", "(", "'auth'", ",", "'Group'", ")", "Permission", "=", "apps", ".", "get_model", "(", "'auth'", ",", "'Permission'", ")", "study_response_permissions", "=", "Permission", ".", "objects", ".", "filter", "(", "content_type__app_label", "=", "'tags'", ",", "content_type__model", "=", "'studyresponse'", ")", "taggers", "=", "Group", ".", "objects", ".", "get", "(", "name", "=", "'phenotype_taggers'", ")", "taggers", ".", "permissions", ".", "remove", "(", "*", "Permission", ".", "objects", ".", "filter", "(", "content_type__app_label", "=", "'tags'", ",", "content_type__model", "=", "'studyresponse'", ",", "codename__startswith", "=", "'add'", ")", ")", "taggers", ".", "permissions", ".", "remove", "(", "*", "Permission", ".", "objects", ".", "filter", "(", "content_type__app_label", "=", "'tags'", ",", "content_type__model", "=", "'studyresponse'", ",", "codename__startswith", "=", "'change'", ")", ")" ]
Delete permissions for the tags.StudyResponse model.
[ "Delete", "permissions", "for", "the", "tags", ".", "StudyResponse", "model", "." ]
[ "\"\"\"Delete permissions for the tags.StudyResponse model.\"\"\"", "# Remove all permissions for the StudyResponse model for phenotype taggers." ]
[ { "param": "apps", "type": null }, { "param": "schema_editor", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "apps", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "schema_editor", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def delete_permissions_for_studyresponse(apps, schema_editor): Group = apps.get_model('auth', 'Group') Permission = apps.get_model('auth', 'Permission') study_response_permissions = Permission.objects.filter(content_type__app_label='tags', content_type__model='studyresponse') taggers = Group.objects.get(name='phenotype_taggers') taggers.permissions.remove(*Permission.objects.filter( content_type__app_label='tags', content_type__model='studyresponse', codename__startswith='add')) taggers.permissions.remove(*Permission.objects.filter( content_type__app_label='tags', content_type__model='studyresponse', codename__startswith='change'))
610,244
753
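The record above is a Django data-migration helper, so it runs via RunPython rather than as a standalone call; the sketch below shows that wiring, with a hypothetical dependency tuple. Note the query bound to study_response_permissions in the helper is assigned but never used:

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [
        ('core', '0006_previous_migration'),    # hypothetical predecessor
    ]
    operations = [
        # Django passes (apps, schema_editor) into the helper
        migrations.RunPython(delete_permissions_for_studyresponse,
                             reverse_code=migrations.RunPython.noop),
    ]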
b157ab140948d401f0fafcd457ed6a94d0703a68
sharon2719/airflow
airflow/providers/google/cloud/hooks/cloud_sql.py
[ "Apache-2.0" ]
Python
_generate_unique_path
str
def _generate_unique_path() -> str: """ We are not using mkdtemp here as the path generated with mkdtemp can be close to 60 characters and there is a limitation in length of socket path to around 100 characters in total. We append project/location/instance to it later and postgres appends its own prefix, so we chose a shorter "/tmp/[8 random characters]" """ random.seed() while True: candidate = "/tmp/" + ''.join( random.choice(string.ascii_lowercase + string.digits) for _ in range(8) ) if not os.path.exists(candidate): return candidate
We are not using mkdtemp here as the path generated with mkdtemp can be close to 60 characters and there is a limitation in length of socket path to around 100 characters in total. We append project/location/instance to it later and postgres appends its own prefix, so we chose a shorter "/tmp/[8 random characters]"
We are not using mkdtemp here as the path generated with mkdtemp can be close to 60 characters and there is a limitation in length of socket path to around 100 characters in total. We append project/location/instance to it later and postgres appends its own prefix, so we chose a shorter "/tmp/[8 random characters]"
[ "We", "are", "not", "using", "mkdtemp", "here", "as", "the", "path", "generated", "with", "mkdtemp", "can", "be", "close", "to", "60", "characters", "and", "there", "is", "a", "limitation", "in", "length", "of", "socket", "path", "to", "around", "100", "characters", "in", "total", ".", "We", "append", "project", "/", "location", "/", "instance", "to", "it", "later", "and", "postgres", "appends", "its", "own", "prefix", "so", "we", "chose", "a", "shorter", "\"", "/", "tmp", "/", "[", "8", "random", "characters", "]", "\"" ]
def _generate_unique_path() -> str: random.seed() while True: candidate = "/tmp/" + ''.join( random.choice(string.ascii_lowercase + string.digits) for _ in range(8) ) if not os.path.exists(candidate): return candidate
[ "def", "_generate_unique_path", "(", ")", "->", "str", ":", "random", ".", "seed", "(", ")", "while", "True", ":", "candidate", "=", "\"/tmp/\"", "+", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_lowercase", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "8", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "candidate", ")", ":", "return", "candidate" ]
We are not using mkdtemp here as the path generated with mkdtemp can be close to 60 characters and there is a limitation in length of socket path to around 100 characters in total.
[ "We", "are", "not", "using", "mkdtemp", "here", "as", "the", "path", "generated", "with", "mkdtemp", "can", "be", "close", "to", "60", "characters", "and", "there", "is", "a", "limitation", "in", "length", "of", "socket", "path", "to", "around", "100", "characters", "in", "total", "." ]
[ "\"\"\"\n We are not using mkdtemp here as the path generated with mkdtemp\n can be close to 60 characters and there is a limitation in\n length of socket path to around 100 characters in total.\n We append project/location/instance to it later and postgres\n appends its own prefix, so we chose a shorter \"/tmp/[8 random characters]\"\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import string import os import random def _generate_unique_path() -> str: random.seed() while True: candidate = "/tmp/" + ''.join( random.choice(string.ascii_lowercase + string.digits) for _ in range(8) ) if not os.path.exists(candidate): return candidate
610,245
661
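Quick checks for the _generate_unique_path record above (os, random, and string come from its code_with_imports). The loop only avoids paths that already exist; it does not reserve them, so the usual create-before-use race caveat applies:

p = _generate_unique_path()
print(p)                                          # e.g. /tmp/k3x9q2ab
assert p.startswith("/tmp/") and len(p) == len("/tmp/") + 8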
3df28169bd946933e4a57cea61994e5d4afef8fc
potato-inoue/espnet
espnet/nets/pytorch_backend/rnn/attentions.py
[ "Apache-2.0" ]
Python
_apply_attention_constraint
<not_specific>
def _apply_attention_constraint( e, last_attended_idx, backward_window=1, forward_window=3 ): """Apply monotonic attention constraint. This function applies the monotonic attention constraint introduced in `Deep Voice 3: Scaling Text-to-Speech with Convolutional Sequence Learning`_. Args: e (Tensor): Attention energy before applying softmax (1, T). last_attended_idx (int): The index of the inputs of the last attended [0, T]. backward_window (int, optional): Backward window size in attention constraint. forward_window (int, optional): Forward window size in attention constraint. Returns: Tensor: Monotonic constrained attention energy (1, T). .. _`Deep Voice 3: Scaling Text-to-Speech with Convolutional Sequence Learning`: https://arxiv.org/abs/1710.07654 """ if e.size(0) != 1: raise NotImplementedError("Batch attention constraining is not yet supported.") backward_idx = last_attended_idx - backward_window forward_idx = last_attended_idx + forward_window if backward_idx > 0: e[:, :backward_idx] = -float("inf") if forward_idx < e.size(1): e[:, forward_idx:] = -float("inf") return e
Apply monotonic attention constraint. This function applies the monotonic attention constraint introduced in `Deep Voice 3: Scaling Text-to-Speech with Convolutional Sequence Learning`_. Args: e (Tensor): Attention energy before applying softmax (1, T). last_attended_idx (int): The index of the inputs of the last attended [0, T]. backward_window (int, optional): Backward window size in attention constraint. forward_window (int, optional): Forward window size in attention constraint. Returns: Tensor: Monotonic constrained attention energy (1, T). .. _`Deep Voice 3: Scaling Text-to-Speech with Convolutional Sequence Learning`: https://arxiv.org/abs/1710.07654
Apply monotonic attention constraint. This function applies the monotonic attention constraint introduced in `Deep Voice 3: Scaling Text-to-Speech with Convolutional Sequence Learning`_.
[ "Apply", "monotonic", "attention", "constraint", ".", "This", "function", "applies", "the", "monotonic", "attention", "constraint", "introduced", "in", "`", "Deep", "Voice", "3", ":", "Scaling", "Text", "-", "to", "-", "Speech", "with", "Convolutional", "Sequence", "Learning", "`", "_", "." ]
def _apply_attention_constraint( e, last_attended_idx, backward_window=1, forward_window=3 ): if e.size(0) != 1: raise NotImplementedError("Batch attention constraining is not yet supported.") backward_idx = last_attended_idx - backward_window forward_idx = last_attended_idx + forward_window if backward_idx > 0: e[:, :backward_idx] = -float("inf") if forward_idx < e.size(1): e[:, forward_idx:] = -float("inf") return e
[ "def", "_apply_attention_constraint", "(", "e", ",", "last_attended_idx", ",", "backward_window", "=", "1", ",", "forward_window", "=", "3", ")", ":", "if", "e", ".", "size", "(", "0", ")", "!=", "1", ":", "raise", "NotImplementedError", "(", "\"Batch attention constraining is not yet supported.\"", ")", "backward_idx", "=", "last_attended_idx", "-", "backward_window", "forward_idx", "=", "last_attended_idx", "+", "forward_window", "if", "backward_idx", ">", "0", ":", "e", "[", ":", ",", ":", "backward_idx", "]", "=", "-", "float", "(", "\"inf\"", ")", "if", "forward_idx", "<", "e", ".", "size", "(", "1", ")", ":", "e", "[", ":", ",", "forward_idx", ":", "]", "=", "-", "float", "(", "\"inf\"", ")", "return", "e" ]
Apply monotonic attention constraint.
[ "Apply", "monotonic", "attention", "constraint", "." ]
[ "\"\"\"Apply monotonic attention constraint.\n\n This function apply the monotonic attention constraint\n introduced in `Deep Voice 3: Scaling\n Text-to-Speech with Convolutional Sequence Learning`_.\n\n Args:\n e (Tensor): Attention energy before applying softmax (1, T).\n last_attended_idx (int): The index of the inputs of the last attended [0, T].\n backward_window (int, optional): Backward window size in attention constraint.\n forward_window (int, optional): Forward window size in attetion constraint.\n\n Returns:\n Tensor: Monotonic constrained attention energy (1, T).\n\n .. _`Deep Voice 3: Scaling Text-to-Speech with Convolutional Sequence Learning`:\n https://arxiv.org/abs/1710.07654\n\n \"\"\"" ]
[ { "param": "e", "type": null }, { "param": "last_attended_idx", "type": null }, { "param": "backward_window", "type": null }, { "param": "forward_window", "type": null } ]
{ "returns": [ { "docstring": "Monotonic constrained attention energy (1, T).", "docstring_tokens": [ "Monotonic", "constrained", "attention", "energy", "(", "1", "T", ")", "." ], "type": "Tensor" } ], "raises": [], "params": [ { "identifier": "e", "type": null, "docstring": "Attention energy before applying softmax (1, T).", "docstring_tokens": [ "Attention", "energy", "before", "applying", "softmax", "(", "1", "T", ")", "." ], "default": null, "is_optional": false }, { "identifier": "last_attended_idx", "type": null, "docstring": "The index of the inputs of the last attended [0, T].", "docstring_tokens": [ "The", "index", "of", "the", "inputs", "of", "the", "last", "attended", "[", "0", "T", "]", "." ], "default": null, "is_optional": false }, { "identifier": "backward_window", "type": null, "docstring": "Backward window size in attention constraint.", "docstring_tokens": [ "Backward", "window", "size", "in", "attention", "constraint", "." ], "default": null, "is_optional": true }, { "identifier": "forward_window", "type": null, "docstring": "Forward window size in attetion constraint.", "docstring_tokens": [ "Forward", "window", "size", "in", "attetion", "constraint", "." ], "default": null, "is_optional": true } ], "outlier_params": [], "others": [] }
def _apply_attention_constraint( e, last_attended_idx, backward_window=1, forward_window=3 ): if e.size(0) != 1: raise NotImplementedError("Batch attention constraining is not yet supported.") backward_idx = last_attended_idx - backward_window forward_idx = last_attended_idx + forward_window if backward_idx > 0: e[:, :backward_idx] = -float("inf") if forward_idx < e.size(1): e[:, forward_idx:] = -float("inf") return e
610,246
229
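A PyTorch sketch for the attention-constraint record above (torch is the implied dependency, given e.size() and tensor slice assignment); the energy tensor is a hypothetical stand-in:

import torch

e = torch.zeros(1, 10)                            # attention energies over 10 encoder frames
e = _apply_attention_constraint(e, last_attended_idx=5,
                                backward_window=1, forward_window=3)
print(e)                                          # zeros at indices 4..7, -inf everywhere else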
2c95a5e1e4a7842b576e7dc778495295ae5a6702
fhal/la
la/farray/group.py
[ "BSD-2-Clause" ]
Python
unique_group
<not_specific>
def unique_group(groups): """Find unique groups in list not including None.""" ugroups = set(groups) ugroups -= set((None,)) ugroups = list(ugroups) ugroups.sort() return ugroups
Find unique groups in list not including None.
Find unique groups in list not including None.
[ "Find", "unique", "groups", "in", "list", "not", "including", "None", "." ]
def unique_group(groups): ugroups = set(groups) ugroups -= set((None,)) ugroups = list(ugroups) ugroups.sort() return ugroups
[ "def", "unique_group", "(", "groups", ")", ":", "ugroups", "=", "set", "(", "groups", ")", "ugroups", "-=", "set", "(", "(", "None", ",", ")", ")", "ugroups", "=", "list", "(", "ugroups", ")", "ugroups", ".", "sort", "(", ")", "return", "ugroups" ]
Find unique groups in list not including None.
[ "Find", "unique", "groups", "in", "list", "not", "including", "None", "." ]
[ "\"\"\"Find unique groups in list not including None.\"\"\"" ]
[ { "param": "groups", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "groups", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def unique_group(groups): ugroups = set(groups) ugroups -= set((None,)) ugroups = list(ugroups) ugroups.sort() return ugroups
610,247
376
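Quick checks for the unique_group record above; the surviving labels must be mutually comparable for the sort to succeed:

assert unique_group(["b", "a", None, "a"]) == ["a", "b"]
assert unique_group([None, None]) == []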
0680076f3a7e7a327b515d733725cdd6d0a3b0c0
szhan/ecdyn
algorithms/es.py
[ "MIT" ]
Python
checkStrategy
<not_specific>
def checkStrategy(minstrategy): """ Check that strategy meets a minimum, otherwise not enough exploration. """ def decorator(func): def wrapper(*args, **kargs): children = func(*args, **kargs) for child in children: for i, s in enumerate(child.strategy): if s < minstrategy: child.strategy[i] = minstrategy return children return wrapper return decorator
Check that strategy meets a minimum, otherwise not enough exploration.
Check that strategy meets a minimum, otherwise not enough exploration.
[ "Check", "that", "strategy", "meets", "a", "minimum", "otherwise", "not", "enough", "exploration", "." ]
def checkStrategy(minstrategy): def decorator(func): def wrapper(*args, **kargs): children = func(*args, **kargs) for child in children: for i, s in enumerate(child.strategy): if s < minstrategy: child.strategy[i] = minstrategy return children return wrapper return decorator
[ "def", "checkStrategy", "(", "minstrategy", ")", ":", "def", "decorator", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "**", "kargs", ")", ":", "children", "=", "func", "(", "*", "args", ",", "**", "kargs", ")", "for", "child", "in", "children", ":", "for", "i", ",", "s", "in", "enumerate", "(", "child", ".", "strategy", ")", ":", "if", "s", "<", "minstrategy", ":", "child", ".", "strategy", "[", "i", "]", "=", "minstrategy", "return", "children", "return", "wrapper", "return", "decorator" ]
Check that strategy meets a minimum, otherwise not enough exploration.
[ "Check", "that", "strategy", "meets", "a", "minimum", "otherwise", "not", "enough", "exploration", "." ]
[ "\"\"\" Check that strategy meets a minimum, otherwise not enough exploration. \"\"\"" ]
[ { "param": "minstrategy", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "minstrategy", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def checkStrategy(minstrategy): def decorator(func): def wrapper(*args, **kargs): children = func(*args, **kargs) for child in children: for i, s in enumerate(child.strategy): if s < minstrategy: child.strategy[i] = minstrategy return children return wrapper return decorator
610,248
436
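checkStrategy is a decorator factory in the style DEAP wraps around its variation operators. The sketch below exercises it with a hypothetical stand-in individual carrying only a strategy list:

class Ind:                                        # hypothetical stand-in for a DEAP individual
    def __init__(self, strategy):
        self.strategy = strategy

@checkStrategy(0.05)
def mutate(ind):
    ind.strategy = [s - 0.1 for s in ind.strategy]   # push strategies below the floor
    return (ind,)                                 # variation operators return tuples of children

child, = mutate(Ind([0.12, 0.30]))
print(child.strategy)                             # first entry clamped up to the 0.05 floor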
1a02bba392bd9e8efac34dc642479e9d0c6fbdea
jimmyqtran/IntermediateSoftwareDesignPython
Project 7 - Map, Filter, Reduce/MapFilterReduce.py
[ "MIT" ]
Python
sum_of_squares
<not_specific>
def sum_of_squares(mfrlist): """Add up the squares of all elements in the list""" return (mfrlist .map(lambda x: x ** 2) .reduce(lambda a, b: a + b, 0))
Add up the squares of all elements in the list
Add up the squares of all elements in the list
[ "Add", "up", "the", "squares", "of", "all", "elements", "in", "the", "list" ]
def sum_of_squares(mfrlist): return (mfrlist .map(lambda x: x ** 2) .reduce(lambda a, b: a + b, 0))
[ "def", "sum_of_squares", "(", "mfrlist", ")", ":", "return", "(", "mfrlist", ".", "map", "(", "lambda", "x", ":", "x", "**", "2", ")", ".", "reduce", "(", "lambda", "a", ",", "b", ":", "a", "+", "b", ",", "0", ")", ")" ]
Add up the squares of all elements in the list
[ "Add", "up", "the", "squares", "of", "all", "elements", "in", "the", "list" ]
[ "\"\"\"Add up the squares of all elements in the list\"\"\"" ]
[ { "param": "mfrlist", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "mfrlist", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sum_of_squares(mfrlist): return (mfrlist .map(lambda x: x ** 2) .reduce(lambda a, b: a + b, 0))
610,249
411
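sum_of_squares assumes a list object exposing chainable map and reduce; that class lives elsewhere in the project, so this sketch substitutes a minimal hypothetical equivalent just to show the contract:

class MiniMFR:                                    # hypothetical stand-in for the project's list type
    def __init__(self, items):
        self._items = list(items)
    def map(self, fn):
        return MiniMFR(fn(x) for x in self._items)
    def reduce(self, fn, start):
        acc = start
        for x in self._items:
            acc = fn(acc, x)
        return acc

print(sum_of_squares(MiniMFR([1, 2, 3])))         # 1 + 4 + 9 = 14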
70d9370ef091b8ae15854cec0c862ef6bd92e764
SetBased/py-etlt
etlt/helper/Type2Helper.py
[ "MIT" ]
Python
_get_date_type
<not_specific>
def _get_date_type(date): """ Returns the type of a date. :param str|datetime.date date: The date. :rtype: str """ if isinstance(date, str): return 'str' if isinstance(date, datetime.date): return 'date' if isinstance(date, int): return 'int' raise ValueError('Unexpected type {0!s}'.format(date.__class__))
Returns the type of a date. :param str|datetime.date date: The date. :rtype: str
Returns the type of a date.
[ "Returns", "the", "type", "of", "a", "date", "." ]
def _get_date_type(date): if isinstance(date, str): return 'str' if isinstance(date, datetime.date): return 'date' if isinstance(date, int): return 'int' raise ValueError('Unexpected type {0!s}'.format(date.__class__))
[ "def", "_get_date_type", "(", "date", ")", ":", "if", "isinstance", "(", "date", ",", "str", ")", ":", "return", "'str'", "if", "isinstance", "(", "date", ",", "datetime", ".", "date", ")", ":", "return", "'date'", "if", "isinstance", "(", "date", ",", "int", ")", ":", "return", "'int'", "raise", "ValueError", "(", "'Unexpected type {0!s}'", ".", "format", "(", "date", ".", "__class__", ")", ")" ]
Returns the type of a date.
[ "Returns", "the", "type", "of", "a", "date", "." ]
[ "\"\"\"\n Returns the type of a date.\n\n :param str|datetime.date date: The date.\n\n :rtype: str\n \"\"\"" ]
[ { "param": "date", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "str" } ], "raises": [], "params": [ { "identifier": "date", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
import datetime def _get_date_type(date): if isinstance(date, str): return 'str' if isinstance(date, datetime.date): return 'date' if isinstance(date, int): return 'int' raise ValueError('Unexpected type {0!s}'.format(date.__class__))
610,250
916
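Quick checks for the _get_date_type record above; note the code also handles int even though the :param line only documents str|datetime.date:

import datetime

assert _get_date_type("2020-01-01") == "str"
assert _get_date_type(datetime.date(2020, 1, 1)) == "date"
assert _get_date_type(20200101) == "int"          # handled, though undocumented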
fece65803b42972a7cfc324626e15b63979dcdb7
e-mayo/mscreen
mscreen/autodocktools_prepare_py3k/MolKit/pdb2pqr/src/utilities.py
[ "MIT" ]
Python
analyzeConnectivity
<not_specific>
def analyzeConnectivity(map, key): """ Analyze the connectivity of a given map using the key value. Parameters map: The map to analyze (dict) key: The key value (variable) Returns list: A list of connected values to the key (list) """ list = [] keys = [key] while len(keys) > 0: key = keys[0] if key not in list: list.append(key) if key in map: for value in map[key]: if value not in list: keys.append(value) keys.pop(keys.index(key)) return list
Analyze the connectivity of a given map using the key value. Parameters map: The map to analyze (dict) key: The key value (variable) Returns list: A list of connected values to the key (list)
Analyze the connectivity of a given map using the key value. Parameters map: The map to analyze (dict) key: The key value (variable) Returns list: A list of connected values to the key (list)
[ "Analyze", "the", "connectivity", "of", "a", "given", "map", "using", "the", "key", "value", ".", "Parameters", "map", ":", "The", "map", "to", "analyze", "(", "dict", ")", "key", ":", "The", "key", "value", "(", "variable", ")", "Returns", "list", ":", "A", "list", "of", "connected", "values", "to", "the", "key", "(", "list", ")" ]
def analyzeConnectivity(map, key): list = [] keys = [key] while len(keys) > 0: key = keys[0] if key not in list: list.append(key) if key in map: for value in map[key]: if value not in list: keys.append(value) keys.pop(keys.index(key)) return list
[ "def", "analyzeConnectivity", "(", "map", ",", "key", ")", ":", "list", "=", "[", "]", "keys", "=", "[", "key", "]", "while", "len", "(", "keys", ")", ">", "0", ":", "key", "=", "keys", "[", "0", "]", "if", "key", "not", "in", "list", ":", "list", ".", "append", "(", "key", ")", "if", "key", "in", "map", ":", "for", "value", "in", "map", "[", "key", "]", ":", "if", "value", "not", "in", "list", ":", "keys", ".", "append", "(", "value", ")", "keys", ".", "pop", "(", "keys", ".", "index", "(", "key", ")", ")", "return", "list" ]
Analyze the connectivity of a given map using the key value.
[ "Analyze", "the", "connectivity", "of", "a", "given", "map", "using", "the", "key", "value", "." ]
[ "\"\"\"\n Analyze the connectivity of a given map using the key value.\n\n Parameters\n map: The map to analyze (dict)\n key: The key value (variable)\n Returns\n list: A list of connected values to the key (list)\n \"\"\"" ]
[ { "param": "map", "type": null }, { "param": "key", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "map", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "key", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def analyzeConnectivity(map, key): list = [] keys = [key] while len(keys) > 0: key = keys[0] if key not in list: list.append(key) if key in map: for value in map[key]: if value not in list: keys.append(value) keys.pop(keys.index(key)) return list
610,251
371
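A small adjacency-map check for the analyzeConnectivity record above (the parameters shadow the built-in map and list names, but the traversal itself is a plain breadth-first walk):

graph = {"a": ["b"], "b": ["c"], "d": ["e"]}
assert analyzeConnectivity(graph, "a") == ["a", "b", "c"]
assert analyzeConnectivity(graph, "d") == ["d", "e"]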
b271e0b7fff6d9277bb80e44505b82d54cfc6e43
jpirnay/meerk40t
meerk40t/core/bindalias.py
[ "MIT" ]
Python
keymap_execute
<not_specific>
def keymap_execute(context, keyvalue, keydown=True): """ Execute keybind accelerator if it exists and return true Else return false """ if keyvalue not in context.keymap: return False action = context.keymap[keyvalue] if keydown or action.startswith("+"): if not keydown and action.startswith("+"): action = "-" + action[1:] for cmd in action.split(";"): context("%s\n" % cmd) return True
Execute keybind accelerator if it exists and return true Else return false
Execute keybind accelerator if it exists and return true Else return false
[ "Execute", "keybind", "accelerator", "if", "it", "exists", "and", "return", "true", "Else", "return", "false" ]
def keymap_execute(context, keyvalue, keydown=True): if keyvalue not in context.keymap: return False action = context.keymap[keyvalue] if keydown or action.startswith("+"): if not keydown and action.startswith("+"): action = "-" + action[1:] for cmd in action.split(";"): context("%s\n" % cmd) return True
[ "def", "keymap_execute", "(", "context", ",", "keyvalue", ",", "keydown", "=", "True", ")", ":", "if", "keyvalue", "not", "in", "context", ".", "keymap", ":", "return", "False", "action", "=", "context", ".", "keymap", "[", "keyvalue", "]", "if", "keydown", "or", "action", ".", "startswith", "(", "\"+\"", ")", ":", "if", "not", "keydown", "and", "action", ".", "startswith", "(", "\"+\"", ")", ":", "action", "=", "\"-\"", "+", "action", "[", "1", ":", "]", "for", "cmd", "in", "action", ".", "split", "(", "\";\"", ")", ":", "context", "(", "\"%s\\n\"", "%", "cmd", ")", "return", "True" ]
Execute keybind accelerator if it exists and return true Else return false
[ "Execute", "keybind", "accelerator", "if", "it", "exists", "and", "return", "true", "Else", "return", "false" ]
[ "\"\"\"\n Execute keybind accelerator if it exists and return true\n\n Else return false\n \"\"\"" ]
[ { "param": "context", "type": null }, { "param": "keyvalue", "type": null }, { "param": "keydown", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "context", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "keyvalue", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "keydown", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def keymap_execute(context, keyvalue, keydown=True):
    if keyvalue not in context.keymap:
        return False
    action = context.keymap[keyvalue]
    if keydown or action.startswith("+"):
        if not keydown and action.startswith("+"):
            action = "-" + action[1:]
        for cmd in action.split(";"):
            context("%s\n" % cmd)
    return True
610,252
761
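A minimal sketch exercising keymap_execute with a stub; the real argument is a MeerK40t kernel context, so the callable stub and the bindings below are purely illustrative:

class StubContext:
    def __init__(self):
        self.keymap = {"f5": "refresh", "+space": "+hold"}  # hypothetical bindings
    def __call__(self, cmd):
        print("exec:", cmd.strip())  # the real context runs console commands

ctx = StubContext()
keymap_execute(ctx, "f5")                     # exec: refresh
keymap_execute(ctx, "+space", keydown=False)  # exec: -hold (keyup negates the '+')
keymap_execute(ctx, "unbound")                # returns False, nothing executed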
93e3d0c313d95121ba09bc52ee9664a41ecde76c
ManyBodyPhysics/LectureNotesPhysics
Programs/Chapter10-programs/python/srg_pairing/py27/srg_pairing.py
[ "CC0-1.0" ]
Python
myPlotSettings
<not_specific>
def myPlotSettings(ax, formatter): '''save these settings for use in other plots''' ax.xaxis.set_major_formatter(formatter) ax.yaxis.set_major_formatter(formatter) ax.tick_params(axis='both',which='major',width=1.5,length=8) ax.tick_params(axis='both',which='minor',width=1.5,length=5) ax.tick_params(axis='both',width=2,length=10,labelsize=20) for s in ['left', 'right', 'top', 'bottom']: ax.spines[s].set_linewidth(2) ax.set_xlim([0.0007,13]) return
save these settings for use in other plots
save these settings for use in other plots
[ "save", "these", "settings", "for", "use", "in", "other", "plots" ]
def myPlotSettings(ax, formatter): ax.xaxis.set_major_formatter(formatter) ax.yaxis.set_major_formatter(formatter) ax.tick_params(axis='both',which='major',width=1.5,length=8) ax.tick_params(axis='both',which='minor',width=1.5,length=5) ax.tick_params(axis='both',width=2,length=10,labelsize=20) for s in ['left', 'right', 'top', 'bottom']: ax.spines[s].set_linewidth(2) ax.set_xlim([0.0007,13]) return
[ "def", "myPlotSettings", "(", "ax", ",", "formatter", ")", ":", "ax", ".", "xaxis", ".", "set_major_formatter", "(", "formatter", ")", "ax", ".", "yaxis", ".", "set_major_formatter", "(", "formatter", ")", "ax", ".", "tick_params", "(", "axis", "=", "'both'", ",", "which", "=", "'major'", ",", "width", "=", "1.5", ",", "length", "=", "8", ")", "ax", ".", "tick_params", "(", "axis", "=", "'both'", ",", "which", "=", "'minor'", ",", "width", "=", "1.5", ",", "length", "=", "5", ")", "ax", ".", "tick_params", "(", "axis", "=", "'both'", ",", "width", "=", "2", ",", "length", "=", "10", ",", "labelsize", "=", "20", ")", "for", "s", "in", "[", "'left'", ",", "'right'", ",", "'top'", ",", "'bottom'", "]", ":", "ax", ".", "spines", "[", "s", "]", ".", "set_linewidth", "(", "2", ")", "ax", ".", "set_xlim", "(", "[", "0.0007", ",", "13", "]", ")", "return" ]
save these settings for use in other plots
[ "save", "these", "settings", "for", "use", "in", "other", "plots" ]
[ "'''save these settings for use in other plots'''" ]
[ { "param": "ax", "type": null }, { "param": "formatter", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ax", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "formatter", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def myPlotSettings(ax, formatter):
    ax.xaxis.set_major_formatter(formatter)
    ax.yaxis.set_major_formatter(formatter)
    ax.tick_params(axis='both', which='major', width=1.5, length=8)
    ax.tick_params(axis='both', which='minor', width=1.5, length=5)
    ax.tick_params(axis='both', width=2, length=10, labelsize=20)
    for s in ['left', 'right', 'top', 'bottom']:
        ax.spines[s].set_linewidth(2)
    ax.set_xlim([0.0007, 13])
    return
610,253
263
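A short matplotlib sketch; the ScalarFormatter choice and the sample data are assumptions, since the original caller is not shown in this record:

import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

fig, ax = plt.subplots()
ax.semilogx([0.001, 0.01, 0.1, 1, 10], [1, 2, 3, 4, 5])
myPlotSettings(ax, ScalarFormatter())  # shared tick/spine styling, xlim [0.0007, 13]
fig.savefig("styled.png")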
27af28514cf08b63c0c680e33a73c90210940008
desihub/desietc
desietc/util.py
[ "MIT" ]
Python
mjd_to_date
<not_specific>
def mjd_to_date(mjd, utc_offset): """Convert an MJD value to a datetime using the specified UTC offset in hours. Use utc_offset of -7 for local time at Kitt Peak. Use :func:`date_to_mjd` to invert this calculation. """ return datetime.datetime(2019, 1, 1) + datetime.timedelta(days=mjd - 58484.0, hours=utc_offset)
Convert an MJD value to a datetime using the specified UTC offset in hours. Use utc_offset of -7 for local time at Kitt Peak. Use :func:`date_to_mjd` to invert this calculation.
Convert an MJD value to a datetime using the specified UTC offset in hours. Use utc_offset of -7 for local time at Kitt Peak. Use :func:`date_to_mjd` to invert this calculation.
[ "Convert", "an", "MJD", "value", "to", "a", "datetime", "using", "the", "specified", "UTC", "offset", "in", "hours", ".", "Use", "utc_offset", "of", "-", "7", "for", "local", "time", "at", "Kitt", "Peak", ".", "Use", ":", "func", ":", "`", "date_to_mjd", "`", "to", "invert", "this", "calculation", "." ]
def mjd_to_date(mjd, utc_offset): return datetime.datetime(2019, 1, 1) + datetime.timedelta(days=mjd - 58484.0, hours=utc_offset)
[ "def", "mjd_to_date", "(", "mjd", ",", "utc_offset", ")", ":", "return", "datetime", ".", "datetime", "(", "2019", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "days", "=", "mjd", "-", "58484.0", ",", "hours", "=", "utc_offset", ")" ]
Convert an MJD value to a datetime using the specified UTC offset in hours.
[ "Convert", "an", "MJD", "value", "to", "a", "datetime", "using", "the", "specified", "UTC", "offset", "in", "hours", "." ]
[ "\"\"\"Convert an MJD value to a datetime using the specified UTC offset in hours.\n\n Use utc_offset of -7 for local time at Kitt Peak.\n Use :func:`date_to_mjd` to invert this calculation.\n \"\"\"" ]
[ { "param": "mjd", "type": null }, { "param": "utc_offset", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "mjd", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "utc_offset", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime

def mjd_to_date(mjd, utc_offset):
    return datetime.datetime(2019, 1, 1) + datetime.timedelta(days=mjd - 58484.0, hours=utc_offset)
610,255
535
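A quick check using the function's own reference epoch (MJD 58484.0 = 2019-01-01), under which MJD 58849.0 lands on 2020-01-01 UTC:

print(mjd_to_date(58849.0, utc_offset=0))   # 2020-01-01 00:00:00 (UTC)
print(mjd_to_date(58849.0, utc_offset=-7))  # 2019-12-31 17:00:00 (Kitt Peak local)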
5166f9c73f1e8187790bb0c7600de690576ff221
plastikos/salt-testing
salttesting/jenkins.py
[ "Apache-2.0" ]
Python
save_state
null
def save_state(options): ''' Save some state data to be used between executions, minion IP address, minion states synced, etc... ''' state_file = os.path.join(options.workspace, '.state.json') if os.path.isfile(state_file): try: state = json.load(open(os.path.join(options.workspace, '.state.json'), 'r')) except ValueError: state = {} else: state = {} for varname in ('workspace', 'require_sudo', 'output_columns', 'salt_minion_synced', 'minion_ip_address', 'minion_python_executable', 'salt_minion_bootstrapped'): if varname not in state and varname in options: state[varname] = getattr(options, varname) json.dump(state, open(state_file, 'w'))
Save some state data to be used between executions, minion IP address, minion states synced, etc...
Save some state data to be used between executions, minion IP address, minion states synced, etc
[ "Save", "some", "state", "data", "to", "be", "used", "between", "executions", "minion", "IP", "address", "minion", "states", "synced", "etc" ]
def save_state(options): state_file = os.path.join(options.workspace, '.state.json') if os.path.isfile(state_file): try: state = json.load(open(os.path.join(options.workspace, '.state.json'), 'r')) except ValueError: state = {} else: state = {} for varname in ('workspace', 'require_sudo', 'output_columns', 'salt_minion_synced', 'minion_ip_address', 'minion_python_executable', 'salt_minion_bootstrapped'): if varname not in state and varname in options: state[varname] = getattr(options, varname) json.dump(state, open(state_file, 'w'))
[ "def", "save_state", "(", "options", ")", ":", "state_file", "=", "os", ".", "path", ".", "join", "(", "options", ".", "workspace", ",", "'.state.json'", ")", "if", "os", ".", "path", ".", "isfile", "(", "state_file", ")", ":", "try", ":", "state", "=", "json", ".", "load", "(", "open", "(", "os", ".", "path", ".", "join", "(", "options", ".", "workspace", ",", "'.state.json'", ")", ",", "'r'", ")", ")", "except", "ValueError", ":", "state", "=", "{", "}", "else", ":", "state", "=", "{", "}", "for", "varname", "in", "(", "'workspace'", ",", "'require_sudo'", ",", "'output_columns'", ",", "'salt_minion_synced'", ",", "'minion_ip_address'", ",", "'minion_python_executable'", ",", "'salt_minion_bootstrapped'", ")", ":", "if", "varname", "not", "in", "state", "and", "varname", "in", "options", ":", "state", "[", "varname", "]", "=", "getattr", "(", "options", ",", "varname", ")", "json", ".", "dump", "(", "state", ",", "open", "(", "state_file", ",", "'w'", ")", ")" ]
Save some state data to be used between executions, minion IP address, minion states synced, etc...
[ "Save", "some", "state", "data", "to", "be", "used", "between", "executions", "minion", "IP", "address", "minion", "states", "synced", "etc", "..." ]
[ "'''\n Save some state data to be used between executions, minion IP address, minion states synced, etc...\n '''" ]
[ { "param": "options", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "options", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os
import json

def save_state(options):
    state_file = os.path.join(options.workspace, '.state.json')
    if os.path.isfile(state_file):
        try:
            state = json.load(open(os.path.join(options.workspace, '.state.json'), 'r'))
        except ValueError:
            state = {}
    else:
        state = {}
    for varname in ('workspace', 'require_sudo', 'output_columns',
                    'salt_minion_synced', 'minion_ip_address',
                    'minion_python_executable', 'salt_minion_bootstrapped'):
        if varname not in state and varname in options:
            state[varname] = getattr(options, varname)
    json.dump(state, open(state_file, 'w'))
610,256
433
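A minimal sketch with a stub options object; the real code receives an option-parser result, so the stub's __contains__ simply mirrors the `varname in options` membership checks:

import tempfile

class StubOptions:
    def __init__(self, workspace):
        self.workspace = workspace
        self.require_sudo = False
    def __contains__(self, name):  # supports `varname in options`
        return hasattr(self, name)

ws = tempfile.mkdtemp()
save_state(StubOptions(ws))        # writes <ws>/.state.json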
0ef6f4cf9f99c15e487d9d9c4ca0ea1d83727ba6
juliaaz/Naibe-Bayes-Classifier
main.py
[ "MIT" ]
Python
merge_dicts
<not_specific>
def merge_dicts(dict1, dict2): """ Merges all the dictionaries, so in result bag of words can be created. """ if len(dict1) < len(dict2): dict1, dict2 = dict2, dict1 for key, value in dict2.items(): dict1[key] = dict1.get(key, 0) + value return dict1
Merges all the dictionaries, so in result bag of words can be created.
Merges the two dictionaries so that a single bag of words can be created.
[ "Merges", "the", "two", "dictionaries", "so", "that", "a", "single", "bag", "of", "words", "can", "be", "created", "." ]
def merge_dicts(dict1, dict2): if len(dict1) < len(dict2): dict1, dict2 = dict2, dict1 for key, value in dict2.items(): dict1[key] = dict1.get(key, 0) + value return dict1
[ "def", "merge_dicts", "(", "dict1", ",", "dict2", ")", ":", "if", "len", "(", "dict1", ")", "<", "len", "(", "dict2", ")", ":", "dict1", ",", "dict2", "=", "dict2", ",", "dict1", "for", "key", ",", "value", "in", "dict2", ".", "items", "(", ")", ":", "dict1", "[", "key", "]", "=", "dict1", ".", "get", "(", "key", ",", "0", ")", "+", "value", "return", "dict1" ]
Merges the two dictionaries so that a single bag of words can be created.
[ "Merges", "the", "two", "dictionaries", "so", "that", "a", "single", "bag", "of", "words", "can", "be", "created", "." ]
[ "\"\"\"\n Merges all the dictionaries, so in result bag of words can be created.\n \"\"\"" ]
[ { "param": "dict1", "type": null }, { "param": "dict2", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dict1", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dict2", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def merge_dicts(dict1, dict2):
    if len(dict1) < len(dict2):
        dict1, dict2 = dict2, dict1
    for key, value in dict2.items():
        dict1[key] = dict1.get(key, 0) + value
    return dict1
610,257
450
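A quick example summing two word-count dictionaries; note that the larger input is mutated in place and returned:

print(merge_dicts({"a": 2, "b": 1}, {"b": 3, "c": 1}))
# {'a': 2, 'b': 4, 'c': 1} -- overlapping counts are summed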
93819aefc4a0c6918cc7cff29cfb796c092b6523
dharif23/ncprep
ncprep/_operations.py
[ "MIT" ]
Python
generate_headers
<not_specific>
def generate_headers(weighted=None): """ This function generates pandas header/names based on weighted or not parameter :param weighted: Boolean yes/no :return: headers, a python list """ # Assign headers based on weighted or not if weighted == "yes" or weighted == "Yes" or weighted == "Y" or weighted == "y": headers = ['source', 'target', 'weight', 'timestamp'] elif weighted == "no" or weighted == "No" or weighted == "N" or weighted == "n": headers = ['source', 'target', 'timestamp'] else: print('Please provide weighted argument with yes/no, y/n, Yes/No', log_type='error') sys.exit(1) # Return return headers
This function generates pandas header/names based on weighted or not parameter :param weighted: Boolean yes/no :return: headers, a python list
This function generates pandas header/names based on weighted or not parameter
[ "This", "function", "generates", "pandas", "header", "/", "names", "based", "on", "weighted", "or", "not", "parameter" ]
def generate_headers(weighted=None): if weighted == "yes" or weighted == "Yes" or weighted == "Y" or weighted == "y": headers = ['source', 'target', 'weight', 'timestamp'] elif weighted == "no" or weighted == "No" or weighted == "N" or weighted == "n": headers = ['source', 'target', 'timestamp'] else: print('Please provide weighted argument with yes/no, y/n, Yes/No', file=sys.stderr) sys.exit(1) return headers
[ "def", "generate_headers", "(", "weighted", "=", "None", ")", ":", "if", "weighted", "==", "\"yes\"", "or", "weighted", "==", "\"Yes\"", "or", "weighted", "==", "\"Y\"", "or", "weighted", "==", "\"y\"", ":", "headers", "=", "[", "'source'", ",", "'target'", ",", "'weight'", ",", "'timestamp'", "]", "elif", "weighted", "==", "\"no\"", "or", "weighted", "==", "\"No\"", "or", "weighted", "==", "\"N\"", "or", "weighted", "==", "\"n\"", ":", "headers", "=", "[", "'source'", ",", "'target'", ",", "'timestamp'", "]", "else", ":", "print", "(", "'Please provide weighted argument with yes/no, y/n, Yes/No'", ",", "log_type", "=", "'error'", ")", "sys", ".", "exit", "(", "1", ")", "return", "headers" ]
This function generates pandas header/names based on weighted or not parameter
[ "This", "function", "generates", "pandas", "header", "/", "names", "based", "on", "weighted", "or", "not", "parameter" ]
[ "\"\"\"\n This function generates pandas header/names based on weighted or not parameter\n :param weighted: Boolean yes/no\n :return: headers, a python list\n \"\"\"", "# Assign headers based on weighted or not", "# Return" ]
[ { "param": "weighted", "type": null } ]
{ "returns": [ { "docstring": "headers, a python list", "docstring_tokens": [ "headers", "a", "python", "list" ], "type": null } ], "raises": [], "params": [ { "identifier": "weighted", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import sys

def generate_headers(weighted=None):
    if weighted == "yes" or weighted == "Yes" or weighted == "Y" or weighted == "y":
        headers = ['source', 'target', 'weight', 'timestamp']
    elif weighted == "no" or weighted == "No" or weighted == "N" or weighted == "n":
        headers = ['source', 'target', 'timestamp']
    else:
        print('Please provide weighted argument with yes/no, y/n, Yes/No', file=sys.stderr)
        sys.exit(1)
    return headers
610,258
228
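Typical use is building the names= list for a pandas read_csv call; a quick check of both branches (the original passed an invalid log_type keyword to print, fixed to file=sys.stderr in the code fields above):

print(generate_headers("yes"))  # ['source', 'target', 'weight', 'timestamp']
print(generate_headers("n"))    # ['source', 'target', 'timestamp']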
7be2b8ea52ab7e89e26da6d4577f7e090bfb06e4
EdTsft/swilite
swilite/core.py
[ "MIT" ]
Python
_fixWindowsPath
<not_specific>
def _fixWindowsPath(dll): """ When the path to the DLL is not in Windows search path, Windows will not be able to find other DLLs on the same directory, so we have to add it to the path. This function takes care of it. :parameters: - `dll` (str) - File name of the DLL """ if sys.platform[:3] != 'win': return # Nothing to do here pathToDll = os.path.dirname(dll) currentWindowsPath = os.getenv('PATH') if pathToDll not in currentWindowsPath: # We will prepend the path, to avoid conflicts between DLLs newPath = pathToDll + ';' + currentWindowsPath os.putenv('PATH', newPath)
When the path to the DLL is not in Windows search path, Windows will not be able to find other DLLs on the same directory, so we have to add it to the path. This function takes care of it. :parameters: - `dll` (str) - File name of the DLL
When the path to the DLL is not in Windows search path, Windows will not be able to find other DLLs on the same directory, so we have to add it to the path. This function takes care of it.
[ "When", "the", "path", "to", "the", "DLL", "is", "not", "in", "Windows", "search", "path", "Windows", "will", "not", "be", "able", "to", "find", "other", "DLLs", "on", "the", "same", "directory", "so", "we", "have", "to", "add", "it", "to", "the", "path", ".", "This", "function", "takes", "care", "of", "it", "." ]
def _fixWindowsPath(dll): if sys.platform[:3] != 'win': return pathToDll = os.path.dirname(dll) currentWindowsPath = os.getenv('PATH') if pathToDll not in currentWindowsPath: newPath = pathToDll + ';' + currentWindowsPath os.putenv('PATH', newPath)
[ "def", "_fixWindowsPath", "(", "dll", ")", ":", "if", "sys", ".", "platform", "[", ":", "3", "]", "!=", "'win'", ":", "return", "pathToDll", "=", "os", ".", "path", ".", "dirname", "(", "dll", ")", "currentWindowsPath", "=", "os", ".", "getenv", "(", "'PATH'", ")", "if", "pathToDll", "not", "in", "currentWindowsPath", ":", "newPath", "=", "pathToDll", "+", "';'", "+", "currentWindowsPath", "os", ".", "putenv", "(", "'PATH'", ",", "newPath", ")" ]
When the path to the DLL is not in Windows search path, Windows will not be able to find other DLLs on the same directory, so we have to add it to the path.
[ "When", "the", "path", "to", "the", "DLL", "is", "not", "in", "Windows", "search", "path", "Windows", "will", "not", "be", "able", "to", "find", "other", "DLLs", "on", "the", "same", "directory", "so", "we", "have", "to", "add", "it", "to", "the", "path", "." ]
[ "\"\"\"\n When the path to the DLL is not in Windows search path, Windows will not be\n able to find other DLLs on the same directory, so we have to add it to the\n path. This function takes care of it.\n\n :parameters:\n - `dll` (str) - File name of the DLL\n \"\"\"", "# Nothing to do here", "# We will prepend the path, to avoid conflicts between DLLs" ]
[ { "param": "dll", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dll", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "parameters", "docstring": "`dll` (str) - File name of the DLL", "docstring_tokens": [ "`", "dll", "`", "(", "str", ")", "-", "File", "name", "of", "the", "DLL" ] } ] }
import sys
import os

def _fixWindowsPath(dll):
    if sys.platform[:3] != 'win':
        return
    pathToDll = os.path.dirname(dll)
    currentWindowsPath = os.getenv('PATH')
    if pathToDll not in currentWindowsPath:
        newPath = pathToDll + ';' + currentWindowsPath
        os.putenv('PATH', newPath)
610,259
926
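A sketch of the intended call; the DLL path is hypothetical, and the function is a no-op on non-Windows platforms:

_fixWindowsPath(r"C:\swipl\bin\libswipl.dll")
# On win32 this prepends C:\swipl\bin to PATH if it is not already present.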
24e7eedfe75248ab75ca5a43e5b75732f5c5fe4e
davisan/PRNUPythonColab
src/Functions.py
[ "Unlicense" ]
Python
SeeProgress
null
def SeeProgress(i): """ SeeProgress(i) outputs i without performing carriage return This function is designed to be used in slow for-loops to show how the calculations progress. If the first call in the loop is not with i=1, it's convenient to call SeeProgress(1) before the loop. """ if i==1 | i==0 : print('\n ') print('* %(i)d *' % {"i": i}, end="\r")
SeeProgress(i) outputs i without performing carriage return This function is designed to be used in slow for-loops to show how the calculations progress. If the first call in the loop is not with i=1, it's convenient to call SeeProgress(1) before the loop.
SeeProgress(i) outputs i without performing carriage return This function is designed to be used in slow for-loops to show how the calculations progress. If the first call in the loop is not with i=1, it's convenient to call SeeProgress(1) before the loop.
[ "SeeProgress", "(", "i", ")", "outputs", "i", "without", "performing", "carriage", "return", "This", "function", "is", "designed", "to", "be", "used", "in", "slow", "for", "-", "loops", "to", "show", "how", "the", "calculations", "progress", ".", "If", "the", "first", "call", "in", "the", "loop", "is", "not", "with", "i", "=", "1", "it", "'", "s", "convenient", "to", "call", "SeeProgress", "(", "1", ")", "before", "the", "loop", "." ]
def SeeProgress(i): if i==1 or i==0 : print('\n ') print('* %(i)d *' % {"i": i}, end="\r")
[ "def", "SeeProgress", "(", "i", ")", ":", "if", "i", "==", "1", "|", "i", "==", "0", ":", "print", "(", "'\\n '", ")", "print", "(", "'* %(i)d *'", "%", "{", "\"i\"", ":", "i", "}", ",", "end", "=", "\"\\r\"", ")" ]
SeeProgress(i) outputs i without performing carriage return This function is designed to be used in slow for-loops to show how the calculations progress.
[ "SeeProgress", "(", "i", ")", "outputs", "i", "without", "performing", "carriage", "return", "This", "function", "is", "designed", "to", "be", "used", "in", "slow", "for", "-", "loops", "to", "show", "how", "the", "calculations", "progress", "." ]
[ "\"\"\"\n SeeProgress(i) outputs i without performing carriage return\n This function is designed to be used in slow for-loops to show how the \n calculations progress. If the first call in the loop is not with i=1, it's\n convenient to call SeeProgress(1) before the loop.\n \"\"\"" ]
[ { "param": "i", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "i", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def SeeProgress(i):
    if i == 1 or i == 0:
        print('\n ')
    print('* %(i)d *' % {"i": i}, end="\r")
610,260
285
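A minimal loop showing the in-place counter; note the operator fix in the code fields above, since the original `i==1 | i==0` parsed as a chained comparison and never printed the leading newline:

for i in range(1, 501):
    SeeProgress(i)   # rewrites '* i *' on the same console line via end='\r'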
55e2a0ceed415cfeefcd78bb32f95c450c9e874c
sadpterodactyl/single_cell_pbal
pdclust_expanded/.ipynb_checkpoints/pdclust_qc-checkpoint.py
[ "BSD-3-Clause" ]
Python
plot_figure
null
def plot_figure(fig,out_dir,file_name): """ Function for plotting figures in fixed dimensions and scale """ plot_dim_x=800 plot_dim_y=800 #plotly.offline.iplot(fig,validate=False, filename='customizing-subplot-axes') fig.write_image(out_dir+"/results/"+file_name+".png",width=plot_dim_x,height=plot_dim_y,scale=4)
Function for plotting figures in fixed dimensions and scale
Function for plotting figures in fixed dimensions and scale
[ "Function", "for", "plotting", "figures", "in", "fixed", "dimensions", "and", "scale" ]
def plot_figure(fig,out_dir,file_name): plot_dim_x=800 plot_dim_y=800 fig.write_image(out_dir+"/results/"+file_name+".png",width=plot_dim_x,height=plot_dim_y,scale=4)
[ "def", "plot_figure", "(", "fig", ",", "out_dir", ",", "file_name", ")", ":", "plot_dim_x", "=", "800", "plot_dim_y", "=", "800", "fig", ".", "write_image", "(", "out_dir", "+", "\"/results/\"", "+", "file_name", "+", "\".png\"", ",", "width", "=", "plot_dim_x", ",", "height", "=", "plot_dim_y", ",", "scale", "=", "4", ")" ]
Function for plotting figures in fixed dimensions and scale
[ "Function", "for", "plotting", "figures", "in", "fixed", "dimensions", "and", "scale" ]
[ "\"\"\"\n Function for plotting figures in fixed dimensions and scale\n \"\"\"", "#plotly.offline.iplot(fig,validate=False, filename='customizing-subplot-axes')" ]
[ { "param": "fig", "type": null }, { "param": "out_dir", "type": null }, { "param": "file_name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "fig", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "out_dir", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "file_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def plot_figure(fig, out_dir, file_name):
    plot_dim_x = 800
    plot_dim_y = 800
    fig.write_image(out_dir + "/results/" + file_name + ".png",
                    width=plot_dim_x, height=plot_dim_y, scale=4)
610,261
375
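A short plotly sketch; fig.write_image needs the kaleido package installed, and out_dir must already contain a results/ subdirectory:

import os
import plotly.graph_objects as go

os.makedirs("./results", exist_ok=True)
fig = go.Figure(data=go.Scatter(y=[1, 3, 2]))
plot_figure(fig, out_dir=".", file_name="qc_plot")  # writes ./results/qc_plot.png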
6c1e46c4ab5065ddb8b1b74913e70c0556f1981f
ayulockin/mmf
tests/test_utils.py
[ "BSD-3-Clause" ]
Python
search_log
<not_specific>
def search_log(log_file: str, search_condition: Optional[List[Callable]] = None): """Searches a log file for a particular search conditions which can be list of functions and returns it back Args: log_file (str): Log file in which search needs to be performed search_condition (List[Callable], optional): Search conditions in form of list. Each corresponding to a function to test a condition. Defaults to None. Returns: JSONObject: Json representation of the search line Throws: AssertionError: If no log line is found meeting the conditions """ if search_condition is None: search_condition = {} lines = [] with open(log_file) as f: lines = f.readlines() filtered_line = None for line in lines: line = line.strip() if "progress" not in line: continue info_index = line.find(" : ") line = line[info_index + 3 :] res = json.loads(line) meets_condition = True for condition_fn in search_condition: meets_condition = meets_condition and condition_fn(res) if meets_condition: filtered_line = res break assert filtered_line is not None, "No match for search condition in log file" return filtered_line
Searches a log file for a particular search conditions which can be list of functions and returns it back Args: log_file (str): Log file in which search needs to be performed search_condition (List[Callable], optional): Search conditions in form of list. Each corresponding to a function to test a condition. Defaults to None. Returns: JSONObject: Json representation of the search line Throws: AssertionError: If no log line is found meeting the conditions
Searches a log file for a particular search conditions which can be list of functions and returns it back
[ "Searches", "a", "log", "file", "for", "a", "particular", "search", "conditions", "which", "can", "be", "list", "of", "functions", "and", "returns", "it", "back" ]
def search_log(log_file: str, search_condition: Optional[List[Callable]] = None): if search_condition is None: search_condition = {} lines = [] with open(log_file) as f: lines = f.readlines() filtered_line = None for line in lines: line = line.strip() if "progress" not in line: continue info_index = line.find(" : ") line = line[info_index + 3 :] res = json.loads(line) meets_condition = True for condition_fn in search_condition: meets_condition = meets_condition and condition_fn(res) if meets_condition: filtered_line = res break assert filtered_line is not None, "No match for search condition in log file" return filtered_line
[ "def", "search_log", "(", "log_file", ":", "str", ",", "search_condition", ":", "Optional", "[", "List", "[", "Callable", "]", "]", "=", "None", ")", ":", "if", "search_condition", "is", "None", ":", "search_condition", "=", "{", "}", "lines", "=", "[", "]", "with", "open", "(", "log_file", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "filtered_line", "=", "None", "for", "line", "in", "lines", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "\"progress\"", "not", "in", "line", ":", "continue", "info_index", "=", "line", ".", "find", "(", "\" : \"", ")", "line", "=", "line", "[", "info_index", "+", "3", ":", "]", "res", "=", "json", ".", "loads", "(", "line", ")", "meets_condition", "=", "True", "for", "condition_fn", "in", "search_condition", ":", "meets_condition", "=", "meets_condition", "and", "condition_fn", "(", "res", ")", "if", "meets_condition", ":", "filtered_line", "=", "res", "break", "assert", "filtered_line", "is", "not", "None", ",", "\"No match for search condition in log file\"", "return", "filtered_line" ]
Searches a log file for a particular search conditions which can be list of functions and returns it back
[ "Searches", "a", "log", "file", "for", "a", "particular", "search", "conditions", "which", "can", "be", "list", "of", "functions", "and", "returns", "it", "back" ]
[ "\"\"\"Searches a log file for a particular search conditions which can be list\n of functions and returns it back\n\n Args:\n log_file (str): Log file in which search needs to be performed\n search_condition (List[Callable], optional): Search conditions in form of list.\n Each corresponding to a function to test a condition. Defaults to None.\n\n Returns:\n JSONObject: Json representation of the search line\n\n Throws:\n AssertionError: If no log line is found meeting the conditions\n \"\"\"" ]
[ { "param": "log_file", "type": "str" }, { "param": "search_condition", "type": "Optional[List[Callable]]" } ]
{ "returns": [ { "docstring": "Json representation of the search line", "docstring_tokens": [ "Json", "representation", "of", "the", "search", "line" ], "type": "JSONObject" } ], "raises": [], "params": [ { "identifier": "log_file", "type": "str", "docstring": "Log file in which search needs to be performed", "docstring_tokens": [ "Log", "file", "in", "which", "search", "needs", "to", "be", "performed" ], "default": null, "is_optional": false }, { "identifier": "search_condition", "type": "Optional[List[Callable]]", "docstring": "Search conditions in form of list.\nEach corresponding to a function to test a condition. Defaults to None.", "docstring_tokens": [ "Search", "conditions", "in", "form", "of", "list", ".", "Each", "corresponding", "to", "a", "function", "to", "test", "a", "condition", ".", "Defaults", "to", "None", "." ], "default": null, "is_optional": true } ], "outlier_params": [], "others": [] }
import json
from typing import Callable, List, Optional

def search_log(log_file: str, search_condition: Optional[List[Callable]] = None):
    if search_condition is None:
        search_condition = {}
    lines = []
    with open(log_file) as f:
        lines = f.readlines()
    filtered_line = None
    for line in lines:
        line = line.strip()
        if "progress" not in line:
            continue
        info_index = line.find(" : ")
        line = line[info_index + 3:]
        res = json.loads(line)
        meets_condition = True
        for condition_fn in search_condition:
            meets_condition = meets_condition and condition_fn(res)
        if meets_condition:
            filtered_line = res
            break
    assert filtered_line is not None, "No match for search condition in log file"
    return filtered_line
610,262
158
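A self-contained check; the log line layout (a 'progress' marker, then ' : ', then JSON) is the only contract the function relies on:

import json

with open("train.log", "w") as f:
    f.write("t0 progress : " + json.dumps({"epoch": 1, "loss": 0.9}) + "\n")
    f.write("t1 progress : " + json.dumps({"epoch": 2, "loss": 0.5}) + "\n")

print(search_log("train.log", [lambda r: r["epoch"] == 2]))
# {'epoch': 2, 'loss': 0.5}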
c038d72cc45bb3e10a3576b95c3f8ce18ef5feb6
gj-regensburg/gjr-telegram-common
gjrbotlib/bot_utilities.py
[ "MIT" ]
Python
test
null
def test(commands): """ This does check if the commands run without exception be thrown """ errors = 0 for key, handler in commands.items(): try: handler() print("SUCCESS with command '{}':".format(key)) except: print("ERROR with command '{}'".format(key)) errors += 1 print("Finishing with {} errors".format(errors))
This does check if the commands run without exception be thrown
Checks that each command runs without an exception being thrown.
[ "Checks", "that", "each", "command", "runs", "without", "an", "exception", "being", "thrown", "." ]
def test(commands): errors = 0 for key, handler in commands.items(): try: handler() print("SUCCESS with command '{}':".format(key)) except: print("ERROR with command '{}'".format(key)) errors += 1 print("Finishing with {} errors".format(errors))
[ "def", "test", "(", "commands", ")", ":", "errors", "=", "0", "for", "key", ",", "handler", "in", "commands", ".", "items", "(", ")", ":", "try", ":", "handler", "(", ")", "print", "(", "\"SUCCESS with command '{}':\"", ".", "format", "(", "key", ")", ")", "except", ":", "print", "(", "\"ERROR with command '{}'\"", ".", "format", "(", "key", ")", ")", "errors", "+=", "1", "print", "(", "\"Finishing with {} errors\"", ".", "format", "(", "errors", ")", ")" ]
Checks that each command runs without an exception being thrown.
[ "Checks", "that", "each", "command", "runs", "without", "an", "exception", "being", "thrown", "." ]
[ "\"\"\" This does check if the commands run without exception be thrown \"\"\"" ]
[ { "param": "commands", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "commands", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def test(commands):
    errors = 0
    for key, handler in commands.items():
        try:
            handler()
            print("SUCCESS with command '{}':".format(key))
        except:
            print("ERROR with command '{}'".format(key))
            errors += 1
    print("Finishing with {} errors".format(errors))
610,263
661
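A quick demonstration with one passing and one failing handler; the bare except is what lets the loop keep going:

test({
    "ok":   lambda: 1 + 1,
    "boom": lambda: 1 / 0,
})
# SUCCESS with command 'ok':
# ERROR with command 'boom'
# Finishing with 1 errors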
c6e824bfad9fa9ac06526ae2daff7905f5db3bf0
ZPedroP/ASAPPpy
ASAPPpy/tools/convert_xml_to_corpus.py
[ "MIT" ]
Python
write_training_corpus
null
def write_training_corpus(data, filename): """ Function used to debug the corpus state during preprocessing """ with open(filename, 'w') as f: for item in data: f.write("%s\n" % item)
Function used to debug the corpus state during preprocessing
Function used to debug the corpus state during preprocessing
[ "Function", "used", "to", "debug", "the", "corpus", "state", "during", "preprocessing" ]
def write_training_corpus(data, filename): with open(filename, 'w') as f: for item in data: f.write("%s\n" % item)
[ "def", "write_training_corpus", "(", "data", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "for", "item", "in", "data", ":", "f", ".", "write", "(", "\"%s\\n\"", "%", "item", ")" ]
Function used to debug the corpus state during preprocessing
[ "Function", "used", "to", "debug", "the", "corpus", "state", "during", "preprocessing" ]
[ "\"\"\" Function used to debug the corpus state during preprocessing \"\"\"" ]
[ { "param": "data", "type": null }, { "param": "filename", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def write_training_corpus(data, filename):
    with open(filename, 'w') as f:
        for item in data:
            f.write("%s\n" % item)
610,265
522
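A one-line demonstration; each list item becomes one line of the output file:

write_training_corpus(["first sentence", "second sentence"], "corpus.txt")
# corpus.txt now contains the two sentences, one per line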
7cc610801126a706b2d521558f80085fad3b871c
NarmakTwo/PyTools
PyTools.py
[ "MIT" ]
Python
scan
<not_specific>
def scan(file,string): """ scan a file for a string """ file1 = open(file, 'r') stringl = string.lower() if stringl in file1.read().lower(): return True else: return False
scan a file for a string
scan a file for a string
[ "scan", "a", "file", "for", "a", "string" ]
def scan(file,string): file1 = open(file, 'r') stringl = string.lower() if stringl in file1.read().lower(): return True else: return False
[ "def", "scan", "(", "file", ",", "string", ")", ":", "file1", "=", "open", "(", "file", ",", "'r'", ")", "stringl", "=", "string", ".", "lower", "(", ")", "if", "stringl", "in", "file1", ".", "read", "(", ")", ".", "lower", "(", ")", ":", "return", "True", "else", ":", "return", "False" ]
scan a file for a string
[ "scan", "a", "file", "for", "a", "string" ]
[ "\"\"\"\n scan a file for a string\n \"\"\"" ]
[ { "param": "file", "type": null }, { "param": "string", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def scan(file, string):
    file1 = open(file, 'r')
    stringl = string.lower()
    if stringl in file1.read().lower():
        return True
    else:
        return False
610,266
115
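A quick check of the case-insensitive match; note that scan itself never closes the file handle it opens:

with open("notes.txt", "w") as f:
    f.write("Hello World\n")
print(scan("notes.txt", "WORLD"))   # True  -- comparison is case-insensitive
print(scan("notes.txt", "absent"))  # False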
6b1dc9b7fb5fdc38481c1bea53086576cfc836d6
jwlodek/pyauto
pyautogit/logger.py
[ "BSD-3-Clause" ]
Python
close_logger
null
def close_logger(): """Function that closes the opened logfile """ global _LOG_FILE_POINTER if _LOG_FILE_POINTER is not None: _LOG_FILE_POINTER.close() _LOG_FILE_POINTER = None
Function that closes the opened logfile
Function that closes the opened logfile
[ "Function", "that", "closes", "the", "opened", "logfile" ]
def close_logger(): global _LOG_FILE_POINTER if _LOG_FILE_POINTER is not None: _LOG_FILE_POINTER.close() _LOG_FILE_POINTER = None
[ "def", "close_logger", "(", ")", ":", "global", "_LOG_FILE_POINTER", "if", "_LOG_FILE_POINTER", "is", "not", "None", ":", "_LOG_FILE_POINTER", ".", "close", "(", ")", "_LOG_FILE_POINTER", "=", "None" ]
Function that closes the opened logfile
[ "Function", "that", "closes", "the", "opened", "logfile" ]
[ "\"\"\"Function that closes the opened logfile\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def close_logger():
    global _LOG_FILE_POINTER
    if _LOG_FILE_POINTER is not None:
        _LOG_FILE_POINTER.close()
        _LOG_FILE_POINTER = None
610,267
718
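The matching opener is not part of this record; a minimal sketch of the intended pairing, assuming both live in the same module as _LOG_FILE_POINTER:

_LOG_FILE_POINTER = open("pyautogit.log", "w")  # normally set by the module's open function
close_logger()   # closes the file and resets the pointer to None
close_logger()   # safe to call again: the pointer is already None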
44f8ef29c1158ea24fcf5ba4983e15e253de9ec9
dcaballe/iree-llvm-sandbox
run_benchmarks.py
[ "Apache-2.0" ]
Python
_convert_path_to_module
str
def _convert_path_to_module(test_script : str) -> str: """Convert the path of the test script to its module name.""" test_script = test_script.replace(os.sep, ".") test_script = test_script.strip(".") if test_script.endswith(".py"): return test_script[:-3] return test_script
Convert the path of the test script to its module name.
Convert the path of the test script to its module name.
[ "Convert", "the", "path", "of", "the", "test", "script", "to", "its", "module", "name", "." ]
def _convert_path_to_module(test_script : str) -> str: test_script = test_script.replace(os.sep, ".") test_script = test_script.strip(".") if test_script.endswith(".py"): return test_script[:-3] return test_script
[ "def", "_convert_path_to_module", "(", "test_script", ":", "str", ")", "->", "str", ":", "test_script", "=", "test_script", ".", "replace", "(", "os", ".", "sep", ",", "\".\"", ")", "test_script", "=", "test_script", ".", "strip", "(", "\".\"", ")", "if", "test_script", ".", "endswith", "(", "\".py\"", ")", ":", "return", "test_script", "[", ":", "-", "3", "]", "return", "test_script" ]
Convert the path of the test script to its module name.
[ "Convert", "the", "path", "of", "the", "test", "script", "to", "its", "module", "name", "." ]
[ "\"\"\"Convert the path of the test script to its module name.\"\"\"" ]
[ { "param": "test_script", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "test_script", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def _convert_path_to_module(test_script: str) -> str:
    test_script = test_script.replace(os.sep, ".")
    test_script = test_script.strip(".")
    if test_script.endswith(".py"):
        return test_script[:-3]
    return test_script
610,268
923
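Two quick checks (POSIX path separator assumed); leading './' segments are stripped along with the .py suffix:

print(_convert_path_to_module("python/examples/matmul.py"))    # python.examples.matmul
print(_convert_path_to_module("./python/examples/matmul.py"))  # python.examples.matmul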
31c303fd1dc776d588ff2a9db33cbc061fe2dea7
qdegraaf/pygeneticoptimizer
optimizer/genetic_optimizer.py
[ "MIT" ]
Python
_levenshtein
int
def _levenshtein(startstring: str, targetstring: str, costs: Tuple[int, int, int] = (1, 1, 1)) -> int: """ return the Levenshtein distance between the strings startstring and targetstring For all i and j, dist[i,j] will contain the Levenshtein distance between the first i characters of s and the first j characters of t costs: a tuple or a list with three integers (d, i, s) where d defines the costs for a deletion i defines the costs for an insertion and s defines the costs for a substitution """ rows = len(startstring) + 1 cols = len(targetstring) + 1 deletes, inserts, substitutes = costs dist = [[0 for _ in range(cols)] for _ in range(rows)] # source prefixes can be transformed into empty strings # by deletions: for row in range(1, rows): dist[row][0] = row * deletes # target prefixes can be created from an empty source string # by inserting the characters for col in range(1, cols): dist[0][col] = col * inserts for col in range(1, cols): for row in range(1, rows): if startstring[row - 1] == targetstring[col - 1]: cost = 0 else: cost = substitutes dist[row][col] = min(dist[row - 1][col] + deletes, dist[row][col - 1] + inserts, dist[row - 1][col - 1] + cost) # substitution return dist[row][col]
return the Levenshtein distance between the strings startstring and targetstring For all i and j, dist[i,j] will contain the Levenshtein distance between the first i characters of s and the first j characters of t costs: a tuple or a list with three integers (d, i, s) where d defines the costs for a deletion i defines the costs for an insertion and s defines the costs for a substitution
return the Levenshtein distance between the strings startstring and targetstring For all i and j, dist[i,j] will contain the Levenshtein distance between the first i characters of s and the first j characters of t a tuple or a list with three integers (d, i, s) where d defines the costs for a deletion i defines the costs for an insertion and s defines the costs for a substitution
[ "return", "the", "Levenshtein", "distance", "between", "the", "strings", "startstring", "and", "targetstring", "For", "all", "i", "and", "j", "dist", "[", "i", "j", "]", "will", "contain", "the", "Levenshtein", "distance", "between", "the", "first", "i", "characters", "of", "s", "and", "the", "first", "j", "characters", "of", "t", "a", "tuple", "or", "a", "list", "with", "three", "integers", "(", "d", "i", "s", ")", "where", "d", "defines", "the", "costs", "for", "a", "deletion", "i", "defines", "the", "costs", "for", "an", "insertion", "and", "s", "defines", "the", "costs", "for", "a", "substitution" ]
def _levenshtein(startstring: str, targetstring: str, costs: Tuple[int, int, int] = (1, 1, 1)) -> int: rows = len(startstring) + 1 cols = len(targetstring) + 1 deletes, inserts, substitutes = costs dist = [[0 for _ in range(cols)] for _ in range(rows)] for row in range(1, rows): dist[row][0] = row * deletes for col in range(1, cols): dist[0][col] = col * inserts for col in range(1, cols): for row in range(1, rows): if startstring[row - 1] == targetstring[col - 1]: cost = 0 else: cost = substitutes dist[row][col] = min(dist[row - 1][col] + deletes, dist[row][col - 1] + inserts, dist[row - 1][col - 1] + cost) return dist[row][col]
[ "def", "_levenshtein", "(", "startstring", ":", "str", ",", "targetstring", ":", "str", ",", "costs", ":", "Tuple", "[", "int", ",", "int", ",", "int", "]", "=", "(", "1", ",", "1", ",", "1", ")", ")", "->", "int", ":", "rows", "=", "len", "(", "startstring", ")", "+", "1", "cols", "=", "len", "(", "targetstring", ")", "+", "1", "deletes", ",", "inserts", ",", "substitutes", "=", "costs", "dist", "=", "[", "[", "0", "for", "_", "in", "range", "(", "cols", ")", "]", "for", "_", "in", "range", "(", "rows", ")", "]", "for", "row", "in", "range", "(", "1", ",", "rows", ")", ":", "dist", "[", "row", "]", "[", "0", "]", "=", "row", "*", "deletes", "for", "col", "in", "range", "(", "1", ",", "cols", ")", ":", "dist", "[", "0", "]", "[", "col", "]", "=", "col", "*", "inserts", "for", "col", "in", "range", "(", "1", ",", "cols", ")", ":", "for", "row", "in", "range", "(", "1", ",", "rows", ")", ":", "if", "startstring", "[", "row", "-", "1", "]", "==", "targetstring", "[", "col", "-", "1", "]", ":", "cost", "=", "0", "else", ":", "cost", "=", "substitutes", "dist", "[", "row", "]", "[", "col", "]", "=", "min", "(", "dist", "[", "row", "-", "1", "]", "[", "col", "]", "+", "deletes", ",", "dist", "[", "row", "]", "[", "col", "-", "1", "]", "+", "inserts", ",", "dist", "[", "row", "-", "1", "]", "[", "col", "-", "1", "]", "+", "cost", ")", "return", "dist", "[", "row", "]", "[", "col", "]" ]
return the Levenshtein distance between the strings startstring and targetstring For all i and j, dist[i,j] will contain the Levenshtein distance between the first i characters of s and the first j characters of t
[ "return", "the", "Levenshtein", "distance", "between", "the", "strings", "startstring", "and", "targetstring", "For", "all", "i", "and", "j", "dist", "[", "i", "j", "]", "will", "contain", "the", "Levenshtein", "distance", "between", "the", "first", "i", "characters", "of", "s", "and", "the", "first", "j", "characters", "of", "t" ]
[ "\"\"\"\n return the Levenshtein distance between the strings startstring and\n targetstring\n For all i and j, dist[i,j] will contain the Levenshtein\n distance between the first i characters of s and the\n first j characters of t\n\n costs: a tuple or a list with three integers (d, i, s)\n where d defines the costs for a deletion\n i defines the costs for an insertion and\n s defines the costs for a substitution\n \"\"\"", "# source prefixes can be transformed into empty strings", "# by deletions:", "# target prefixes can be created from an empty source string", "# by inserting the characters", "# substitution" ]
[ { "param": "startstring", "type": "str" }, { "param": "targetstring", "type": "str" }, { "param": "costs", "type": "Tuple[int, int, int]" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "startstring", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "targetstring", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "costs", "type": "Tuple[int, int, int]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from typing import Tuple

def _levenshtein(startstring: str, targetstring: str,
                 costs: Tuple[int, int, int] = (1, 1, 1)) -> int:
    rows = len(startstring) + 1
    cols = len(targetstring) + 1
    deletes, inserts, substitutes = costs
    dist = [[0 for _ in range(cols)] for _ in range(rows)]
    for row in range(1, rows):
        dist[row][0] = row * deletes
    for col in range(1, cols):
        dist[0][col] = col * inserts
    for col in range(1, cols):
        for row in range(1, rows):
            if startstring[row - 1] == targetstring[col - 1]:
                cost = 0
            else:
                cost = substitutes
            dist[row][col] = min(dist[row - 1][col] + deletes,
                                 dist[row][col - 1] + inserts,
                                 dist[row - 1][col - 1] + cost)
    return dist[row][col]
610,269
756
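The classic kitten/sitting check, plus the weighted variant; note the function returns by indexing its loop variables, so empty inputs raise NameError:

print(_levenshtein("kitten", "sitting"))                   # 3
print(_levenshtein("kitten", "sitting", costs=(1, 1, 2)))  # 5 -- substitutions now cost 2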
68b8e7dbe442c7b6fd61ce801dacded91cd54f52
ashishdas009/in-toto
tests/runtests.py
[ "Apache-2.0" ]
Python
check_usable_gpg
null
def check_usable_gpg(): """Set `TEST_SKIP_GPG` environment variable if neither gpg2 nor gpg is available. """ os.environ["TEST_SKIP_GPG"] = "1" for gpg in ["gpg2", "gpg"]: try: subprocess.check_call([gpg, "--version"]) except OSError: pass else: # If one of the two exists, we can unset the skip envvar and ... os.environ.pop("TEST_SKIP_GPG", None) # ... abort the availability check.: break
Set `TEST_SKIP_GPG` environment variable if neither gpg2 nor gpg is available.
Set `TEST_SKIP_GPG` environment variable if neither gpg2 nor gpg is available.
[ "Set", "`", "TEST_SKIP_GPG", "`", "environment", "variable", "if", "neither", "gpg2", "nor", "gpg", "is", "available", "." ]
def check_usable_gpg(): os.environ["TEST_SKIP_GPG"] = "1" for gpg in ["gpg2", "gpg"]: try: subprocess.check_call([gpg, "--version"]) except OSError: pass else: os.environ.pop("TEST_SKIP_GPG", None) break
[ "def", "check_usable_gpg", "(", ")", ":", "os", ".", "environ", "[", "\"TEST_SKIP_GPG\"", "]", "=", "\"1\"", "for", "gpg", "in", "[", "\"gpg2\"", ",", "\"gpg\"", "]", ":", "try", ":", "subprocess", ".", "check_call", "(", "[", "gpg", ",", "\"--version\"", "]", ")", "except", "OSError", ":", "pass", "else", ":", "os", ".", "environ", ".", "pop", "(", "\"TEST_SKIP_GPG\"", ",", "None", ")", "break" ]
Set `TEST_SKIP_GPG` environment variable if neither gpg2 nor gpg is available.
[ "Set", "`", "TEST_SKIP_GPG", "`", "environment", "variable", "if", "neither", "gpg2", "nor", "gpg", "is", "available", "." ]
[ "\"\"\"Set `TEST_SKIP_GPG` environment variable if neither gpg2 nor gpg is\n available.\n\n \"\"\"", "# If one of the two exists, we can unset the skip envvar and ...", "# ... abort the availability check.:" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import subprocess
import os

def check_usable_gpg():
    os.environ["TEST_SKIP_GPG"] = "1"
    for gpg in ["gpg2", "gpg"]:
        try:
            subprocess.check_call([gpg, "--version"])
        except OSError:
            pass
        else:
            os.environ.pop("TEST_SKIP_GPG", None)
            break
610,270
98
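A direct check of the environment flag after the probe:

import os

check_usable_gpg()
print(os.environ.get("TEST_SKIP_GPG"))  # None if gpg2/gpg is on PATH, "1" otherwise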
53693c8d2c9e0e2e41c4e4573a9ccbdee19b4e2d
GeminiDRSoftware/GHOSTDR
ghostdr/ghost/recipes/qa/recipes_ARC.py
[ "BSD-3-Clause" ]
Python
makeProcessedArc
<not_specific>
def makeProcessedArc(p): """ This recipe performs the standardization and corrections needed to convert the raw input arc images into a single stacked arc image. This output processed arc is stored on disk using storeProcessedArc and has a name equal to the name of the first input arc image with "_arc.fits" appended. The wavelength solution is also stored. Parameters ---------- p : Primitives object A primitive set matching the recipe_tags. """ p.prepare() p.addDQ() p.addVAR(read_noise=True) p.overscanCorrect() #p.tileArrays() p.biasCorrect() p.ADUToElectrons() p.addVAR(poisson_noise=True) # TODO? p.ADUToElectrons() p.darkCorrect() #p.rejectCosmicRays( # ) p.tileArrays() p.extractProfile(sky_correct=False, write_result=True) p.fitWavelength() p.storeProcessedArc() return
This recipe performs the standardization and corrections needed to convert the raw input arc images into a single stacked arc image. This output processed arc is stored on disk using storeProcessedArc and has a name equal to the name of the first input arc image with "_arc.fits" appended. The wavelength solution is also stored. Parameters ---------- p : Primitives object A primitive set matching the recipe_tags.
This recipe performs the standardization and corrections needed to convert the raw input arc images into a single stacked arc image. This output processed arc is stored on disk using storeProcessedArc and has a name equal to the name of the first input arc image with "_arc.fits" appended. The wavelength solution is also stored. Parameters p : Primitives object A primitive set matching the recipe_tags.
[ "This", "recipe", "performs", "the", "standardization", "and", "corrections", "needed", "to", "convert", "the", "raw", "input", "arc", "images", "into", "a", "single", "stacked", "arc", "image", ".", "This", "output", "processed", "arc", "is", "stored", "on", "disk", "using", "storeProcessedArc", "and", "has", "a", "name", "equal", "to", "the", "name", "of", "the", "first", "input", "arc", "image", "with", "\"", "_arc", ".", "fits", "\"", "appended", ".", "The", "wavelength", "solution", "is", "also", "stored", ".", "Parameters", "p", ":", "Primitives", "object", "A", "primitive", "set", "matching", "the", "recipe_tags", "." ]
def makeProcessedArc(p): p.prepare() p.addDQ() p.addVAR(read_noise=True) p.overscanCorrect() p.biasCorrect() p.ADUToElectrons() p.addVAR(poisson_noise=True) p.darkCorrect() p.tileArrays() p.extractProfile(sky_correct=False, write_result=True) p.fitWavelength() p.storeProcessedArc() return
[ "def", "makeProcessedArc", "(", "p", ")", ":", "p", ".", "prepare", "(", ")", "p", ".", "addDQ", "(", ")", "p", ".", "addVAR", "(", "read_noise", "=", "True", ")", "p", ".", "overscanCorrect", "(", ")", "p", ".", "biasCorrect", "(", ")", "p", ".", "ADUToElectrons", "(", ")", "p", ".", "addVAR", "(", "poisson_noise", "=", "True", ")", "p", ".", "darkCorrect", "(", ")", "p", ".", "tileArrays", "(", ")", "p", ".", "extractProfile", "(", "sky_correct", "=", "False", ",", "write_result", "=", "True", ")", "p", ".", "fitWavelength", "(", ")", "p", ".", "storeProcessedArc", "(", ")", "return" ]
This recipe performs the standardization and corrections needed to convert the raw input arc images into a single stacked arc image.
[ "This", "recipe", "performs", "the", "standardization", "and", "corrections", "needed", "to", "convert", "the", "raw", "input", "arc", "images", "into", "a", "single", "stacked", "arc", "image", "." ]
[ "\"\"\"\n This recipe performs the standardization and corrections needed to convert\n the raw input arc images into a single stacked arc image. This output\n processed arc is stored on disk using storeProcessedArc and has a name\n equal to the name of the first input arc image with \"_arc.fits\" appended.\n The wavelength solution is also stored.\n\n Parameters\n ----------\n p : Primitives object\n A primitive set matching the recipe_tags.\n \"\"\"", "#p.tileArrays()", "# TODO? p.ADUToElectrons()", "#p.rejectCosmicRays(", "# )" ]
[ { "param": "p", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "p", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def makeProcessedArc(p):
    p.prepare()
    p.addDQ()
    p.addVAR(read_noise=True)
    p.overscanCorrect()
    p.biasCorrect()
    p.ADUToElectrons()
    p.addVAR(poisson_noise=True)
    p.darkCorrect()
    p.tileArrays()
    p.extractProfile(sky_correct=False, write_result=True)
    p.fitWavelength()
    p.storeProcessedArc()
    return
610,272
292
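The recipe only sequences primitive calls, so a tracing stub is enough to see the reduction order; the real argument is a Gemini/DRAGONS primitive set, which the stub below merely mimics:

class TracePrimitives:
    def __getattr__(self, name):  # every primitive lookup becomes a recorder
        return lambda *args, **kwargs: print(name, kwargs)

makeProcessedArc(TracePrimitives())
# prepare {} ... addVAR {'read_noise': True} ... fitWavelength {} storeProcessedArc {}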
e84b2a6cd52805678e95a8f7df1018bac33a1504
boyter/scc-data
convert_json.py
[ "Unlicense" ]
Python
multipleGitIgnore
<not_specific>
def multipleGitIgnore(): ''' Converts the output of multipleGitIgnore into something we can throw into a chart library since it needs to be sorted It is a count of the number of projects and gitignores EG. gitignore:project where 123 projects have 2 gitignore files https://jsfiddle.net/jqt81ufs/1/ ''' data = '[]' with codecs.open('./results/multipleGitIgnore.json', 'r') as myfile: data = myfile.read() d = json.loads(data) new = [] for x, y in d.iteritems(): new.append([int(x), y]) def cmp(a, b): if a == b: return 0 if a < b: return -1 return 1 new.sort(cmp) with codecs.open("./results/multipleGitIgnore_converted.json", "w", "utf-8") as text_file: text_file.write(json.dumps(new, sort_keys=True))
Converts the output of multipleGitIgnore into something we can throw into a chart library since it needs to be sorted It is a count of the number of projects and gitignores EG. gitignore:project where 123 projects have 2 gitignore files https://jsfiddle.net/jqt81ufs/1/
Converts the output of multipleGitIgnore into something we can throw into a chart library since it needs to be sorted It is a count of the number of projects and gitignores EG.
[ "Converts", "the", "output", "of", "multipleGitIgnore", "into", "something", "we", "can", "throw", "into", "a", "chart", "library", "since", "it", "needs", "to", "be", "sorted", "It", "is", "a", "count", "of", "the", "number", "of", "projects", "and", "gitignores", "EG", "." ]
def multipleGitIgnore(): data = '[]' with codecs.open('./results/multipleGitIgnore.json', 'r') as myfile: data = myfile.read() d = json.loads(data) new = [] for x, y in d.iteritems(): new.append([int(x), y]) def cmp(a, b): if a == b: return 0 if a < b: return -1 return 1 new.sort(cmp) with codecs.open("./results/multipleGitIgnore_converted.json", "w", "utf-8") as text_file: text_file.write(json.dumps(new, sort_keys=True))
[ "def", "multipleGitIgnore", "(", ")", ":", "data", "=", "'[]'", "with", "codecs", ".", "open", "(", "'./results/multipleGitIgnore.json'", ",", "'r'", ")", "as", "myfile", ":", "data", "=", "myfile", ".", "read", "(", ")", "d", "=", "json", ".", "loads", "(", "data", ")", "new", "=", "[", "]", "for", "x", ",", "y", "in", "d", ".", "iteritems", "(", ")", ":", "new", ".", "append", "(", "[", "int", "(", "x", ")", ",", "y", "]", ")", "def", "cmp", "(", "a", ",", "b", ")", ":", "if", "a", "==", "b", ":", "return", "0", "if", "a", "<", "b", ":", "return", "-", "1", "return", "1", "new", ".", "sort", "(", "cmp", ")", "with", "codecs", ".", "open", "(", "\"./results/multipleGitIgnore_converted.json\"", ",", "\"w\"", ",", "\"utf-8\"", ")", "as", "text_file", ":", "text_file", ".", "write", "(", "json", ".", "dumps", "(", "new", ",", "sort_keys", "=", "True", ")", ")" ]
Converts the output of multipleGitIgnore into something we can throw into a chart library since it needs to be sorted It is a count of the number of projects and gitignores
[ "Converts", "the", "output", "of", "multipleGitIgnore", "into", "something", "we", "can", "throw", "into", "a", "chart", "library", "since", "it", "needs", "to", "be", "sorted", "It", "is", "a", "count", "of", "the", "number", "of", "projects", "and", "gitignores" ]
[ "'''\n Converts the output of multipleGitIgnore into something\n we can throw into a chart library since it needs to \n be sorted\n It is a count of the number of projects and gitignores\n\n EG. gitignore:project where 123 projects have 2 gitignore files\n https://jsfiddle.net/jqt81ufs/1/\n '''" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import codecs
import json

def multipleGitIgnore():
    data = '[]'
    with codecs.open('./results/multipleGitIgnore.json', 'r') as myfile:
        data = myfile.read()
    d = json.loads(data)
    new = []
    for x, y in d.iteritems():
        new.append([int(x), y])

    def cmp(a, b):
        if a == b:
            return 0
        if a < b:
            return -1
        return 1

    new.sort(cmp)
    with codecs.open("./results/multipleGitIgnore_converted.json", "w", "utf-8") as text_file:
        text_file.write(json.dumps(new, sort_keys=True))
610,273
614
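This record is Python 2 code (dict.iteritems and a cmp-based sort); a sketch of the same histogram conversion under Python 3, with a hypothetical input:

import json

d = {"2": 123, "1": 456}   # hypothetical {gitignore-count: project-count} histogram
pairs = sorted(([int(k), v] for k, v in d.items()), key=lambda p: p[0])
print(json.dumps(pairs))   # [[1, 456], [2, 123]]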
f0c62efb5a7074133e57810b9f3cd8f4d88d2bd5
Rcuz8/Cirq
cirq/work/observable_measurement.py
[ "Apache-2.0" ]
Python
_repetitions_to_do
int
def _repetitions_to_do(accumulator: BitstringAccumulator, desired_reps: int) -> int: """Stub function to chunk desired repetitions into groups of 10,000.""" done = accumulator.n_repetitions todo = desired_reps - done if todo <= 0: return 0 to_do_next = min(10_000, todo) return to_do_next
Stub function to chunk desired repetitions into groups of 10,000.
Stub function to chunk desired repetitions into groups of 10,000.
[ "Stub", "function", "to", "chunk", "desired", "repetitions", "into", "groups", "of", "10", "000", "." ]
def _repetitions_to_do(accumulator: BitstringAccumulator, desired_reps: int) -> int: done = accumulator.n_repetitions todo = desired_reps - done if todo <= 0: return 0 to_do_next = min(10_000, todo) return to_do_next
[ "def", "_repetitions_to_do", "(", "accumulator", ":", "BitstringAccumulator", ",", "desired_reps", ":", "int", ")", "->", "int", ":", "done", "=", "accumulator", ".", "n_repetitions", "todo", "=", "desired_reps", "-", "done", "if", "todo", "<=", "0", ":", "return", "0", "to_do_next", "=", "min", "(", "10_000", ",", "todo", ")", "return", "to_do_next" ]
Stub function to chunk desired repetitions into groups of 10,000.
[ "Stub", "function", "to", "chunk", "desired", "repetitions", "into", "groups", "of", "10", "000", "." ]
[ "\"\"\"Stub function to chunk desired repetitions into groups of 10,000.\"\"\"" ]
[ { "param": "accumulator", "type": "BitstringAccumulator" }, { "param": "desired_reps", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "accumulator", "type": "BitstringAccumulator", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "desired_reps", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _repetitions_to_do(accumulator: BitstringAccumulator, desired_reps: int) -> int: done = accumulator.n_repetitions todo = desired_reps - done if todo <= 0: return 0 to_do_next = min(10_000, todo) return to_do_next
610,274
907
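A minimal sketch of the chunking behaviour, using a hypothetical stand-in for cirq's BitstringAccumulator (only the n_repetitions attribute is consulted):

class FakeAccumulator:
    def __init__(self, n_repetitions):
        self.n_repetitions = n_repetitions

print(_repetitions_to_do(FakeAccumulator(0), 25_000))       # 10000 (capped)
print(_repetitions_to_do(FakeAccumulator(20_000), 25_000))  # 5000 (remainder)
print(_repetitions_to_do(FakeAccumulator(25_000), 25_000))  # 0 (already done)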
2eb4b36cd6a7f2187747a2a5e00c0b0b2d9fe7fe
lydiadwyer/sheparddb
data/selenium/features/steps.py
[ "Apache-2.0" ]
Python
find_field_by_class
<not_specific>
def find_field_by_class(browser, attribute):
    """ find an input by its class """
    xpath = "//input[@class='%s']" % attribute
    elems = browser.find_elements_by_xpath(xpath)
    return elems[0] if elems else False
find an input by its class
find an input by its class
[ "find", "an", "input", "by", "its", "class" ]
def find_field_by_class(browser, attribute): xpath = "//input[@class='%s']" % attribute elems = browser.find_elements_by_xpath(xpath) return elems[0] if elems else False
[ "def", "find_field_by_class", "(", "browser", ",", "attribute", ")", ":", "xpath", "=", "\"//input[@class='%s']\"", "%", "attribute", "elems", "=", "browser", ".", "find_elements_by_xpath", "(", "xpath", ")", "return", "elems", "[", "0", "]", "if", "elems", "else", "False" ]
find an input by its class
[ "find", "an", "input", "by", "its", "class" ]
[ "\"\"\" find an input by its class \"\"\"" ]
[ { "param": "browser", "type": null }, { "param": "attribute", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "browser", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "attribute", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def find_field_by_class(browser, attribute): xpath = "//input[@class='%s']" % attribute elems = browser.find_elements_by_xpath(xpath) return elems[0] if elems else False
610,275
699
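A usage sketch with a hypothetical stub driver. Note the XPath matches the full class attribute exactly (not "has this class"), the function returns False rather than None when nothing matches, and newer Selenium would use driver.find_elements(By.XPATH, ...) instead of the find_elements_by_xpath shortcut assumed here.

class StubBrowser:
    def find_elements_by_xpath(self, xpath):
        print('querying', xpath)  # //input[@class='login-input']
        return []                 # pretend nothing matched

assert find_field_by_class(StubBrowser(), 'login-input') is False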
498246054897849d72b07dc078d8b150091d7c85
bencorrado/backend-client
wirepas_backend_client/tools/utils.py
[ "Apache-2.0" ]
Python
flatten
<not_specific>
def flatten(input_dict, separator="/", prefix=""):
    """
    Flattens a dictionary with nested dictionaries and lists
    into a single dictionary.

    The key compression is done using the chosen separator.
    """
    output_dict = {}

    def step(member, parent_key=""):
        if isinstance(member, dict):
            for key, value in member.items():
                step(
                    value,
                    f"{parent_key}{separator}{key}" if parent_key else str(key),
                )
        elif isinstance(member, list):
            for index, sublist in enumerate(member, start=0):
                step(
                    sublist,
                    f"{parent_key}{separator}{index}" if parent_key else str(index),
                )
        else:
            output_dict[f"{parent_key}"] = member

    step(input_dict)
    return output_dict
Flattens a dictionary with nested dictionaries and lists into a single dictionary. The key compression is done using the chosen separator.
Flattens a dictionary with nested dictionaries and lists into a single dictionary. The key compression is done using the chosen separator.
[ "Flattens", "a", "dictionary", "with", "nested", "dictionaries", "and", "lists", "into", "a", "single", "dictionary", ".", "The", "key", "compression", "is", "done", "using", "the", "chosen", "separator", "." ]
def flatten(input_dict, separator="/", prefix=""): output_dict = {} def step(member, parent_key=""): if isinstance(member, dict): for key, value in member.items(): step( value, f"{parent_key}{separator}{key}" if parent_key else str(key), ) elif isinstance(member, list): for index, sublist in enumerate(member, start=0): step( sublist, f"{parent_key}{separator}{index}" if parent_key else str(index), ) else: output_dict[f"{parent_key}"] = member step(input_dict) return output_dict
[ "def", "flatten", "(", "input_dict", ",", "separator", "=", "\"/\"", ",", "prefix", "=", "\"\"", ")", ":", "output_dict", "=", "{", "}", "def", "step", "(", "member", ",", "parent_key", "=", "\"\"", ")", ":", "if", "isinstance", "(", "member", ",", "dict", ")", ":", "for", "key", ",", "value", "in", "member", ".", "items", "(", ")", ":", "step", "(", "value", ",", "f\"{parent_key}{separator}{key}\"", "if", "parent_key", "else", "str", "(", "key", ")", ",", ")", "elif", "isinstance", "(", "member", ",", "list", ")", ":", "for", "index", ",", "sublist", "in", "enumerate", "(", "member", ",", "start", "=", "0", ")", ":", "step", "(", "sublist", ",", "f\"{parent_key}{separator}{index}\"", "if", "parent_key", "else", "str", "(", "index", ")", ",", ")", "else", ":", "output_dict", "[", "f\"{parent_key}\"", "]", "=", "member", "step", "(", "input_dict", ")", "return", "output_dict" ]
Flattens a dictionary with nested dictionaries and lists into a single dictionary.
[ "Flattens", "a", "dictionary", "with", "nested", "dictionaries", "and", "lists", "into", "a", "single", "dictionary", "." ]
[ "\"\"\"\n Flattens a dictionary with nested dictionaries and lists\n into a single dictionary.\n\n The key compression is done using the chosen separator.\n \"\"\"" ]
[ { "param": "input_dict", "type": null }, { "param": "separator", "type": null }, { "param": "prefix", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "input_dict", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "separator", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "prefix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def flatten(input_dict, separator="/", prefix=""): output_dict = {} def step(member, parent_key=""): if isinstance(member, dict): for key, value in member.items(): step( value, f"{parent_key}{separator}{key}" if parent_key else str(key), ) elif isinstance(member, list): for index, sublist in enumerate(member, start=0): step( sublist, f"{parent_key}{separator}{index}" if parent_key else str(index), ) else: output_dict[f"{parent_key}"] = member step(input_dict) return output_dict
610,276
1,002
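A usage sketch with made-up input; note the prefix argument is accepted but unused in this version:

nested = {'a': {'b': [1, 2]}, 'c': 3}
print(flatten(nested))
# {'a/b/0': 1, 'a/b/1': 2, 'c': 3}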
4c0d82e762873868710f4a732df621a2dd4bc4d9
codecrap/qtt
src/qtt/pgeometry.py
[ "MIT" ]
Python
circular_mean
<not_specific>
def circular_mean(weights, angles):
    """ Calculate circular mean of a set of 2D vectors """
    x = y = 0.
    for angle, weight in zip(angles, weights):
        x += math.cos(math.radians(angle)) * weight
        y += math.sin(math.radians(angle)) * weight
    mean = math.degrees(math.atan2(y, x))
    return mean
Calculate circular mean of a set of 2D vectors
Calculate circular mean of a set of 2D vectors
[ "Calculate", "circular", "mean", "of", "a", "set", "of", "2D", "vectors" ]
def circular_mean(weights, angles): x = y = 0. for angle, weight in zip(angles, weights): x += math.cos(math.radians(angle)) * weight y += math.sin(math.radians(angle)) * weight mean = math.degrees(math.atan2(y, x)) return mean
[ "def", "circular_mean", "(", "weights", ",", "angles", ")", ":", "x", "=", "y", "=", "0.", "for", "angle", ",", "weight", "in", "zip", "(", "angles", ",", "weights", ")", ":", "x", "+=", "math", ".", "cos", "(", "math", ".", "radians", "(", "angle", ")", ")", "*", "weight", "y", "+=", "math", ".", "sin", "(", "math", ".", "radians", "(", "angle", ")", ")", "*", "weight", "mean", "=", "math", ".", "degrees", "(", "math", ".", "atan2", "(", "y", ",", "x", ")", ")", "return", "mean" ]
Calculate circular mean of a set of 2D vectors
[ "Calculate", "circular", "mean", "of", "a", "set", "of", "2D", "vectors" ]
[ "\"\"\" Calculate circular mean of a set of 2D vectors \"\"\"" ]
[ { "param": "weights", "type": null }, { "param": "angles", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "weights", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "angles", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def circular_mean(weights, angles): x = y = 0. for angle, weight in zip(angles, weights): x += math.cos(math.radians(angle)) * weight y += math.sin(math.radians(angle)) * weight mean = math.degrees(math.atan2(y, x)) return mean
610,277
432
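A usage sketch showing why the circular mean differs from the arithmetic mean near the 0/360 wrap (note that weights come first, angles second):

print(circular_mean([1.0, 1.0], [350.0, 10.0]))  # ~0.0, not 180.0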
e957f33ff118997c6100d71c76f7cac02b867946
markrdeacon/Python101
roots.py
[ "MIT" ]
Python
sqrt
<not_specific>
def sqrt(x):
    '''Compute the square roots using the method of
    Heron of Alexandria

    Args:
        X: The number for which the square root is
        to be calculated.
    Returns:
        The square root of x.
    Raises:
        ValueError: If x is negative
    '''
    if x < 0:
        raise ValueError('Cannot compute square root of '
                         f'negative number {x}')

    guess = x
    i = 0
    while guess * guess != x and i < 20:
        guess = (guess + x / guess) / 2.0
        i += 1

    return guess
Compute the square roots using the method of Heron of Alexandria Args: X: The number for which the square root is to be calculated. Returns: The square root of x. Raises: ValueError: If x is negative
Compute the square roots using the method of Heron of Alexandria The number for which the square root is to be calculated. Returns: The square root of x. Raises: ValueError: If x is negative
[ "Compute", "the", "square", "roots", "using", "the", "method", "of", "Heron", "of", "Alexandria", "The", "number", "for", "which", "the", "square", "root", "is", "to", "be", "calculated", ".", "Returns", ":", "The", "square", "root", "of", "x", ".", "Raises", ":", "ValueError", ":", "If", "x", "is", "negative" ]
def sqrt(x): if x < 0: raise ValueError('Cannot compute square root of ' f'negative number {x}') guess = x i = 0 while guess * guess != x and i < 20: guess = (guess + x / guess) / 2.0 i += 1 return guess
[ "def", "sqrt", "(", "x", ")", ":", "if", "x", "<", "0", ":", "raise", "ValueError", "(", "'Cannot compute square root of '", "f'negative number {x}'", ")", "guess", "=", "x", "i", "=", "0", "while", "guess", "*", "guess", "!=", "x", "and", "i", "<", "20", ":", "guess", "=", "(", "guess", "+", "x", "/", "guess", ")", "/", "2.0", "i", "+=", "1", "return", "guess" ]
Compute the square roots using the method of Heron of Alexandria
[ "Compute", "the", "square", "roots", "using", "the", "method", "of", "Heron", "of", "Alexandria" ]
[ "'''Compute the square roots using the method of \n Heron of Alexandria\n\n Args:\n X: The number for which the square root is \n to be calculated.\n Returns:\n The square root of x.\n Raises:\n ValueError: If x is negative\n '''" ]
[ { "param": "x", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sqrt(x): if x < 0: raise ValueError('Cannot compute square root of ' f'negative number {x}') guess = x i = 0 while guess * guess != x and i < 20: guess = (guess + x / guess) / 2.0 i += 1 return guess
610,278
84
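A usage sketch; the 20-iteration cap keeps the loop from spinning forever when guess * guess never compares exactly equal to x in floating point:

print(sqrt(25.0))  # ~5.0 after a handful of Heron iterations
print(sqrt(2.0))   # ~1.4142135623730951
try:
    sqrt(-9.0)
except ValueError as exc:
    print(exc)     # Cannot compute square root of negative number -9.0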
55db6402b8f20a876e916f24f693f3ff16fcdf05
kbsezginel/tutorials
file-conversion/tinker_to_xyz.py
[ "MIT" ]
Python
write_xyz
null
def write_xyz(file_name, atoms, coordinates, header='mol'):
    """ Write given atomic coordinates to file in xyz format """
    with open(file_name, 'w') as xyz_file:
        xyz_file.write(str(len(coordinates)) + '\n')
        xyz_file.write(header + '\n')
        format = '%-2s %7.4f %7.4f %7.4f\n'
        for atom, coor in zip(atoms, coordinates):
            xyz_file.write(format % (atom, coor[0], coor[1], coor[2]))
Write given atomic coordinates to file in xyz format
Write given atomic coordinates to file in xyz format
[ "Write", "given", "atomic", "coordinates", "to", "file", "in", "xyz", "format" ]
def write_xyz(file_name, atoms, coordinates, header='mol'): with open(file_name, 'w') as xyz_file: xyz_file.write(str(len(coordinates)) + '\n') xyz_file.write(header + '\n') format = '%-2s %7.4f %7.4f %7.4f\n' for atom, coor in zip(atoms, coordinates): xyz_file.write(format % (atom, coor[0], coor[1], coor[2]))
[ "def", "write_xyz", "(", "file_name", ",", "atoms", ",", "coordinates", ",", "header", "=", "'mol'", ")", ":", "with", "open", "(", "file_name", ",", "'w'", ")", "as", "xyz_file", ":", "xyz_file", ".", "write", "(", "str", "(", "len", "(", "coordinates", ")", ")", "+", "'\\n'", ")", "xyz_file", ".", "write", "(", "header", "+", "'\\n'", ")", "format", "=", "'%-2s %7.4f %7.4f %7.4f\\n'", "for", "atom", ",", "coor", "in", "zip", "(", "atoms", ",", "coordinates", ")", ":", "xyz_file", ".", "write", "(", "format", "%", "(", "atom", ",", "coor", "[", "0", "]", ",", "coor", "[", "1", "]", ",", "coor", "[", "2", "]", ")", ")" ]
Write given atomic coordinates to file in xyz format
[ "Write", "given", "atomic", "coordinates", "to", "file", "in", "xyz", "format" ]
[ "\"\"\" Write given atomic coordinates to file in xyz format \"\"\"" ]
[ { "param": "file_name", "type": null }, { "param": "atoms", "type": null }, { "param": "coordinates", "type": null }, { "param": "header", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "atoms", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "coordinates", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "header", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def write_xyz(file_name, atoms, coordinates, header='mol'): with open(file_name, 'w') as xyz_file: xyz_file.write(str(len(coordinates)) + '\n') xyz_file.write(header + '\n') format = '%-2s %7.4f %7.4f %7.4f\n' for atom, coor in zip(atoms, coordinates): xyz_file.write(format % (atom, coor[0], coor[1], coor[2]))
610,279
644
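A usage sketch; the coordinates below are sample numbers for a three-atom water-like geometry, used only to show the file layout:

write_xyz('water.xyz', ['O', 'H', 'H'],
          [[0.0, 0.0, 0.0], [0.757, 0.586, 0.0], [-0.757, 0.586, 0.0]],
          header='water')
# water.xyz now holds the atom count, the header line, then one
# '%-2s %7.4f %7.4f %7.4f' row per atom, e.g. 'O   0.0000  0.0000  0.0000'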
293dd51ab18198b58d47a54541a4421ae65ccb02
ookimi/ldt
ldt/dicts/normalize.py
[ "Apache-2.0" ]
Python
turn_to_words
<not_specific>
def turn_to_words(word):
    """Split on non-alphanumeric characters, if any."""
    res = []
    subword = ""
    for char in list(word):
        if char.isalnum():
            subword = subword + char
        else:
            if subword:
                res.append(subword)
            subword = ""
    res.append(subword)
    return res
Split on non-alphanumeric characters, if any.
Split on non-alphanumeric characters, if any.
[ "Split", "on", "non", "-", "alphanumeric", "characters", "if", "any", "." ]
def turn_to_words(word): res = [] subword = "" for char in list(word): if char.isalnum(): subword = subword + char else: if subword: res.append(subword) subword = "" res.append(subword) return res
[ "def", "turn_to_words", "(", "word", ")", ":", "res", "=", "[", "]", "subword", "=", "\"\"", "for", "char", "in", "list", "(", "word", ")", ":", "if", "char", ".", "isalnum", "(", ")", ":", "subword", "=", "subword", "+", "char", "else", ":", "if", "subword", ":", "res", ".", "append", "(", "subword", ")", "subword", "=", "\"\"", "res", ".", "append", "(", "subword", ")", "return", "res" ]
Split on non-alphanumeric characters, if any.
[ "Split", "on", "non", "-", "alphanumeric", "characters", "if", "any", "." ]
[ "\"\"\"Split on non-alphanumeric characters, if any.\"\"\"" ]
[ { "param": "word", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "word", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def turn_to_words(word): res = [] subword = "" for char in list(word): if char.isalnum(): subword = subword + char else: if subword: res.append(subword) subword = "" res.append(subword) return res
610,280
1,012
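A usage sketch; the unconditional final append means a trailing delimiter yields a trailing empty string:

print(turn_to_words('foo-bar'))  # ['foo', 'bar']
print(turn_to_words('foo-'))     # ['foo', ''] -- note the trailing empty string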
9e6d38aff03aae22bd4ba8fd38fcee5f7f88431d
dleebrown/ANNA
ANNA_test.py
[ "BSD-3-Clause" ]
Python
unnormalize_parameters
<not_specific>
def unnormalize_parameters(normed_parameters, minvals, maxvals):
    """takes in a list of parameters and undoes simple min/max normalization according to min/max values
    INPUTS
    normed_parameters: length n, containing parameters for a star
    minvals: length n, minimum parameter values
    maxvals: length n, max parameter values
    OUTPUTS
    unnormed_parameters: length n, unnormalized parameters
    """
    unnormed_parameters = normed_parameters*(maxvals-minvals) + minvals
    return unnormed_parameters
takes in a list of parameters and undoes simple min/max normalization according to min/max values INPUTS normed_parameters: length n, containing parameters for a star minvals: length n, minimum parameter values maxvals: length n, max parameter values OUTPUTS unnormed_parameters: length n, unnormalized parameters
takes in a list of parameters and undoes simple min/max normalization according to min/max values INPUTS normed_parameters: length n, containing parameters for a star minvals: length n, minimum parameter values maxvals: length n, max parameter values OUTPUTS unnormed_parameters: length n, unnormalized parameters
[ "takes", "in", "a", "list", "of", "parameters", "and", "undoes", "simple", "min", "/", "max", "normalization", "according", "to", "min", "/", "max", "values", "INPUTS", "normed_parameters", ":", "length", "n", "containing", "parameters", "for", "a", "star", "minvals", ":", "length", "n", "minimum", "parameter", "values", "maxvals", ":", "length", "n", "max", "parameter", "values", "OUTPUTS", "unnormed_parameters", ":", "length", "n", "unnormalized", "parameters" ]
def unnormalize_parameters(normed_parameters, minvals, maxvals): unnormed_parameters = normed_parameters*(maxvals-minvals) + minvals return unnormed_parameters
[ "def", "unnormalize_parameters", "(", "normed_parameters", ",", "minvals", ",", "maxvals", ")", ":", "unnormed_parameters", "=", "normed_parameters", "*", "(", "maxvals", "-", "minvals", ")", "+", "minvals", "return", "unnormed_parameters" ]
takes in a list of parameters and undoes simple min/max normalization according to min/max values INPUTS normed_parameters: length n, containing parameters for a star minvals: length n, minimum parameter values maxvals: length n, max parameter values OUTPUTS unnormed_parameters: length n, unnormalized parameters
[ "takes", "in", "a", "list", "of", "parameters", "and", "undoes", "simple", "min", "/", "max", "normalization", "according", "to", "min", "/", "max", "values", "INPUTS", "normed_parameters", ":", "length", "n", "containing", "parameters", "for", "a", "star", "minvals", ":", "length", "n", "minimum", "parameter", "values", "maxvals", ":", "length", "n", "max", "parameter", "values", "OUTPUTS", "unnormed_parameters", ":", "length", "n", "unnormalized", "parameters" ]
[ "\"\"\"takes in a list of parameters and undoes simple min/max normalization according to min/max values\n INPUTS\n normed_parameters: length n, containing parameters for a star\n minvals: length n, minimum parameter values\n maxvals: length n, max parameter values\n OUTPUTS\n unnormed_parameters: length n, unnormalized parameters\n \"\"\"" ]
[ { "param": "normed_parameters", "type": null }, { "param": "minvals", "type": null }, { "param": "maxvals", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "normed_parameters", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "minvals", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "maxvals", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def unnormalize_parameters(normed_parameters, minvals, maxvals): unnormed_parameters = normed_parameters*(maxvals-minvals) + minvals return unnormed_parameters
610,281
306
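A usage sketch with made-up stellar-parameter ranges; numpy arrays make the arithmetic elementwise, though plain floats work too:

import numpy as np

normed = np.array([0.0, 0.5, 1.0])
minvals = np.array([4000.0, -1.0, 0.0])
maxvals = np.array([7000.0, 1.0, 5.0])
print(unnormalize_parameters(normed, minvals, maxvals))  # [4000. 0. 5.]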
8034744b1f6165b07180c825a46770e968b92e59
jie8357IOII/airflow-dynamic-etl
etl/etl_register.py
[ "MIT" ]
Python
sort_tasks
<not_specific>
def sort_tasks(tasks):
    """sort task with task_priority

    each task contain type & priority
    sort task with task_priority

    Args:
        tasks (List[function]): task contain type & priority

    Returns:
        List[function]: sorted tasks
    """
    return sorted(tasks, key=lambda task: task._task_priority)
sort task with task_priority each task contain type & priority sort task with task_priority Args: tasks (List[function]): task contain type & priority Returns: List[function]: sorted tasks
sort task with task_priority each task contain type & priority sort task with task_priority
[ "sort", "task", "with", "task_priority", "each", "task", "contain", "type", "&", "priority", "sort", "task", "with", "task_priority" ]
def sort_tasks(tasks): return sorted(tasks, key=lambda task: task._task_priority)
[ "def", "sort_tasks", "(", "tasks", ")", ":", "return", "sorted", "(", "tasks", ",", "key", "=", "lambda", "task", ":", "task", ".", "_task_priority", ")" ]
sort task with task_priority each task contain type & priority sort task with task_priority
[ "sort", "task", "with", "task_priority", "each", "task", "contain", "type", "&", "priority", "sort", "task", "with", "task_priority" ]
[ "\"\"\"sort task with task_priority\n\n each task contain type & priority\n sort task with task_priority\n\n Args:\n tasks (List[function]): task contain type & priority\n\n Returns:\n List[function]: sorted tasks\n \"\"\"" ]
[ { "param": "tasks", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "List[function]" } ], "raises": [], "params": [ { "identifier": "tasks", "type": null, "docstring": "task contain type & priority", "docstring_tokens": [ "task", "contain", "type", "&", "priority" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def sort_tasks(tasks): return sorted(tasks, key=lambda task: task._task_priority)
610,282
1,022
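A usage sketch; tasks here are plain functions carrying a _task_priority attribute, mirroring what this module's registration code presumably attaches:

def make_task(name, priority):
    def task():
        return name
    task._task_priority = priority
    return task

tasks = [make_task('load', 2), make_task('extract', 1), make_task('store', 3)]
print([t() for t in sort_tasks(tasks)])  # ['extract', 'load', 'store']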
bb7b63723f91a6a3f6c2992804b70b7df150325b
DerekRoy/free.dm-Common
freedm/utils/formatters.py
[ "MIT" ]
Python
ellipsis
str
def ellipsis(text, length: int) -> str:
    '''
    Truncates a too long string at the provided length and returns it together with an ellipsis
    :param str text: The text to truncate
    :param int length: The maximum length
    :returns text: The truncated text
    :rtype: str
    '''
    try:
        return textwrap.shorten(text, length, placeholder='...')
    except:
        return text
Truncates a too long string at the provided length and returns it together with an ellipsis :param str text: The text to truncate :param int length: The maximum length :returns text: The truncated text :rtype: str
Truncates a too long string at the provided length and returns it together with an ellipsis
[ "Truncates", "a", "too", "long", "string", "at", "the", "provided", "length", "and", "returns", "it", "together", "with", "an", "ellipsis" ]
def ellipsis(text, length: int) -> str: try: return textwrap.shorten(text, length, placeholder='...') except: return text
[ "def", "ellipsis", "(", "text", ",", "length", ":", "int", ")", "->", "str", ":", "try", ":", "return", "textwrap", ".", "shorten", "(", "text", ",", "length", ",", "placeholder", "=", "'...'", ")", "except", ":", "return", "text" ]
Truncates a too long string at the provided length and returns it together with an ellipsis
[ "Truncates", "a", "too", "long", "string", "at", "the", "provided", "length", "and", "returns", "it", "together", "with", "an", "ellipsis" ]
[ "'''\n Truncates a too long string at the provided length and returns it together with an ellipsis\n :param str text: The text to truncate\n :param int length: The maximum length\n :returns text: The truncated text\n :rtype: str\n '''" ]
[ { "param": "text", "type": null }, { "param": "length", "type": "int" } ]
{ "returns": [ { "docstring": "The truncated text", "docstring_tokens": [ "The", "truncated", "text" ], "type": "text" } ], "raises": [], "params": [ { "identifier": "text", "type": null, "docstring": "The text to truncate", "docstring_tokens": [ "The", "text", "to", "truncate" ], "default": null, "is_optional": false }, { "identifier": "length", "type": "int", "docstring": "The maximum length", "docstring_tokens": [ "The", "maximum", "length" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
import textwrap def ellipsis(text, length: int) -> str: try: return textwrap.shorten(text, length, placeholder='...') except: return text
610,283
815
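A usage sketch (textwrap must be imported, as in the code_with_imports column); the bare except returns the input unchanged on any failure, which also hides genuine errors:

print(ellipsis('a fairly long sentence to shorten', 15))  # a fairly...
print(ellipsis(None, 15))  # None -- textwrap raises, the bare except swallows it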
2948acfadd49c24754e74ef61a81f42ef61460e1
fstab50/branchdiff
scripts/builddeb.py
[ "MIT" ]
Python
increment_version
<not_specific>
def increment_version(current):
    """
    Returns current version incremented by 1 minor version number
    """
    minor = current.split('.')[-1]
    major = '.'.join(current.split('.')[:-1])
    inc_minor = int(minor) + 1
    return major + '.' + str(inc_minor)
Returns current version incremented by 1 minor version number
Returns current version incremented by 1 minor version number
[ "Returns", "current", "version", "incremented", "by", "1", "minor", "version", "number" ]
def increment_version(current): minor = current.split('.')[-1] major = '.'.join(current.split('.')[:-1]) inc_minor = int(minor) + 1 return major + '.' + str(inc_minor)
[ "def", "increment_version", "(", "current", ")", ":", "minor", "=", "current", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "major", "=", "'.'", ".", "join", "(", "current", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "inc_minor", "=", "int", "(", "minor", ")", "+", "1", "return", "major", "+", "'.'", "+", "str", "(", "inc_minor", ")" ]
Returns current version incremented by 1 minor version number
[ "Returns", "current", "version", "incremented", "by", "1", "minor", "version", "number" ]
[ "\"\"\"\n Returns current version incremented by 1 minor version number\n \"\"\"" ]
[ { "param": "current", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "current", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def increment_version(current): minor = current.split('.')[-1] major = '.'.join(current.split('.')[:-1]) inc_minor = int(minor) + 1 return major + '.' + str(inc_minor)
610,284
112
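A usage sketch; the bump is purely string/integer based, with no carry or semver semantics:

print(increment_version('1.6.7'))  # 1.6.8
print(increment_version('1.9'))    # 1.10 -- no carry to the major part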
4360dbfe331b14cc1dba23bc901234d471a9fc1f
nathan-hekman/platform-services-python-sdk
ibm_platform_services/case_management_v1.py
[ "Apache-2.0" ]
Python
from_dict
'FileWithMetadata'
def from_dict(cls, _dict: Dict) -> 'FileWithMetadata':
    """Initialize a FileWithMetadata object from a json dictionary."""
    args = {}
    if 'data' in _dict:
        args['data'] = _dict.get('data')
    else:
        raise ValueError('Required property \'data\' not present in FileWithMetadata JSON')
    if 'filename' in _dict:
        args['filename'] = _dict.get('filename')
    if 'content_type' in _dict:
        args['content_type'] = _dict.get('content_type')
    return cls(**args)
Initialize a FileWithMetadata object from a json dictionary.
Initialize a FileWithMetadata object from a json dictionary.
[ "Initialize", "a", "FileWithMetadata", "object", "from", "a", "json", "dictionary", "." ]
def from_dict(cls, _dict: Dict) -> 'FileWithMetadata': args = {} if 'data' in _dict: args['data'] = _dict.get('data') else: raise ValueError('Required property \'data\' not present in FileWithMetadata JSON') if 'filename' in _dict: args['filename'] = _dict.get('filename') if 'content_type' in _dict: args['content_type'] = _dict.get('content_type') return cls(**args)
[ "def", "from_dict", "(", "cls", ",", "_dict", ":", "Dict", ")", "->", "'FileWithMetadata'", ":", "args", "=", "{", "}", "if", "'data'", "in", "_dict", ":", "args", "[", "'data'", "]", "=", "_dict", ".", "get", "(", "'data'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'data\\' not present in FileWithMetadata JSON'", ")", "if", "'filename'", "in", "_dict", ":", "args", "[", "'filename'", "]", "=", "_dict", ".", "get", "(", "'filename'", ")", "if", "'content_type'", "in", "_dict", ":", "args", "[", "'content_type'", "]", "=", "_dict", ".", "get", "(", "'content_type'", ")", "return", "cls", "(", "**", "args", ")" ]
Initialize a FileWithMetadata object from a json dictionary.
[ "Initialize", "a", "FileWithMetadata", "object", "from", "a", "json", "dictionary", "." ]
[ "\"\"\"Initialize a FileWithMetadata object from a json dictionary.\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "_dict", "type": "Dict" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "_dict", "type": "Dict", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_dict(cls, _dict: Dict) -> 'FileWithMetadata': args = {} if 'data' in _dict: args['data'] = _dict.get('data') else: raise ValueError('Required property \'data\' not present in FileWithMetadata JSON') if 'filename' in _dict: args['filename'] = _dict.get('filename') if 'content_type' in _dict: args['content_type'] = _dict.get('content_type') return cls(**args)
610,285
550
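A runnable sketch that rebinds the method onto a minimal stand-in for the SDK's FileWithMetadata class (the real class lives in ibm_platform_services, and Dict from typing must already be in scope when the def above is evaluated):

from typing import Dict

class FileWithMetadata:  # minimal stand-in mirroring the SDK constructor
    def __init__(self, data, filename=None, content_type=None):
        self.data, self.filename, self.content_type = data, filename, content_type
    from_dict = classmethod(from_dict)  # reuse the record's function for the sketch

fm = FileWithMetadata.from_dict({'data': b'bytes', 'filename': 'notes.txt'})
print(fm.filename)  # notes.txt
FileWithMetadata.from_dict({})  # raises ValueError: required 'data' missing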
44fd5ae3dfdd2e33fe7f4b784405363f2e417375
qxcv/joint-regressor
keras/models.py
[ "Apache-2.0" ]
Python
repr_layer
<not_specific>
def repr_layer(layer):
    """Pretty name for a Keras layer"""
    conf = layer.get_config()
    name = conf['name']
    input_shape = layer.input_shape
    output_shape = layer.output_shape
    return '{} ({}->{})'.format(name, input_shape, output_shape)
Pretty name for a Keras layer
Pretty name for a Keras layer
[ "Pretty", "name", "for", "a", "Keras", "layer" ]
def repr_layer(layer): conf = layer.get_config() name = conf['name'] input_shape = layer.input_shape output_shape = layer.output_shape return '{} ({}->{})'.format(name, input_shape, output_shape)
[ "def", "repr_layer", "(", "layer", ")", ":", "conf", "=", "layer", ".", "get_config", "(", ")", "name", "=", "conf", "[", "'name'", "]", "input_shape", "=", "layer", ".", "input_shape", "output_shape", "=", "layer", ".", "output_shape", "return", "'{} ({}->{})'", ".", "format", "(", "name", ",", "input_shape", ",", "output_shape", ")" ]
Pretty name for a Keras layer
[ "Pretty", "name", "for", "a", "Keras", "layer" ]
[ "\"\"\"Pretty name for a Keras layer\"\"\"" ]
[ { "param": "layer", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "layer", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def repr_layer(layer): conf = layer.get_config() name = conf['name'] input_shape = layer.input_shape output_shape = layer.output_shape return '{} ({}->{})'.format(name, input_shape, output_shape)
610,286
7
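A usage sketch with a duck-typed stand-in; any built Keras layer exposing get_config, input_shape and output_shape prints the same way:

class FakeLayer:
    input_shape = (None, 64)
    output_shape = (None, 32)
    def get_config(self):
        return {'name': 'dense_1'}

print(repr_layer(FakeLayer()))  # dense_1 ((None, 64)->(None, 32))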
8faa84053b2cffa4efc282ada0f6151585353954
rwbogl/huffman
huffman.py
[ "MIT" ]
Python
inverse_dict
<not_specific>
def inverse_dict(original):
    """Return a dictionary that is the inverse of the original.

    Given the pair original[key] = value, the returned dictionary will give
    ret[value] = key. It is important to keep two separate dictionaries in case
    there is key/value collision. Trying to insert a value that matches a key
    as a key will overwrite the old key.

    Example:
        original = {"a": "b", "foo": "a"}
        original["a"] = "foo" # Lost pair {"a": "b"}.

    :original: Dictionary.
    :returns: Inverse dictionary of `original`.

    """
    ret = dict()
    for key, value in original.items():
        ret[value] = key
    return ret
Return a dictionary that is the inverse of the original. Given the pair original[key] = value, the returned dictionary will give ret[value] = key. It is important to keep two separate dictionaries in case there is key/value collision. Trying to insert a value that matches a key as a key will overwrite the old key. Example: original = {"a": "b", "foo": "a"} original["a"] = "foo" # Lost pair {"a": "b"}. :original: Dictionary. :returns: Inverse dictionary of `original`.
Return a dictionary that is the inverse of the original. Given the pair original[key] = value, the returned dictionary will give ret[value] = key. It is important to keep two separate dictionaries in case there is key/value collision. Trying to insert a value that matches a key as a key will overwrite the old key.
[ "Return", "a", "dictionary", "that", "is", "the", "inverse", "of", "the", "original", ".", "Given", "the", "pair", "original", "[", "key", "]", "=", "value", "the", "returned", "dictionary", "will", "give", "ret", "[", "value", "]", "=", "key", ".", "It", "is", "important", "to", "keep", "two", "separate", "dictionaries", "in", "case", "there", "is", "key", "/", "value", "collision", ".", "Trying", "to", "insert", "a", "value", "that", "matches", "a", "key", "as", "a", "key", "will", "overwrite", "the", "old", "key", "." ]
def inverse_dict(original): ret = dict() for key, value in original.items(): ret[value] = key return ret
[ "def", "inverse_dict", "(", "original", ")", ":", "ret", "=", "dict", "(", ")", "for", "key", ",", "value", "in", "original", ".", "items", "(", ")", ":", "ret", "[", "value", "]", "=", "key", "return", "ret" ]
Return a dictionary that is the inverse of the original.
[ "Return", "a", "dictionary", "that", "is", "the", "inverse", "of", "the", "original", "." ]
[ "\"\"\"Return a dictionary that is the inverse of the original.\n\n Given the pair original[key] = value, the returned dictionary will give\n ret[value] = key. It is important to keep two separate dictionaries in case\n there is key/value collision. Trying to insert a value that matches a key\n as a key will overwrite the old key.\n\n Example:\n original = {\"a\": \"b\", \"foo\": \"a\"}\n original[\"a\"] = \"foo\" # Lost pair {\"a\": \"b\"}.\n\n :original: Dictionary.\n :returns: Inverse dictionary of `original`.\n\n \"\"\"" ]
[ { "param": "original", "type": null } ]
{ "returns": [ { "docstring": "Inverse dictionary of `original`.", "docstring_tokens": [ "Inverse", "dictionary", "of", "`", "original", "`", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "original", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "original", "docstring": null, "docstring_tokens": [ "None" ] } ] }
def inverse_dict(original): ret = dict() for key, value in original.items(): ret[value] = key return ret
610,287
999
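A usage sketch; as the docstring warns, duplicate values collapse, with the last key written winning:

print(inverse_dict({'a': '0', 'b': '10'}))  # {'0': 'a', '10': 'b'}
print(inverse_dict({'a': 1, 'b': 1}))       # {1: 'b'} -- 'a' is lost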
d23cc055d39fbc9d45e52b45a03269799fda8daf
callicles/Honeyword-generators
algorithms/classifier.py
[ "MIT" ]
Python
isSameCharacterSequence
<not_specific>
def isSameCharacterSequence(word):
    """
    Checks if the string passed to it is in a sequence of identical characters
    """
    if len(word) == 1:
        return False
    else:
        for i in range(len(word) - 1):
            if word[i] != word[i + 1]:
                return False
    return True
Checks if the string passed to it is in a sequence of identical characters
Checks if the string passed to it is in a sequence of identical characters
[ "Checks", "if", "the", "string", "passed", "to", "it", "is", "in", "a", "sequence", "of", "identical", "characters" ]
def isSameCharacterSequence(word): if len(word) == 1: return False else: for i in range(len(word) - 1): if word[i] != word[i + 1]: return False return True
[ "def", "isSameCharacterSequence", "(", "word", ")", ":", "if", "len", "(", "word", ")", "==", "1", ":", "return", "False", "else", ":", "for", "i", "in", "range", "(", "len", "(", "word", ")", "-", "1", ")", ":", "if", "word", "[", "i", "]", "!=", "word", "[", "i", "+", "1", "]", ":", "return", "False", "return", "True" ]
Checks if the string passed to it is in a sequence of identical characters
[ "Checks", "if", "the", "string", "passed", "to", "it", "is", "in", "a", "sequence", "of", "identical", "characters" ]
[ "\"\"\"\n Checks if the string passed to it is in a sequence of identical characters\n \"\"\"" ]
[ { "param": "word", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "word", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def isSameCharacterSequence(word): if len(word) == 1: return False else: for i in range(len(word) - 1): if word[i] != word[i + 1]: return False return True
610,288
105
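A usage sketch; single characters are deliberately excluded, and the empty string passes vacuously because the loop body never runs:

print(isSameCharacterSequence('aaaa'))  # True
print(isSameCharacterSequence('aab'))   # False
print(isSameCharacterSequence('a'))     # False -- by the explicit length-1 check
print(isSameCharacterSequence(''))      # True  -- vacuous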
d4e531a33f2f6dca7e851e2ad8c69136b26bd282
Satalia/solvengine-python-modeling-lib
pysolveengine/helper.py
[ "MIT" ]
Python
check_name
null
def check_name(name, obj_type):
    """
    For variables and constraints: if they contain ':', the solver will fail
    :param name: name to check
    :param obj_type: 'variable' or 'constraint'
    :return: Nothing, will raise error if the name is incorrect
    """
    if name.find(":") > -1:
        str_err = ''.join(["A ", obj_type, " name cannot contain ':'.\n",
                           "You named '", name, "'\n",
                           "Please change it and launch it again."])
        raise ValueError(str_err)
For variables and constraints: if they contain ':', the solver will fail :param name: name to check :param obj_type: 'variable' or 'constraint' :return: Nothing, will raise error if the name is incorrect
For variables and constraints: if they contain ':', the solver will fail
[ "For", "variables", "and", "constraints", ":", "if", "they", "contain", "'", ":", "'", "the", "solver", "will", "fail" ]
def check_name(name, obj_type): if name.find(":") > -1: str_err = ''.join(["A ", obj_type, " name cannot contain ':'.\n", "You named '", name, "'\n", "Please change it and launch it again."]) raise ValueError(str_err)
[ "def", "check_name", "(", "name", ",", "obj_type", ")", ":", "if", "name", ".", "find", "(", "\":\"", ")", ">", "-", "1", ":", "str_err", "=", "''", ".", "join", "(", "[", "\"A \"", ",", "obj_type", ",", "\" name cannot contain ':'.\\n\"", ",", "\"You named '\"", ",", "name", ",", "\"'\\n\"", ",", "\"Please change it and launch it again.\"", "]", ")", "raise", "ValueError", "(", "str_err", ")" ]
For variables and constraints: if they contain ':', the solver will fail
[ "For", "variables", "and", "constraints", ":", "if", "they", "contain", "'", ":", "'", "the", "solver", "will", "fail" ]
[ "\"\"\"\n For variables and constraints: if they contain ':', the solver will fail\n :param name: name to check\n :param obj_type: 'variable' or 'constraint'\n :return: Nothing, will raise error if the name is incorrect\n \"\"\"" ]
[ { "param": "name", "type": null }, { "param": "obj_type", "type": null } ]
{ "returns": [ { "docstring": "Nothing, will raise error if the name is incorrect", "docstring_tokens": [ "Nothing", "will", "raise", "error", "if", "the", "name", "is", "incorrect" ], "type": null } ], "raises": [], "params": [ { "identifier": "name", "type": null, "docstring": "name to check", "docstring_tokens": [ "name", "to", "check" ], "default": null, "is_optional": null }, { "identifier": "obj_type", "type": null, "docstring": "'variable' or 'constraint'", "docstring_tokens": [ "'", "variable", "'", "or", "'", "constraint", "'" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_name(name, obj_type): if name.find(":") > -1: str_err = ''.join(["A ", obj_type, " name cannot contain ':'.\n", "You named '", name, "'\n", "Please change it and launch it again."]) raise ValueError(str_err)
610,289
653
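A usage sketch:

check_name('total_cost', 'variable')  # passes silently
try:
    check_name('x:1', 'variable')
except ValueError as exc:
    print(exc)  # A variable name cannot contain ':'. ...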
0b8921509c3a26f1ac31d9b1795f7f430ef4d9f2
matthewware/twitter-slack-bot
twitter.py
[ "Apache-2.0" ]
Python
preprocess_text
<not_specific>
def preprocess_text(status):
    """
    convert extended tweets text and full text tweets
    to something our template can handle
    """
    if hasattr(status, "full_text"):
        status.text = status.full_text
    if hasattr(status, "extended_tweet"):
        status.text = status.extended_tweet["full_text"]
    if hasattr(status, "quoted_status"):
        if hasattr(status.quoted_status, "full_text"):
            status.quoted_status.text = status.quoted_status.full_text
    if hasattr(status, "retweeted_status"):
        if hasattr(status.retweeted_status, "full_text"):
            status.retweeted_status.text = status.retweeted_status.full_text
    return status
convert extended tweets text and full text tweets to something our template can handle
convert extended tweets text and full text tweets to something our template can handle
[ "convert", "extended", "tweets", "text", "and", "full", "text", "tweets", "to", "something", "our", "template", "can", "handle" ]
def preprocess_text(status): if hasattr(status, "full_text"): status.text = status.full_text if hasattr(status, "extended_tweet"): status.text = status.extended_tweet["full_text"] if hasattr(status, "quoted_status"): if hasattr(status.quoted_status, "full_text"): status.quoted_status.text = status.quoted_status.full_text if hasattr(status, "retweeted_status"): if hasattr(status.retweeted_status, "full_text"): status.retweeted_status.text = status.retweeted_status.full_text return status
[ "def", "preprocess_text", "(", "status", ")", ":", "if", "hasattr", "(", "status", ",", "\"full_text\"", ")", ":", "status", ".", "text", "=", "status", ".", "full_text", "if", "hasattr", "(", "status", ",", "\"extended_tweet\"", ")", ":", "status", ".", "text", "=", "status", ".", "extended_tweet", "[", "\"full_text\"", "]", "if", "hasattr", "(", "status", ",", "\"quoted_status\"", ")", ":", "if", "hasattr", "(", "status", ".", "quoted_status", ",", "\"full_text\"", ")", ":", "status", ".", "quoted_status", ".", "text", "=", "status", ".", "quoted_status", ".", "full_text", "if", "hasattr", "(", "status", ",", "\"retweeted_status\"", ")", ":", "if", "hasattr", "(", "status", ".", "retweeted_status", ",", "\"full_text\"", ")", ":", "status", ".", "retweeted_status", ".", "text", "=", "status", ".", "retweeted_status", ".", "full_text", "return", "status" ]
convert extended tweets text and full text tweets to something our template can handle
[ "convert", "extended", "tweets", "text", "and", "full", "text", "tweets", "to", "something", "our", "template", "can", "handle" ]
[ "\"\"\"\n convert extended tweets text and full text tweets\n to something our template can handle\n \"\"\"" ]
[ { "param": "status", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "status", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def preprocess_text(status): if hasattr(status, "full_text"): status.text = status.full_text if hasattr(status, "extended_tweet"): status.text = status.extended_tweet["full_text"] if hasattr(status, "quoted_status"): if hasattr(status.quoted_status, "full_text"): status.quoted_status.text = status.quoted_status.full_text if hasattr(status, "retweeted_status"): if hasattr(status.retweeted_status, "full_text"): status.retweeted_status.text = status.retweeted_status.full_text return status
610,290
110
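A sketch with a hypothetical bare object standing in for a tweepy Status; the helper copies whichever long-form text attribute exists into .text:

class FakeStatus:
    pass

status = FakeStatus()
status.full_text = 'the whole extended tweet text'
print(preprocess_text(status).text)  # the whole extended tweet text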
cb8906fb1cfd4d2198d9f8555b1ef5f91fb3c4c9
AudiovisualMetadataPlatform/gentle
gentle/diff_align.py
[ "MIT" ]
Python
by_word
null
def by_word(opcodes):
    '''Take difflib.SequenceMatcher.get_opcodes() output and
    return an equivalent opcode sequence that only modifies
    one word at a time'''
    for op, s1, e1, s2, e2 in opcodes:
        if op == 'delete':
            for i in range(s1, e1):
                yield (op, i, i+1, s2, s2)
        elif op == 'insert':
            for i in range(s2, e2):
                yield (op, s1, s1, i, i+1)
        else:
            len1 = e1-s1
            len2 = e2-s2
            for i1, i2 in zip(range(s1, e1), range(s2, e2)):
                yield (op, i1, i1 + 1, i2, i2 + 1)
            if len1 > len2:
                for i in range(s1 + len2, e1):
                    yield ('delete', i, i+1, e2, e2)
            if len2 > len1:
                for i in range(s2 + len1, e2):
                    yield ('insert', s1, s1, i, i+1)
Take difflib.SequenceMatcher.get_opcodes() output and return an equivalent opcode sequence that only modifies one word at a time
Take difflib.SequenceMatcher.get_opcodes() output and return an equivalent opcode sequence that only modifies one word at a time
[ "Take", "difflib", ".", "SequenceMatcher", ".", "get_opcodes", "()", "output", "and", "return", "an", "equivalent", "opcode", "sequence", "that", "only", "modifies", "one", "word", "at", "a", "time" ]
def by_word(opcodes): for op, s1, e1, s2, e2 in opcodes: if op == 'delete': for i in range(s1, e1): yield (op, i, i+1, s2, s2) elif op == 'insert': for i in range(s2, e2): yield (op, s1, s1, i, i+1) else: len1 = e1-s1 len2 = e2-s2 for i1, i2 in zip(range(s1, e1), range(s2, e2)): yield (op, i1, i1 + 1, i2, i2 + 1) if len1 > len2: for i in range(s1 + len2, e1): yield ('delete', i, i+1, e2, e2) if len2 > len1: for i in range(s2 + len1, e2): yield ('insert', s1, s1, i, i+1)
[ "def", "by_word", "(", "opcodes", ")", ":", "for", "op", ",", "s1", ",", "e1", ",", "s2", ",", "e2", "in", "opcodes", ":", "if", "op", "==", "'delete'", ":", "for", "i", "in", "range", "(", "s1", ",", "e1", ")", ":", "yield", "(", "op", ",", "i", ",", "i", "+", "1", ",", "s2", ",", "s2", ")", "elif", "op", "==", "'insert'", ":", "for", "i", "in", "range", "(", "s2", ",", "e2", ")", ":", "yield", "(", "op", ",", "s1", ",", "s1", ",", "i", ",", "i", "+", "1", ")", "else", ":", "len1", "=", "e1", "-", "s1", "len2", "=", "e2", "-", "s2", "for", "i1", ",", "i2", "in", "zip", "(", "range", "(", "s1", ",", "e1", ")", ",", "range", "(", "s2", ",", "e2", ")", ")", ":", "yield", "(", "op", ",", "i1", ",", "i1", "+", "1", ",", "i2", ",", "i2", "+", "1", ")", "if", "len1", ">", "len2", ":", "for", "i", "in", "range", "(", "s1", "+", "len2", ",", "e1", ")", ":", "yield", "(", "'delete'", ",", "i", ",", "i", "+", "1", ",", "e2", ",", "e2", ")", "if", "len2", ">", "len1", ":", "for", "i", "in", "range", "(", "s2", "+", "len1", ",", "e2", ")", ":", "yield", "(", "'insert'", ",", "s1", ",", "s1", ",", "i", ",", "i", "+", "1", ")" ]
Take difflib.SequenceMatcher.get_opcodes() output and return an equivalent opcode sequence that only modifies one word at a time
[ "Take", "difflib", ".", "SequenceMatcher", ".", "get_opcodes", "()", "output", "and", "return", "an", "equivalent", "opcode", "sequence", "that", "only", "modifies", "one", "word", "at", "a", "time" ]
[ "'''Take difflib.SequenceMatcher.get_opcodes() output and\n return an equivalent opcode sequence that only modifies\n one word at a time'''" ]
[ { "param": "opcodes", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "opcodes", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def by_word(opcodes): for op, s1, e1, s2, e2 in opcodes: if op == 'delete': for i in range(s1, e1): yield (op, i, i+1, s2, s2) elif op == 'insert': for i in range(s2, e2): yield (op, s1, s1, i, i+1) else: len1 = e1-s1 len2 = e2-s2 for i1, i2 in zip(range(s1, e1), range(s2, e2)): yield (op, i1, i1 + 1, i2, i2 + 1) if len1 > len2: for i in range(s1 + len2, e1): yield ('delete', i, i+1, e2, e2) if len2 > len1: for i in range(s2 + len1, e2): yield ('insert', s1, s1, i, i+1)
610,291
756
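A usage sketch over difflib output (made-up word lists); every yielded opcode now touches at most one word on each side:

import difflib

a = 'the quick brown fox'.split()
b = 'the slow brown cat'.split()
for op in by_word(difflib.SequenceMatcher(a=a, b=b).get_opcodes()):
    print(op)  # e.g. ('equal', 0, 1, 0, 1), ('replace', 1, 2, 1, 2), ...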
1517b01c753405c6dfa8b22b9d2cf628b4015c5b
ska-sa/katsdptelstate
katsdptelstate/rdb_utility.py
[ "BSD-3-Clause" ]
Python
encode_prev_length
bytes
def encode_prev_length(length: int) -> bytes:
    """Special helper for zset previous entry lengths.

    If length < 254 then use 1 byte directly, otherwise
    set first byte to 254 and add 4 trailing bytes as an
    unsigned integer.
    """
    if length < 254:
        return struct.pack('B', length)
    return b'\xfe' + struct.pack("<I", length)
Special helper for zset previous entry lengths. If length < 254 then use 1 byte directly, otherwise set first byte to 254 and add 4 trailing bytes as an unsigned integer.
Special helper for zset previous entry lengths. If length < 254 then use 1 byte directly, otherwise set first byte to 254 and add 4 trailing bytes as an unsigned integer.
[ "Special", "helper", "for", "zset", "previous", "entry", "lengths", ".", "If", "length", "<", "254", "then", "use", "1", "byte", "directly", "otherwise", "set", "first", "byte", "to", "254", "and", "add", "4", "trailing", "bytes", "as", "an", "unsigned", "integer", "." ]
def encode_prev_length(length: int) -> bytes: if length < 254: return struct.pack('B', length) return b'\xfe' + struct.pack("<I", length)
[ "def", "encode_prev_length", "(", "length", ":", "int", ")", "->", "bytes", ":", "if", "length", "<", "254", ":", "return", "struct", ".", "pack", "(", "'B'", ",", "length", ")", "return", "b'\\xfe'", "+", "struct", ".", "pack", "(", "\"<I\"", ",", "length", ")" ]
Special helper for zset previous entry lengths.
[ "Special", "helper", "for", "zset", "previous", "entry", "lengths", "." ]
[ "\"\"\"Special helper for zset previous entry lengths.\n\n If length < 253 then use 1 byte directly, otherwise\n set first byte to 254 and add 4 trailing bytes as an\n unsigned integer.\n \"\"\"" ]
[ { "param": "length", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "length", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import struct def encode_prev_length(length: int) -> bytes: if length < 254: return struct.pack('B', length) return b'\xfe' + struct.pack("<I", length)
610,292
36
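A usage sketch showing both encodings (the 0xfe marker byte followed by a little-endian uint32 for long entries):

print(encode_prev_length(100).hex())  # 64 -- single byte
print(encode_prev_length(300).hex())  # fe2c010000 -- marker + uint32 LE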
13b6ff05e8a6f44a6e96f4d8850f09c46bd5fc2e
kreimanlab/AugMem
other_models/COPE/model/gem.py
[ "MIT" ]
Python
compute_offsets
<not_specific>
def compute_offsets(task, nc_per_task):
    """
    Compute offsets for cifar to determine which
    outputs to select for a given task.
    """
    offset1 = 0
    offset2 = nc_per_task
    return offset1, offset2
Compute offsets for cifar to determine which outputs to select for a given task.
Compute offsets for cifar to determine which outputs to select for a given task.
[ "Compute", "offsets", "for", "cifar", "to", "determine", "which", "outputs", "to", "select", "for", "a", "given", "task", "." ]
def compute_offsets(task, nc_per_task): offset1 = 0 offset2 = nc_per_task return offset1, offset2
[ "def", "compute_offsets", "(", "task", ",", "nc_per_task", ")", ":", "offset1", "=", "0", "offset2", "=", "nc_per_task", "return", "offset1", ",", "offset2" ]
Compute offsets for cifar to determine which outputs to select for a given task.
[ "Compute", "offsets", "for", "cifar", "to", "determine", "which", "outputs", "to", "select", "for", "a", "given", "task", "." ]
[ "\"\"\"\n Compute offsets for cifar to determine which\n outputs to select for a given task.\n \"\"\"" ]
[ { "param": "task", "type": null }, { "param": "nc_per_task", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "task", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "nc_per_task", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compute_offsets(task, nc_per_task): offset1 = 0 offset2 = nc_per_task return offset1, offset2
610,293
554
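A usage sketch; the task argument is accepted for interface compatibility but unused in this variant:

offset1, offset2 = compute_offsets(task=0, nc_per_task=10)
print(offset1, offset2)  # 0 10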
6781dbeae8e24069eb3b6ba6446c67199916db80
Vinicius-Tanigawa/Undergraduate-Research-Project
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Plugins/pint/util.py
[ "MIT" ]
Python
matrix_to_string
<not_specific>
def matrix_to_string(matrix, row_headers=None, col_headers=None, fmtfun=lambda x: str(int(x))):
    """Takes a 2D matrix (as nested list) and returns a string.
    """
    ret = []
    if col_headers:
        ret.append('\t' if row_headers else '' + '\t'.join(col_headers))
    if row_headers:
        ret += [rh + '\t' + '\t'.join(fmtfun(f) for f in row)
                for rh, row in zip(row_headers, matrix)]
    else:
        ret += ['\t'.join(fmtfun(f) for f in row)
                for row in matrix]
    return '\n'.join(ret)
Takes a 2D matrix (as nested list) and returns a string.
Takes a 2D matrix (as nested list) and returns a string.
[ "Takes", "a", "2D", "matrix", "(", "as", "nested", "list", ")", "and", "returns", "a", "string", "." ]
def matrix_to_string(matrix, row_headers=None, col_headers=None, fmtfun=lambda x: str(int(x))): ret = [] if col_headers: ret.append('\t' if row_headers else '' + '\t'.join(col_headers)) if row_headers: ret += [rh + '\t' + '\t'.join(fmtfun(f) for f in row) for rh, row in zip(row_headers, matrix)] else: ret += ['\t'.join(fmtfun(f) for f in row) for row in matrix] return '\n'.join(ret)
[ "def", "matrix_to_string", "(", "matrix", ",", "row_headers", "=", "None", ",", "col_headers", "=", "None", ",", "fmtfun", "=", "lambda", "x", ":", "str", "(", "int", "(", "x", ")", ")", ")", ":", "ret", "=", "[", "]", "if", "col_headers", ":", "ret", ".", "append", "(", "'\\t'", "if", "row_headers", "else", "''", "+", "'\\t'", ".", "join", "(", "col_headers", ")", ")", "if", "row_headers", ":", "ret", "+=", "[", "rh", "+", "'\\t'", "+", "'\\t'", ".", "join", "(", "fmtfun", "(", "f", ")", "for", "f", "in", "row", ")", "for", "rh", ",", "row", "in", "zip", "(", "row_headers", ",", "matrix", ")", "]", "else", ":", "ret", "+=", "[", "'\\t'", ".", "join", "(", "fmtfun", "(", "f", ")", "for", "f", "in", "row", ")", "for", "row", "in", "matrix", "]", "return", "'\\n'", ".", "join", "(", "ret", ")" ]
Takes a 2D matrix (as nested list) and returns a string.
[ "Takes", "a", "2D", "matrix", "(", "as", "nested", "list", ")", "and", "returns", "a", "string", "." ]
[ "\"\"\"Takes a 2D matrix (as nested list) and returns a string.\n \"\"\"" ]
[ { "param": "matrix", "type": null }, { "param": "row_headers", "type": null }, { "param": "col_headers", "type": null }, { "param": "fmtfun", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "matrix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "row_headers", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "col_headers", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "fmtfun", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def matrix_to_string(matrix, row_headers=None, col_headers=None, fmtfun=lambda x: str(int(x))): ret = [] if col_headers: ret.append('\t' if row_headers else '' + '\t'.join(col_headers)) if row_headers: ret += [rh + '\t' + '\t'.join(fmtfun(f) for f in row) for rh, row in zip(row_headers, matrix)] else: ret += ['\t'.join(fmtfun(f) for f in row) for row in matrix] return '\n'.join(ret)
610,294
6
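A usage sketch with a made-up matrix, which also exposes a precedence bug in the first append:

m = [[1, 2], [3, 4]]
print(matrix_to_string(m))  # two tab-separated rows: '1\t2' then '3\t4'
print(matrix_to_string(m, row_headers=['r1', 'r2'], col_headers=['c1', 'c2']))
# the header line comes out as a lone tab: the expression parses as
# '\t' if row_headers else ('' + '\t'.join(col_headers)), so 'c1'/'c2'
# are silently dropped whenever row headers are also given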
72e8365e66e03156dde19b3e475ed4c825d94198
alexseong/azure-intelligence-inventory-optimization
Manual Deployment Guide/Scripts/webjobs/InventoryOptimization/batch-shipyard/convoy/settings.py
[ "MIT" ]
Python
generate_network_security_inbound_rule_name
<not_specific>
def generate_network_security_inbound_rule_name(rule_name, i):
    # type: (StorageClusterSettings) -> str
    """Generate a network security inbound rule name
    :param StorageClusterSettings sc: storage cluster settings
    :rtype: str
    :return: inbound rule name
    """
    return '{}_in-{}'.format(rule_name, i)
Generate a network security inbound rule name :param StorageClusterSettings sc: storage cluster settings :rtype: str :return: inbound rule name
Generate a network security inbound rule name
[ "Generate", "a", "network", "security", "inbound", "rule", "name" ]
def generate_network_security_inbound_rule_name(rule_name, i): return '{}_in-{}'.format(rule_name, i)
[ "def", "generate_network_security_inbound_rule_name", "(", "rule_name", ",", "i", ")", ":", "return", "'{}_in-{}'", ".", "format", "(", "rule_name", ",", "i", ")" ]
Generate a network security inbound rule name
[ "Generate", "a", "network", "security", "inbound", "rule", "name" ]
[ "# type: (StorageClusterSettings) -> str", "\"\"\"Generate a network security inbound rule name\n :param StorageClusterSettings sc: storage cluster settings\n :rtype: str\n :return: inbound rule name\n \"\"\"" ]
[ { "param": "rule_name", "type": null }, { "param": "i", "type": null } ]
{ "returns": [ { "docstring": "inbound rule name", "docstring_tokens": [ "inbound", "rule", "name" ], "type": "str" } ], "raises": [], "params": [ { "identifier": "rule_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "i", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "sc", "type": null, "docstring": "storage cluster settings", "docstring_tokens": [ "storage", "cluster", "settings" ], "default": null, "is_optional": false } ], "others": [] }
def generate_network_security_inbound_rule_name(rule_name, i):
    return '{}_in-{}'.format(rule_name, i)
610295
866
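
A minimal usage sketch for generate_network_security_inbound_rule_name; the rule name and indices are made-up values. The record's outlier_params field already captures that the ":param StorageClusterSettings sc:" line in the docstring does not match the actual (rule_name, i) signature.

# Hypothetical inputs; the function simply formats '{rule_name}_in-{i}'.
for i in range(3):
    print(generate_network_security_inbound_rule_name('ssh', i))
# ssh_in-0
# ssh_in-1
# ssh_in-2
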
bb6fd97c70aa9ca8c0a220f72756d0e09da3cae4
lanesmith/PowerSimData
powersimdata/design/generation/clean_capacity_scaling.py
[ "MIT" ]
Python
_apply_zone_scale_factor_to_ct
null
def _apply_zone_scale_factor_to_ct(ct, fuel, zone_id, scale_factor):
    """Applies a zone scaling factor to a change table, creating internal
    change table structure as necessary. New keys are added, existing keys are
    multiplied.

    :param dict ct: a dictionary of scale factors, with structure matching
        ct from powersimdata.input.change_table.ChangeTable.
    :param str fuel: the fuel to be scaled.
    :param int zone_id: the zone_id to be scaled.
    :param int/float scale_factor: how much the zone should be scaled up by.
    """
    if fuel not in ct:
        ct[fuel] = {}
    if "zone_id" not in ct[fuel]:
        ct[fuel]["zone_id"] = {}
    if zone_id not in ct[fuel]["zone_id"]:
        ct[fuel]["zone_id"][zone_id] = scale_factor
    else:
        ct[fuel]["zone_id"][zone_id] *= scale_factor
Applies a zone scaling factor to a change table, creating internal
change table structure as necessary. New keys are added, existing keys are
multiplied.

:param dict ct: a dictionary of scale factors, with structure matching
    ct from powersimdata.input.change_table.ChangeTable.
:param str fuel: the fuel to be scaled.
:param int zone_id: the zone_id to be scaled.
:param int/float scale_factor: how much the zone should be scaled up by.
Applies a zone scaling factor to a change table, creating internal change table structure as necessary. New keys are added, existing keys are multiplied.
[ "Applies", "a", "zone", "scaling", "factor", "to", "a", "change", "table", "creating", "internal", "change", "table", "structure", "as", "necessary", ".", "New", "keys", "are", "added", "existing", "keys", "are", "multiplied", "." ]
def _apply_zone_scale_factor_to_ct(ct, fuel, zone_id, scale_factor):
    if fuel not in ct:
        ct[fuel] = {}
    if "zone_id" not in ct[fuel]:
        ct[fuel]["zone_id"] = {}
    if zone_id not in ct[fuel]["zone_id"]:
        ct[fuel]["zone_id"][zone_id] = scale_factor
    else:
        ct[fuel]["zone_id"][zone_id] *= scale_factor
[ "def", "_apply_zone_scale_factor_to_ct", "(", "ct", ",", "fuel", ",", "zone_id", ",", "scale_factor", ")", ":", "if", "fuel", "not", "in", "ct", ":", "ct", "[", "fuel", "]", "=", "{", "}", "if", "\"zone_id\"", "not", "in", "ct", "[", "fuel", "]", ":", "ct", "[", "fuel", "]", "[", "\"zone_id\"", "]", "=", "{", "}", "if", "zone_id", "not", "in", "ct", "[", "fuel", "]", "[", "\"zone_id\"", "]", ":", "ct", "[", "fuel", "]", "[", "\"zone_id\"", "]", "[", "zone_id", "]", "=", "scale_factor", "else", ":", "ct", "[", "fuel", "]", "[", "\"zone_id\"", "]", "[", "zone_id", "]", "*=", "scale_factor" ]
Applies a zone scaling factor to a change table, creating internal change table structure as necessary.
[ "Applies", "a", "zone", "scaling", "factor", "to", "a", "change", "table", "creating", "internal", "change", "table", "structure", "as", "necessary", "." ]
[ "\"\"\"Applies a zone scaling factor to a change table, creating internal\n change table structure as necessary. New keys are added, existing keys are\n multiplied.\n\n :param dict ct: a dictionary of scale factors, with structure matching\n ct from powersimdata.input.change_table.ChangeTable.\n :param str fuel: the fuel to be scaled.\n :param int zone_id: the zone_id to be scaled.\n :param int/float scale_factor: how much the zone should be scaled up by.\n \"\"\"" ]
[ { "param": "ct", "type": null }, { "param": "fuel", "type": null }, { "param": "zone_id", "type": null }, { "param": "scale_factor", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ct", "type": null, "docstring": "a dictionary of scale factors, with structure matching\nct from powersimdata.input.change_table.ChangeTable.", "docstring_tokens": [ "a", "dictionary", "of", "scale", "factors", "with", "structure", "matching", "ct", "from", "powersimdata", ".", "input", ".", "change_table", ".", "ChangeTable", "." ], "default": null, "is_optional": false }, { "identifier": "fuel", "type": null, "docstring": "the fuel to be scaled.", "docstring_tokens": [ "the", "fuel", "to", "be", "scaled", "." ], "default": null, "is_optional": false }, { "identifier": "zone_id", "type": null, "docstring": "the zone_id to be scaled.", "docstring_tokens": [ "the", "zone_id", "to", "be", "scaled", "." ], "default": null, "is_optional": false }, { "identifier": "scale_factor", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def _apply_zone_scale_factor_to_ct(ct, fuel, zone_id, scale_factor):
    if fuel not in ct:
        ct[fuel] = {}
    if "zone_id" not in ct[fuel]:
        ct[fuel]["zone_id"] = {}
    if zone_id not in ct[fuel]["zone_id"]:
        ct[fuel]["zone_id"][zone_id] = scale_factor
    else:
        ct[fuel]["zone_id"][zone_id] *= scale_factor
610296
762
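
A small sketch of the in-place semantics of _apply_zone_scale_factor_to_ct; the fuel and zone_id values are illustrative. Nested keys are created on demand, and a second call for the same (fuel, zone_id) multiplies the existing factor rather than overwriting it.

ct = {}
_apply_zone_scale_factor_to_ct(ct, 'solar', 301, 2.0)  # creates the nested keys
_apply_zone_scale_factor_to_ct(ct, 'solar', 301, 1.5)  # multiplies: 2.0 * 1.5
print(ct)
# {'solar': {'zone_id': {301: 3.0}}}
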