Dataset columns (name — type):

- hexsha — string (length 40)
- repo — string (length 5–121)
- path — string (length 4–227)
- license — sequence
- language — string (1 class: Python)
- identifier — string (length 1–107)
- return_type — string (length 2–237, nullable)
- original_string — string (length 75–13.4k)
- original_docstring — string (length 13–12.9k)
- docstring — string (length 13–2.57k)
- docstring_tokens — sequence
- code — string (length 23–1.88k)
- code_tokens — sequence
- short_docstring — string (length 1–1.32k)
- short_docstring_tokens — sequence
- comment — sequence
- parameters — list
- docstring_params — dict
- code_with_imports — string (length 23–1.88k)
- idxs — int64 (0–611k)
- cluster — int64 (0–1.02k)

hexsha | repo | path | license | language | identifier | return_type | original_string | original_docstring | docstring | docstring_tokens | code | code_tokens | short_docstring | short_docstring_tokens | comment | parameters | docstring_params | code_with_imports | idxs | cluster
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
38a7770547b2a57b21d8ee7d2a1bbbad6da8e0ba | decaz/aiozk | aiozk/retry.py | [
"MIT"
] | Python | forever | <not_specific> | def forever(cls):
"""
Forever retry policy, **no delay between retries**
:return: Retry forever policy
:rtype: aiozk.RetryPolicy
"""
def never_wait(_):
return None
return cls(try_limit=None, sleep_func=never_wait) |
Forever retry policy, **no delay between retries**
:return: Retry forever policy
:rtype: aiozk.RetryPolicy
| Forever retry policy, **no delay between retries | [
"Forever",
"retry",
"policy",
"**",
"no",
"delay",
"between",
"retries"
] | def forever(cls):
def never_wait(_):
return None
return cls(try_limit=None, sleep_func=never_wait) | [
"def",
"forever",
"(",
"cls",
")",
":",
"def",
"never_wait",
"(",
"_",
")",
":",
"return",
"None",
"return",
"cls",
"(",
"try_limit",
"=",
"None",
",",
"sleep_func",
"=",
"never_wait",
")"
] | Forever retry policy, **no delay between retries | [
"Forever",
"retry",
"policy",
"**",
"no",
"delay",
"between",
"retries"
] | [
"\"\"\"\n Forever retry policy, **no delay between retries**\n\n :return: Retry forever policy\n :rtype: aiozk.RetryPolicy\n \"\"\""
] | [
{
"param": "cls",
"type": null
}
] | {
"returns": [
{
"docstring": "Retry forever policy",
"docstring_tokens": [
"Retry",
"forever",
"policy"
],
"type": "aiozk.RetryPolicy"
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def forever(cls):
def never_wait(_):
return None
return cls(try_limit=None, sleep_func=never_wait) | 610,408 | 372 |
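A minimal usage sketch for the `forever` record above. The import path is an assumption taken from the record's repo/path fields (decaz/aiozk, aiozk/retry.py), and `forever` is presumably exposed as a classmethod on `RetryPolicy`, given its `cls` parameter.

from aiozk import RetryPolicy  # assumed location, per the record's path field

policy = RetryPolicy.forever()  # try_limit=None -> retry indefinitely; never_wait -> no delay between attempts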
9a65c9d6e6f9f31dc8d182c83d0e307d1628e0da | ViPeR5000/HAsmartirrigation | custom_components/smart_irrigation/helpers.py | [
"MIT"
] | Python | reset_to | <not_specific> | def reset_to(settings, boolval):
"""Reset all values in the dictionary to the specified bool value."""
for setting in settings:
settings[setting] = boolval
return settings | Reset all values in the dictionary to the specified bool value. | Reset all values in the dictionary to the specified bool value. | [
"Reset",
"all",
"values",
"in",
"the",
"dictionary",
"to",
"the",
"specified",
"bool",
"value",
"."
] | def reset_to(settings, boolval):
for setting in settings:
settings[setting] = boolval
return settings | [
"def",
"reset_to",
"(",
"settings",
",",
"boolval",
")",
":",
"for",
"setting",
"in",
"settings",
":",
"settings",
"[",
"setting",
"]",
"=",
"boolval",
"return",
"settings"
] | Reset all values in the dictionary to the specified bool value. | [
"Reset",
"all",
"values",
"in",
"the",
"dictionary",
"to",
"the",
"specified",
"bool",
"value",
"."
] | [
"\"\"\"Reset all values in the dictionary to the specified bool value.\"\"\""
] | [
{
"param": "settings",
"type": null
},
{
"param": "boolval",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "settings",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "boolval",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def reset_to(settings, boolval):
for setting in settings:
settings[setting] = boolval
return settings | 610,409 | 904 |
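A quick check of `reset_to` above: it mutates the mapping in place and returns the same object.

flags = {"sensor_a": True, "sensor_b": False}
result = reset_to(flags, False)
assert result is flags and flags == {"sensor_a": False, "sensor_b": False}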
d2889bd25359817f1e629dfb812276fe77048d08 | itsdamslife/python-ml-udacity | Intro2Python/Excercises/data_structures.py | [
"MIT"
] | Python | top_three | <not_specific> | def top_three(input_list):
"""Returns a list of the three largest elements input_list in order from largest to smallest.
If input_list has fewer than three elements, return input_list element sorted largest to smallest/
"""
sortedList = sorted(input_list, reverse=True)
return sortedList[:3] | Returns a list of the three largest elements input_list in order from largest to smallest.
If input_list has fewer than three elements, return input_list element sorted largest to smallest/
| Returns a list of the three largest elements input_list in order from largest to smallest.
If input_list has fewer than three elements, return input_list element sorted largest to smallest | [
"Returns",
"a",
"list",
"of",
"the",
"three",
"largest",
"elements",
"input_list",
"in",
"order",
"from",
"largest",
"to",
"smallest",
".",
"If",
"input_list",
"has",
"fewer",
"than",
"three",
"elements",
"return",
"input_list",
"element",
"sorted",
"largest",
"to",
"smallest"
] | def top_three(input_list):
sortedList = sorted(input_list, reverse=True)
return sortedList[:3] | [
"def",
"top_three",
"(",
"input_list",
")",
":",
"sortedList",
"=",
"sorted",
"(",
"input_list",
",",
"reverse",
"=",
"True",
")",
"return",
"sortedList",
"[",
":",
"3",
"]"
] | Returns a list of the three largest elements input_list in order from largest to smallest. | [
"Returns",
"a",
"list",
"of",
"the",
"three",
"largest",
"elements",
"input_list",
"in",
"order",
"from",
"largest",
"to",
"smallest",
"."
] | [
"\"\"\"Returns a list of the three largest elements input_list in order from largest to smallest.\n\n If input_list has fewer than three elements, return input_list element sorted largest to smallest/\n \"\"\""
] | [
{
"param": "input_list",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "input_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def top_three(input_list):
sortedList = sorted(input_list, reverse=True)
return sortedList[:3] | 610,410 | 646 |
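Worked examples for `top_three` above, including the short-input case the docstring describes.

assert top_three([9, 2, 7, 4]) == [9, 7, 4]
assert top_three([1, 5]) == [5, 1]  # fewer than three elements: all of them, largest first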
1cb85529580097408fcd362ae89a3020b8fbcdb3 | vlap/vlap.github.io | pubs/tex2html/bibtex2htmldiv.py | [
"CC0-1.0"
] | Python | normalize_authors | <not_specific> | def normalize_authors(authors):
"""
Takes the authors string from a bibtex entry and rewrites it with
first names first.
"""
authorlist = authors.split('and ')
authornames=[]
for author in authorlist:
if ',' in author:
lastname, firstname = author.split(',')
authornames.append(firstname.strip()+' '+lastname.strip())
else:
authornames.append(author.strip())
if len(authorlist)>1:
authornames[-1] = ' and '+authornames[-1]
if len(authorlist)>2:
return ', '.join(authornames)
else:
return ' '.join(authornames) |
Takes the authors string from a bibtex entry and rewrites it with
first names first.
| Takes the authors string from a bibtex entry and rewrites it with
first names first. | [
"Takes",
"the",
"authors",
"string",
"from",
"a",
"bibtex",
"entry",
"and",
"rewrites",
"it",
"with",
"first",
"names",
"first",
"."
] | def normalize_authors(authors):
authorlist = authors.split('and ')
authornames=[]
for author in authorlist:
if ',' in author:
lastname, firstname = author.split(',')
authornames.append(firstname.strip()+' '+lastname.strip())
else:
authornames.append(author.strip())
if len(authorlist)>1:
authornames[-1] = ' and '+authornames[-1]
if len(authorlist)>2:
return ', '.join(authornames)
else:
return ' '.join(authornames) | [
"def",
"normalize_authors",
"(",
"authors",
")",
":",
"authorlist",
"=",
"authors",
".",
"split",
"(",
"'and '",
")",
"authornames",
"=",
"[",
"]",
"for",
"author",
"in",
"authorlist",
":",
"if",
"','",
"in",
"author",
":",
"lastname",
",",
"firstname",
"=",
"author",
".",
"split",
"(",
"','",
")",
"authornames",
".",
"append",
"(",
"firstname",
".",
"strip",
"(",
")",
"+",
"' '",
"+",
"lastname",
".",
"strip",
"(",
")",
")",
"else",
":",
"authornames",
".",
"append",
"(",
"author",
".",
"strip",
"(",
")",
")",
"if",
"len",
"(",
"authorlist",
")",
">",
"1",
":",
"authornames",
"[",
"-",
"1",
"]",
"=",
"' and '",
"+",
"authornames",
"[",
"-",
"1",
"]",
"if",
"len",
"(",
"authorlist",
")",
">",
"2",
":",
"return",
"', '",
".",
"join",
"(",
"authornames",
")",
"else",
":",
"return",
"' '",
".",
"join",
"(",
"authornames",
")"
] | Takes the authors string from a bibtex entry and rewrites it with
first names first. | [
"Takes",
"the",
"authors",
"string",
"from",
"a",
"bibtex",
"entry",
"and",
"rewrites",
"it",
"with",
"first",
"names",
"first",
"."
] | [
"\"\"\"\n Takes the authors string from a bibtex entry and rewrites it with\n first names first.\n \"\"\""
] | [
{
"param": "authors",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "authors",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def normalize_authors(authors):
authorlist = authors.split('and ')
authornames=[]
for author in authorlist:
if ',' in author:
lastname, firstname = author.split(',')
authornames.append(firstname.strip()+' '+lastname.strip())
else:
authornames.append(author.strip())
if len(authorlist)>1:
authornames[-1] = ' and '+authornames[-1]
if len(authorlist)>2:
return ', '.join(authornames)
else:
return ' '.join(authornames) | 610,411 | 837 |
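Worked examples for `normalize_authors` above. Note a quirk in the stored code: the last author already carries a leading ' and ', and the final join adds another separator, so multi-author output contains a double space before "and".

assert normalize_authors("Knuth, Donald") == "Donald Knuth"
assert normalize_authors("Knuth, Donald and Lamport, Leslie") == "Donald Knuth  and Leslie Lamport"  # note the double space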
bed3c14b92e2350c3b1fa6d1cfde92c430e0d547 | MysterionRise/advent-calendar | advent_of_code/2021/dumbo_octopus.py | [
"MIT"
] | Python | flash | null | def flash(octopuses, flashed, i, j):
"""
flashing octopus at i, j and increase energy level of adjacent octopuses
"""
global NUMBER_OF_FLASHES
NUMBER_OF_FLASHES += 1
octopuses[i][j] = 0
flashed[i][j] = True
for x_coord in range(i - 1, i + 2):
for y_coord in range(j - 1, j + 2):
if x_coord == i and y_coord == j:
continue
if x_coord < 0 or x_coord >= len(octopuses):
continue
if y_coord < 0 or y_coord >= len(octopuses[0]):
continue
octopuses[x_coord][y_coord] += 1
if (
not flashed[x_coord][y_coord]
and octopuses[x_coord][y_coord] > 9
):
flash(octopuses, flashed, x_coord, y_coord) |
flashing octopus at i, j and increase energy level of adjacent octopuses
| flashing octopus at i, j and increase energy level of adjacent octopuses | [
"flashing",
"octopus",
"at",
"i",
"j",
"and",
"increase",
"energy",
"level",
"of",
"adjacent",
"octopuses"
] | def flash(octopuses, flashed, i, j):
global NUMBER_OF_FLASHES
NUMBER_OF_FLASHES += 1
octopuses[i][j] = 0
flashed[i][j] = True
for x_coord in range(i - 1, i + 2):
for y_coord in range(j - 1, j + 2):
if x_coord == i and y_coord == j:
continue
if x_coord < 0 or x_coord >= len(octopuses):
continue
if y_coord < 0 or y_coord >= len(octopuses[0]):
continue
octopuses[x_coord][y_coord] += 1
if (
not flashed[x_coord][y_coord]
and octopuses[x_coord][y_coord] > 9
):
flash(octopuses, flashed, x_coord, y_coord) | [
"def",
"flash",
"(",
"octopuses",
",",
"flashed",
",",
"i",
",",
"j",
")",
":",
"global",
"NUMBER_OF_FLASHES",
"NUMBER_OF_FLASHES",
"+=",
"1",
"octopuses",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"0",
"flashed",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"True",
"for",
"x_coord",
"in",
"range",
"(",
"i",
"-",
"1",
",",
"i",
"+",
"2",
")",
":",
"for",
"y_coord",
"in",
"range",
"(",
"j",
"-",
"1",
",",
"j",
"+",
"2",
")",
":",
"if",
"x_coord",
"==",
"i",
"and",
"y_coord",
"==",
"j",
":",
"continue",
"if",
"x_coord",
"<",
"0",
"or",
"x_coord",
">=",
"len",
"(",
"octopuses",
")",
":",
"continue",
"if",
"y_coord",
"<",
"0",
"or",
"y_coord",
">=",
"len",
"(",
"octopuses",
"[",
"0",
"]",
")",
":",
"continue",
"octopuses",
"[",
"x_coord",
"]",
"[",
"y_coord",
"]",
"+=",
"1",
"if",
"(",
"not",
"flashed",
"[",
"x_coord",
"]",
"[",
"y_coord",
"]",
"and",
"octopuses",
"[",
"x_coord",
"]",
"[",
"y_coord",
"]",
">",
"9",
")",
":",
"flash",
"(",
"octopuses",
",",
"flashed",
",",
"x_coord",
",",
"y_coord",
")"
] | flashing octopus at i, j and increase energy level of adjacent octopuses | [
"flashing",
"octopus",
"at",
"i",
"j",
"and",
"increase",
"energy",
"level",
"of",
"adjacent",
"octopuses"
] | [
"\"\"\"\n flashing octopus at i, j and increase energy level of adjacent octopuses\n \"\"\""
] | [
{
"param": "octopuses",
"type": null
},
{
"param": "flashed",
"type": null
},
{
"param": "i",
"type": null
},
{
"param": "j",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "octopuses",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "flashed",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "i",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "j",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def flash(octopuses, flashed, i, j):
global NUMBER_OF_FLASHES
NUMBER_OF_FLASHES += 1
octopuses[i][j] = 0
flashed[i][j] = True
for x_coord in range(i - 1, i + 2):
for y_coord in range(j - 1, j + 2):
if x_coord == i and y_coord == j:
continue
if x_coord < 0 or x_coord >= len(octopuses):
continue
if y_coord < 0 or y_coord >= len(octopuses[0]):
continue
octopuses[x_coord][y_coord] += 1
if (
not flashed[x_coord][y_coord]
and octopuses[x_coord][y_coord] > 9
):
flash(octopuses, flashed, x_coord, y_coord) | 610,412 | 433 |
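A minimal driver for `flash` above; the module-level `NUMBER_OF_FLASHES` counter that the function mutates via `global` has to exist before the call.

NUMBER_OF_FLASHES = 0
octopuses = [[10, 1], [1, 1]]
flashed = [[False, False], [False, False]]
flash(octopuses, flashed, 0, 0)
assert octopuses == [[0, 2], [2, 2]]  # flashed cell reset to 0, each neighbour gained 1
assert NUMBER_OF_FLASHES == 1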
f4c4d98b85fbe0d7bfac7b19f527e658b43131c9 | LachlanAttwood/ConvertMe | venv/lib/python3.8/site-packages/uno/helpers.py | [
"MIT"
] | Python | intersection | <not_specific> | def intersection(a, b):
"""
Returns the intersection of sets a and b.
In plain english:
Returns all the items that are in both a and b.
"""
return a.intersection(b) |
Returns the intersection of sets a and b.
In plain english:
Returns all the items that are in both a and b.
| Returns the intersection of sets a and b.
In plain english:
Returns all the items that are in both a and b. | [
"Returns",
"the",
"intersection",
"of",
"sets",
"a",
"and",
"b",
".",
"In",
"plain",
"english",
":",
"Returns",
"all",
"the",
"items",
"that",
"are",
"in",
"both",
"a",
"and",
"b",
"."
] | def intersection(a, b):
return a.intersection(b) | [
"def",
"intersection",
"(",
"a",
",",
"b",
")",
":",
"return",
"a",
".",
"intersection",
"(",
"b",
")"
] | Returns the intersection of sets a and b. | [
"Returns",
"the",
"intersection",
"of",
"sets",
"a",
"and",
"b",
"."
] | [
"\"\"\"\n Returns the intersection of sets a and b.\n\n In plain english:\n Returns all the items that are in both a and b.\n \"\"\""
] | [
{
"param": "a",
"type": null
},
{
"param": "b",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "a",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def intersection(a, b):
return a.intersection(b) | 610,413 | 1,022 |
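A one-line check of `intersection` above, which simply delegates to `set.intersection`.

assert intersection({1, 2, 3}, {2, 3, 4}) == {2, 3}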
9944ae6e71b2db75cf4dcd31c30d3cef95089e0f | lsst-sqre/jupyterhubutils | jupyterhubutils/utils/utils.py | [
"MIT"
] | Python | floatify | <not_specific> | def floatify(item, default=0.0):
'''Another environment-parser: the empty string should be treated as
None, and return the default, rather than the empty string (which
does not become an integer). Default can be either a float or string
that float() works on. Note that numeric zero (or string '0') returns
0.0, not the default. This is intentional.
'''
if item is None:
return default
if item == '':
return default
return float(item) | Another environment-parser: the empty string should be treated as
None, and return the default, rather than the empty string (which
does not become an integer). Default can be either a float or string
that float() works on. Note that numeric zero (or string '0') returns
0.0, not the default. This is intentional.
| Another environment-parser: the empty string should be treated as
None, and return the default, rather than the empty string (which
does not become an integer). Default can be either a float or string
that float() works on. Note that numeric zero (or string '0') returns
0.0, not the default. This is intentional. | [
"Another",
"environment",
"-",
"parser",
":",
"the",
"empty",
"string",
"should",
"be",
"treated",
"as",
"None",
"and",
"return",
"the",
"default",
"rather",
"than",
"the",
"empty",
"string",
"(",
"which",
"does",
"not",
"become",
"an",
"integer",
")",
".",
"Default",
"can",
"be",
"either",
"a",
"float",
"or",
"string",
"that",
"float",
"()",
"works",
"on",
".",
"Note",
"that",
"numeric",
"zero",
"(",
"or",
"string",
"'",
"0",
"'",
")",
"returns",
"0",
".",
"0",
"not",
"the",
"default",
".",
"This",
"is",
"intentional",
"."
] | def floatify(item, default=0.0):
if item is None:
return default
if item == '':
return default
return float(item) | [
"def",
"floatify",
"(",
"item",
",",
"default",
"=",
"0.0",
")",
":",
"if",
"item",
"is",
"None",
":",
"return",
"default",
"if",
"item",
"==",
"''",
":",
"return",
"default",
"return",
"float",
"(",
"item",
")"
] | Another environment-parser: the empty string should be treated as
None, and return the default, rather than the empty string (which
does not become an integer). | [
"Another",
"environment",
"-",
"parser",
":",
"the",
"empty",
"string",
"should",
"be",
"treated",
"as",
"None",
"and",
"return",
"the",
"default",
"rather",
"than",
"the",
"empty",
"string",
"(",
"which",
"does",
"not",
"become",
"an",
"integer",
")",
"."
] | [
"'''Another environment-parser: the empty string should be treated as\n None, and return the default, rather than the empty string (which\n does not become an integer). Default can be either a float or string\n that float() works on. Note that numeric zero (or string '0') returns\n 0.0, not the default. This is intentional.\n '''"
] | [
{
"param": "item",
"type": null
},
{
"param": "default",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "item",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "default",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def floatify(item, default=0.0):
if item is None:
return default
if item == '':
return default
return float(item) | 610,414 | 846 |
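Worked examples for `floatify` above, exercising the empty-string, None, and numeric-zero cases the docstring calls out.

assert floatify('') == 0.0                 # empty string -> default
assert floatify(None, default=1.5) == 1.5  # None -> default
assert floatify('0') == 0.0                # numeric zero is kept, not replaced by the default
assert floatify('2.5') == 2.5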
697e86c301bf54185b8c3365e0fab524d5420193 | EarthObservationSimulator/orbits | orbitpy/util.py | [
"Apache-2.0"
] | Python | delete_output_info_object_in_list | <not_specific> | def delete_output_info_object_in_list(out_info_list, other_out_info_object):
""" Remove an output-info object from list of output-info objects which has "loose"-equality with the other
output-info object. Loose equality requires only some of the instance variables of the two objects to match.
Please see the ``check_loose_equality`` function of the respective output-info class to know which instance variables care compared.
:param out_info_list: List of output-info objects.
:paramtype: list, output-info objects
:param other_out_info_object: Output-info object(s) with which comparison for loose-equality shall be made. Can be a list.
:paramtype other_out_info_object: output-info object or list, output-info object
:return: (Potentially) reduced list of output-info objects with an deleted object.
:rtype: list, output-info objects
"""
if not out_info_list: # if None or []
return None
if not isinstance(out_info_list, list):
out_info_list = [out_info_list] # make into list
if other_out_info_object is None:
return out_info_list
if not isinstance(other_out_info_object, list):
other_out_info_object = [other_out_info_object] # make into list
for ooi in other_out_info_object:
for indx, oi in enumerate(out_info_list):
if oi.check_loose_equality(ooi):
del out_info_list[indx] # delete the corresponding output-info object
return out_info_list | Remove an output-info object from list of output-info objects which has "loose"-equality with the other
output-info object. Loose equality requires only some of the instance variables of the two objects to match.
Please see the ``check_loose_equality`` function of the respective output-info class to know which instance variables care compared.
:param out_info_list: List of output-info objects.
:paramtype: list, output-info objects
:param other_out_info_object: Output-info object(s) with which comparison for loose-equality shall be made. Can be a list.
:paramtype other_out_info_object: output-info object or list, output-info object
:return: (Potentially) reduced list of output-info objects with an deleted object.
:rtype: list, output-info objects
| Remove an output-info object from list of output-info objects which has "loose"-equality with the other
output-info object. Loose equality requires only some of the instance variables of the two objects to match.
Please see the ``check_loose_equality`` function of the respective output-info class to know which instance variables care compared. | [
"Remove",
"an",
"output",
"-",
"info",
"object",
"from",
"list",
"of",
"output",
"-",
"info",
"objects",
"which",
"has",
"\"",
"loose",
"\"",
"-",
"equality",
"with",
"the",
"other",
"output",
"-",
"info",
"object",
".",
"Loose",
"equality",
"requires",
"only",
"some",
"of",
"the",
"instance",
"variables",
"of",
"the",
"two",
"objects",
"to",
"match",
".",
"Please",
"see",
"the",
"`",
"`",
"check_loose_equality",
"`",
"`",
"function",
"of",
"the",
"respective",
"output",
"-",
"info",
"class",
"to",
"know",
"which",
"instance",
"variables",
"care",
"compared",
"."
] | def delete_output_info_object_in_list(out_info_list, other_out_info_object):
if not out_info_list:
return None
if not isinstance(out_info_list, list):
out_info_list = [out_info_list]
if other_out_info_object is None:
return out_info_list
if not isinstance(other_out_info_object, list):
other_out_info_object = [other_out_info_object]
for ooi in other_out_info_object:
for indx, oi in enumerate(out_info_list):
if oi.check_loose_equality(ooi):
del out_info_list[indx]
return out_info_list | [
"def",
"delete_output_info_object_in_list",
"(",
"out_info_list",
",",
"other_out_info_object",
")",
":",
"if",
"not",
"out_info_list",
":",
"return",
"None",
"if",
"not",
"isinstance",
"(",
"out_info_list",
",",
"list",
")",
":",
"out_info_list",
"=",
"[",
"out_info_list",
"]",
"if",
"other_out_info_object",
"is",
"None",
":",
"return",
"out_info_list",
"if",
"not",
"isinstance",
"(",
"other_out_info_object",
",",
"list",
")",
":",
"other_out_info_object",
"=",
"[",
"other_out_info_object",
"]",
"for",
"ooi",
"in",
"other_out_info_object",
":",
"for",
"indx",
",",
"oi",
"in",
"enumerate",
"(",
"out_info_list",
")",
":",
"if",
"oi",
".",
"check_loose_equality",
"(",
"ooi",
")",
":",
"del",
"out_info_list",
"[",
"indx",
"]",
"return",
"out_info_list"
] | Remove an output-info object from list of output-info objects which has "loose"-equality with the other
output-info object. | [
"Remove",
"an",
"output",
"-",
"info",
"object",
"from",
"list",
"of",
"output",
"-",
"info",
"objects",
"which",
"has",
"\"",
"loose",
"\"",
"-",
"equality",
"with",
"the",
"other",
"output",
"-",
"info",
"object",
"."
] | [
"\"\"\" Remove an output-info object from list of output-info objects which has \"loose\"-equality with the other\n output-info object. Loose equality requires only some of the instance variables of the two objects to match. \n Please see the ``check_loose_equality`` function of the respective output-info class to know which instance variables care compared.\n\n :param out_info_list: List of output-info objects.\n :paramtype: list, output-info objects\n\n :param other_out_info_object: Output-info object(s) with which comparison for loose-equality shall be made. Can be a list.\n :paramtype other_out_info_object: output-info object or list, output-info object\n \n :return: (Potentially) reduced list of output-info objects with an deleted object. \n :rtype: list, output-info objects\n\n \"\"\"",
"# if None or []",
"# make into list",
"# make into list",
"# delete the corresponding output-info object"
] | [
{
"param": "out_info_list",
"type": null
},
{
"param": "other_out_info_object",
"type": null
}
] | {
"returns": [
{
"docstring": "(Potentially) reduced list of output-info objects with an deleted object.",
"docstring_tokens": [
"(",
"Potentially",
")",
"reduced",
"list",
"of",
"output",
"-",
"info",
"objects",
"with",
"an",
"deleted",
"object",
"."
],
"type": "list, output-info objects"
}
],
"raises": [],
"params": [
{
"identifier": "out_info_list",
"type": null,
"docstring": "List of output-info objects.",
"docstring_tokens": [
"List",
"of",
"output",
"-",
"info",
"objects",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "other_out_info_object",
"type": null,
"docstring": "Output-info object(s) with which comparison for loose-equality shall be made. Can be a list.",
"docstring_tokens": [
"Output",
"-",
"info",
"object",
"(",
"s",
")",
"with",
"which",
"comparison",
"for",
"loose",
"-",
"equality",
"shall",
"be",
"made",
".",
"Can",
"be",
"a",
"list",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "paramtype",
"docstring": "list, output-info objects",
"docstring_tokens": [
"list",
"output",
"-",
"info",
"objects"
]
},
{
"identifier": "paramtype",
"docstring": "output-info object or list, output-info object",
"docstring_tokens": [
"output",
"-",
"info",
"object",
"or",
"list",
"output",
"-",
"info",
"object"
]
}
]
} | def delete_output_info_object_in_list(out_info_list, other_out_info_object):
if not out_info_list:
return None
if not isinstance(out_info_list, list):
out_info_list = [out_info_list]
if other_out_info_object is None:
return out_info_list
if not isinstance(other_out_info_object, list):
other_out_info_object = [other_out_info_object]
for ooi in other_out_info_object:
for indx, oi in enumerate(out_info_list):
if oi.check_loose_equality(ooi):
del out_info_list[indx]
return out_info_list | 610,415 | 771 |
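A sketch for the record above using a stand-in class, since the real orbitpy output-info classes are not shown here; only the `check_loose_equality` hook matters to this function.

class FakeOutInfo:  # illustrative stand-in, not an orbitpy class
    def __init__(self, tag):
        self.tag = tag
    def check_loose_equality(self, other):
        return self.tag == other.tag

a, b = FakeOutInfo("x"), FakeOutInfo("y")
assert delete_output_info_object_in_list([a, b], FakeOutInfo("x")) == [b]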
55d2c012a8fcf8428316b4c6b35cbd0df150853f | wusui/pent-py | utilities.py | [
"MIT"
] | Python | flip_2d | <not_specific> | def flip_2d(arr):
"""
Flip a board layout (nxm becomes mxn)
@param {Array} arr Rectangle layout
@return {Array} Rectangle layout flipped along diagonal
"""
return tuple(zip(*arr[::])) |
Flip a board layout (nxm becomes mxn)
@param {Array} arr Rectangle layout
@return {Array} Rectangle layout flipped along diagonal
| Flip a board layout (nxm becomes mxn)
@param {Array} arr Rectangle layout
@return {Array} Rectangle layout flipped along diagonal | [
"Flip",
"a",
"board",
"layout",
"(",
"nxm",
"becomes",
"mxn",
")",
"@param",
"{",
"Array",
"}",
"arr",
"Rectangle",
"layout",
"@return",
"{",
"Array",
"}",
"Rectangle",
"layout",
"flipped",
"along",
"diagonal"
] | def flip_2d(arr):
return tuple(zip(*arr[::])) | [
"def",
"flip_2d",
"(",
"arr",
")",
":",
"return",
"tuple",
"(",
"zip",
"(",
"*",
"arr",
"[",
":",
":",
"]",
")",
")"
] | Flip a board layout (nxm becomes mxn)
@param {Array} arr Rectangle layout
@return {Array} Rectangle layout flipped along diagonal | [
"Flip",
"a",
"board",
"layout",
"(",
"nxm",
"becomes",
"mxn",
")",
"@param",
"{",
"Array",
"}",
"arr",
"Rectangle",
"layout",
"@return",
"{",
"Array",
"}",
"Rectangle",
"layout",
"flipped",
"along",
"diagonal"
] | [
"\"\"\"\r\n Flip a board layout (nxm becomes mxn)\r\n\r\n @param {Array} arr Rectangle layout\r\n @return {Array} Rectangle layout flipped along diagonal\r\n \"\"\""
] | [
{
"param": "arr",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "arr",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def flip_2d(arr):
return tuple(zip(*arr[::])) | 610,417 | 562 |
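A worked example for `flip_2d` above: rows become columns, and the result is a tuple of tuples.

board = ((1, 2, 3),
         (4, 5, 6))
assert flip_2d(board) == ((1, 4), (2, 5), (3, 6))  # 2x3 -> 3x2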
6ab45a584a998e30f2d762d54f72c990ddb8f7c7 | softquanta/homCVQKD | generic.py | [
"Apache-2.0"
] | Python | precise_reconciliation_efficiency | <not_specific> | def precise_reconciliation_efficiency(r, i, h, q, p, d):
"""
Identifies the reconciliation efficiency to be used with extremely high precision under a given set of parameters.
:param r: The code rate.
:param i: The estimated mutual information.
:param h: The estimated entropy.
:param q: The Galois field.
:param p: The discretization bits.
:param d: The penalty of the entropy estimation.
"""
b = (h - d + r * q - p) / i
print("Under current data, the reconciliation efficiency should ideally be:", b)
if b >= 1:
warnings.warn("Ideal beta is larger than 1, which implies given parameters are not correct.")
return b |
Identifies the reconciliation efficiency to be used with extremely high precision under a given set of parameters.
:param r: The code rate.
:param i: The estimated mutual information.
:param h: The estimated entropy.
:param q: The Galois field.
:param p: The discretization bits.
:param d: The penalty of the entropy estimation.
| Identifies the reconciliation efficiency to be used with extremely high precision under a given set of parameters. | [
"Identifies",
"the",
"reconciliation",
"efficiency",
"to",
"be",
"used",
"with",
"extremely",
"high",
"precision",
"under",
"a",
"given",
"set",
"of",
"parameters",
"."
] | def precise_reconciliation_efficiency(r, i, h, q, p, d):
b = (h - d + r * q - p) / i
print("Under current data, the reconciliation efficiency should ideally be:", b)
if b >= 1:
warnings.warn("Ideal beta is larger than 1, which implies given parameters are not correct.")
return b | [
"def",
"precise_reconciliation_efficiency",
"(",
"r",
",",
"i",
",",
"h",
",",
"q",
",",
"p",
",",
"d",
")",
":",
"b",
"=",
"(",
"h",
"-",
"d",
"+",
"r",
"*",
"q",
"-",
"p",
")",
"/",
"i",
"print",
"(",
"\"Under current data, the reconciliation efficiency should ideally be:\"",
",",
"b",
")",
"if",
"b",
">=",
"1",
":",
"warnings",
".",
"warn",
"(",
"\"Ideal beta is larger than 1, which implies given parameters are not correct.\"",
")",
"return",
"b"
] | Identifies the reconciliation efficiency to be used with extremely high precision under a given set of parameters. | [
"Identifies",
"the",
"reconciliation",
"efficiency",
"to",
"be",
"used",
"with",
"extremely",
"high",
"precision",
"under",
"a",
"given",
"set",
"of",
"parameters",
"."
] | [
"\"\"\"\n Identifies the reconciliation efficiency to be used with extremely high precision under a given set of parameters.\n :param r: The code rate.\n :param i: The estimated mutual information.\n :param h: The estimated entropy.\n :param q: The Galois field.\n :param p: The discretization bits.\n :param d: The penalty of the entropy estimation.\n \"\"\""
] | [
{
"param": "r",
"type": null
},
{
"param": "i",
"type": null
},
{
"param": "h",
"type": null
},
{
"param": "q",
"type": null
},
{
"param": "p",
"type": null
},
{
"param": "d",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "r",
"type": null,
"docstring": "The code rate.",
"docstring_tokens": [
"The",
"code",
"rate",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "i",
"type": null,
"docstring": "The estimated mutual information.",
"docstring_tokens": [
"The",
"estimated",
"mutual",
"information",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "h",
"type": null,
"docstring": "The estimated entropy.",
"docstring_tokens": [
"The",
"estimated",
"entropy",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "q",
"type": null,
"docstring": "The Galois field.",
"docstring_tokens": [
"The",
"Galois",
"field",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "p",
"type": null,
"docstring": "The discretization bits.",
"docstring_tokens": [
"The",
"discretization",
"bits",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "d",
"type": null,
"docstring": "The penalty of the entropy estimation.",
"docstring_tokens": [
"The",
"penalty",
"of",
"the",
"entropy",
"estimation",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import warnings
def precise_reconciliation_efficiency(r, i, h, q, p, d):
b = (h - d + r * q - p) / i
print("Under current data, the reconciliation efficiency should ideally be:", b)
if b >= 1:
warnings.warn("Ideal beta is larger than 1, which implies given parameters are not correct.")
return b | 610,418 | 874 |
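A numeric exercise of the formula b = (h - d + r*q - p) / i from the record above; the parameter values are hypothetical, chosen only to keep b below 1.

beta = precise_reconciliation_efficiency(r=0.5, i=1.0, h=3.0, q=4, p=4, d=0.1)  # (3.0 - 0.1 + 2 - 4) / 1.0
assert abs(beta - 0.9) < 1e-12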
f1440ddefbd237e632a1575e4340509d98843e88 | wwang1983/utility | make_project.py | [
"MIT"
] | Python | make_project_dirs | null | def make_project_dirs(project_name, root_path, sub_paths):
""" make project directories under root path """
# make root directory and write CMakeLists.txt for the project
os.makedirs(root_path)
# make sub directories
for folder in sub_paths:
os.makedirs(root_path + os.sep + folder)
if folder == 'inc':
os.makedirs(root_path + os.sep + folder + os.sep + project_name) | make project directories under root path | make project directories under root path | [
"make",
"project",
"directories",
"under",
"root",
"path"
] | def make_project_dirs(project_name, root_path, sub_paths):
os.makedirs(root_path)
for folder in sub_paths:
os.makedirs(root_path + os.sep + folder)
if folder == 'inc':
os.makedirs(root_path + os.sep + folder + os.sep + project_name) | [
"def",
"make_project_dirs",
"(",
"project_name",
",",
"root_path",
",",
"sub_paths",
")",
":",
"os",
".",
"makedirs",
"(",
"root_path",
")",
"for",
"folder",
"in",
"sub_paths",
":",
"os",
".",
"makedirs",
"(",
"root_path",
"+",
"os",
".",
"sep",
"+",
"folder",
")",
"if",
"folder",
"==",
"'inc'",
":",
"os",
".",
"makedirs",
"(",
"root_path",
"+",
"os",
".",
"sep",
"+",
"folder",
"+",
"os",
".",
"sep",
"+",
"project_name",
")"
] | make project directories under root path | [
"make",
"project",
"directories",
"under",
"root",
"path"
] | [
"\"\"\" make project directories under root path \"\"\"",
"# make root directory and write CMakeLists.txt for the project",
"# make sub directories"
] | [
{
"param": "project_name",
"type": null
},
{
"param": "root_path",
"type": null
},
{
"param": "sub_paths",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "project_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "root_path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sub_paths",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def make_project_dirs(project_name, root_path, sub_paths):
os.makedirs(root_path)
for folder in sub_paths:
os.makedirs(root_path + os.sep + folder)
if folder == 'inc':
os.makedirs(root_path + os.sep + folder + os.sep + project_name) | 610,420 | 270 |
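A usage sketch for `make_project_dirs` above, run against a scratch directory so `os.makedirs(root_path)` has an existing parent.

import os, tempfile
root = os.path.join(tempfile.mkdtemp(), 'demo')
make_project_dirs('demo', root, ['src', 'inc'])
assert os.path.isdir(os.path.join(root, 'inc', 'demo'))  # 'inc' also gets a per-project subfolder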
462fc0d6615f42c26ac2bb7a7e251d932bc0adc0 | 1dustindavis/IT-CPE | code/lib/modules/fs_tools.py | [
"BSD-3-Clause"
] | Python | mktempdir | null | def mktempdir(mode=None):
"""
mktempdir(mode=None)
Creates a temp directory with default permissions of 600
"""
dir_name = tempfile.mkdtemp()
if mode:
os.chmod(dir_name, mode) |
mktempdir(mode=None)
Creates a temp directory with default permissions of 600
| mktempdir(mode=None)
Creates a temp directory with default permissions of 600 | [
"mktempdir",
"(",
"mode",
"=",
"None",
")",
"Creates",
"a",
"temp",
"directory",
"with",
"default",
"permissions",
"of",
"600"
] | def mktempdir(mode=None):
dir_name = tempfile.mkdtemp()
if mode:
os.chmod(dir_name, mode) | [
"def",
"mktempdir",
"(",
"mode",
"=",
"None",
")",
":",
"dir_name",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"if",
"mode",
":",
"os",
".",
"chmod",
"(",
"dir_name",
",",
"mode",
")"
] | mktempdir(mode=None)
Creates a temp directory with default permissions of 600 | [
"mktempdir",
"(",
"mode",
"=",
"None",
")",
"Creates",
"a",
"temp",
"directory",
"with",
"default",
"permissions",
"of",
"600"
] | [
"\"\"\"\n mktempdir(mode=None)\n\n Creates a temp directory with default permissions of 600\n \"\"\""
] | [
{
"param": "mode",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "mode",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import tempfile
import os
def mktempdir(mode=None):
dir_name = tempfile.mkdtemp()
if mode:
os.chmod(dir_name, mode) | 610,421 | 643 |
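As stored, `mktempdir` never returns `dir_name`, so callers cannot locate the directory it creates; a likely intended ending is `return dir_name`. Also, no chmod happens unless `mode` is passed, despite the docstring's mention of a 600 default.

assert mktempdir(mode=0o600) is None  # the created path is discarded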
cf65fcb96dabf054abe404455b9e77f6bf03aeb2 | Ghibranalj/calcurse | contrib/vdir/calcurse-vdir.py | [
"BSD-2-Clause"
] | Python | parse_calcurse_data | <not_specific> | def parse_calcurse_data(raw):
"""Parse raw calcurse data to a uid/ical dictionary"""
header = ''.join(raw[:3])
regex = '(BEGIN:(VEVENT|VTODO).*?END:(VEVENT|VTODO).)'
events = [x[0] for x in re.findall(regex, ''.join(raw), re.DOTALL)]
items = {}
for item in events:
uid = re.findall('UID:(.*?)\n', item)[0]
items[uid] = header + item + "END:VCALENDAR\n"
return items | Parse raw calcurse data to a uid/ical dictionary | Parse raw calcurse data to a uid/ical dictionary | [
"Parse",
"raw",
"calcurse",
"data",
"to",
"a",
"uid",
"/",
"ical",
"dictionary"
] | def parse_calcurse_data(raw):
header = ''.join(raw[:3])
regex = '(BEGIN:(VEVENT|VTODO).*?END:(VEVENT|VTODO).)'
events = [x[0] for x in re.findall(regex, ''.join(raw), re.DOTALL)]
items = {}
for item in events:
uid = re.findall('UID:(.*?)\n', item)[0]
items[uid] = header + item + "END:VCALENDAR\n"
return items | [
"def",
"parse_calcurse_data",
"(",
"raw",
")",
":",
"header",
"=",
"''",
".",
"join",
"(",
"raw",
"[",
":",
"3",
"]",
")",
"regex",
"=",
"'(BEGIN:(VEVENT|VTODO).*?END:(VEVENT|VTODO).)'",
"events",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"re",
".",
"findall",
"(",
"regex",
",",
"''",
".",
"join",
"(",
"raw",
")",
",",
"re",
".",
"DOTALL",
")",
"]",
"items",
"=",
"{",
"}",
"for",
"item",
"in",
"events",
":",
"uid",
"=",
"re",
".",
"findall",
"(",
"'UID:(.*?)\\n'",
",",
"item",
")",
"[",
"0",
"]",
"items",
"[",
"uid",
"]",
"=",
"header",
"+",
"item",
"+",
"\"END:VCALENDAR\\n\"",
"return",
"items"
] | Parse raw calcurse data to a uid/ical dictionary | [
"Parse",
"raw",
"calcurse",
"data",
"to",
"a",
"uid",
"/",
"ical",
"dictionary"
] | [
"\"\"\"Parse raw calcurse data to a uid/ical dictionary\"\"\""
] | [
{
"param": "raw",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "raw",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def parse_calcurse_data(raw):
header = ''.join(raw[:3])
regex = '(BEGIN:(VEVENT|VTODO).*?END:(VEVENT|VTODO).)'
events = [x[0] for x in re.findall(regex, ''.join(raw), re.DOTALL)]
items = {}
for item in events:
uid = re.findall('UID:(.*?)\n', item)[0]
items[uid] = header + item + "END:VCALENDAR\n"
return items | 610,422 | 196 |
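A worked example for `parse_calcurse_data` above; the three calendar header lines (consumed as `raw[:3]`) and the VEVENT body are illustrative iCalendar content.

raw = [
    "BEGIN:VCALENDAR\n",
    "VERSION:2.0\n",
    "PRODID:-//calcurse//EN\n",  # illustrative header lines
    "BEGIN:VEVENT\n",
    "UID:abc123\n",
    "END:VEVENT\n",
]
items = parse_calcurse_data(raw)
assert list(items) == ["abc123"]
assert items["abc123"].endswith("END:VCALENDAR\n")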
478d7ff4518bf4d7c7d32dd7cbff66fc6958a2d5 | MeteoSwiss-APN/spack-config | concat_yaml.py | [
"MIT"
] | Python | rename_cray_mpich_to_mpich | <not_specific> | def rename_cray_mpich_to_mpich(packages):
'''
Rename cray-mpich from spack-config module
to mpich to be compatible with spack-c2sm
'''
print('Rename cray-mpich to mpich')
cray_mpich = packages['packages']['cray-mpich']
spec = cray_mpich['externals'][0]['spec']
spec = spec.replace('cray-', '')
cray_mpich['externals'][0]['spec'] = spec
packages['packages']['mpich'] = cray_mpich
packages['packages']['mpich']['buildable'] = False
packages['packages'].pop('cray-mpich')
return packages |
Rename cray-mpich from spack-config module
to mpich to be compatible with spack-c2sm
| Rename cray-mpich from spack-config module
to mpich to be compatible with spack-c2sm | [
"Rename",
"cray",
"-",
"mpich",
"from",
"spack",
"-",
"config",
"module",
"to",
"mpich",
"to",
"be",
"compatible",
"with",
"spack",
"-",
"c2sm"
] | def rename_cray_mpich_to_mpich(packages):
print('Rename cray-mpich to mpich')
cray_mpich = packages['packages']['cray-mpich']
spec = cray_mpich['externals'][0]['spec']
spec = spec.replace('cray-', '')
cray_mpich['externals'][0]['spec'] = spec
packages['packages']['mpich'] = cray_mpich
packages['packages']['mpich']['buildable'] = False
packages['packages'].pop('cray-mpich')
return packages | [
"def",
"rename_cray_mpich_to_mpich",
"(",
"packages",
")",
":",
"print",
"(",
"'Rename cray-mpich to mpich'",
")",
"cray_mpich",
"=",
"packages",
"[",
"'packages'",
"]",
"[",
"'cray-mpich'",
"]",
"spec",
"=",
"cray_mpich",
"[",
"'externals'",
"]",
"[",
"0",
"]",
"[",
"'spec'",
"]",
"spec",
"=",
"spec",
".",
"replace",
"(",
"'cray-'",
",",
"''",
")",
"cray_mpich",
"[",
"'externals'",
"]",
"[",
"0",
"]",
"[",
"'spec'",
"]",
"=",
"spec",
"packages",
"[",
"'packages'",
"]",
"[",
"'mpich'",
"]",
"=",
"cray_mpich",
"packages",
"[",
"'packages'",
"]",
"[",
"'mpich'",
"]",
"[",
"'buildable'",
"]",
"=",
"False",
"packages",
"[",
"'packages'",
"]",
".",
"pop",
"(",
"'cray-mpich'",
")",
"return",
"packages"
] | Rename cray-mpich from spack-config module
to mpich to be compatible with spack-c2sm | [
"Rename",
"cray",
"-",
"mpich",
"from",
"spack",
"-",
"config",
"module",
"to",
"mpich",
"to",
"be",
"compatible",
"with",
"spack",
"-",
"c2sm"
] | [
"'''\n Rename cray-mpich from spack-config module\n to mpich to be compatible with spack-c2sm\n '''"
] | [
{
"param": "packages",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "packages",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def rename_cray_mpich_to_mpich(packages):
print('Rename cray-mpich to mpich')
cray_mpich = packages['packages']['cray-mpich']
spec = cray_mpich['externals'][0]['spec']
spec = spec.replace('cray-', '')
cray_mpich['externals'][0]['spec'] = spec
packages['packages']['mpich'] = cray_mpich
packages['packages']['mpich']['buildable'] = False
packages['packages'].pop('cray-mpich')
return packages | 610,423 | 809 |
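A worked example for the record above; the spec string is a hypothetical version.

packages = {'packages': {'cray-mpich': {'externals': [{'spec': 'cray-mpich@8.1.18'}]}}}
out = rename_cray_mpich_to_mpich(packages)
assert out['packages']['mpich']['externals'][0]['spec'] == 'mpich@8.1.18'
assert out['packages']['mpich']['buildable'] is False
assert 'cray-mpich' not in out['packages']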
7a2c6acdd1e06f8bc1b822c8e0d322f62f083623 | olivercalder/python-libraries | qc.py | [
"MIT"
] | Python | msb | <not_specific> | def msb(x):
''' Returns a mask of the most significant bit of the given integer x.'''
count = 1
while x | (x >> 1) != x:
x |= x >> count
count <<= 1
return (x + 1) >> 1 | Returns a mask of the most significant bit of the given integer x. | Returns a mask of the most significant bit of the given integer x. | [
"Returns",
"a",
"mask",
"of",
"the",
"most",
"significant",
"bit",
"of",
"the",
"given",
"integer",
"x",
"."
] | def msb(x):
count = 1
while x | (x >> 1) != x:
x |= x >> count
count <<= 1
return (x + 1) >> 1 | [
"def",
"msb",
"(",
"x",
")",
":",
"count",
"=",
"1",
"while",
"x",
"|",
"(",
"x",
">>",
"1",
")",
"!=",
"x",
":",
"x",
"|=",
"x",
">>",
"count",
"count",
"<<=",
"1",
"return",
"(",
"x",
"+",
"1",
")",
">>",
"1"
] | Returns a mask of the most significant bit of the given integer x. | [
"Returns",
"a",
"mask",
"of",
"the",
"most",
"significant",
"bit",
"of",
"the",
"given",
"integer",
"x",
"."
] | [
"''' Returns a mask of the most significant bit of the given integer x.'''"
] | [
{
"param": "x",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "x",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def msb(x):
count = 1
while x | (x >> 1) != x:
x |= x >> count
count <<= 1
return (x + 1) >> 1 | 610,424 | 669 |
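Worked examples for `msb` above: the loop smears the top bit rightward until the number is all ones, then isolates the high bit.

assert msb(0b10110) == 0b10000  # 22 -> 16
assert msb(1) == 1
assert msb(0) == 0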
21ef6341b5089fb7b5f74f9817fed5220dd92d3e | hhucn/dbas | api/models.py | [
"MIT"
] | Python | _demultiplex_bubbletype | <not_specific> | def _demultiplex_bubbletype(bubble):
"""
Use a single field to dispatch the type and resolve BubbleTypes-Enum.
:param bubble: Constructed bubble
:return:
"""
if bubble['is_user']:
t = 'user'
elif bubble['is_system']:
t = 'system'
elif bubble['is_info']:
t = 'info'
else:
t = 'status'
return t |
Use a single field to dispatch the type and resolve BubbleTypes-Enum.
:param bubble: Constructed bubble
:return:
| Use a single field to dispatch the type and resolve BubbleTypes-Enum. | [
"Use",
"a",
"single",
"field",
"to",
"dispatch",
"the",
"type",
"and",
"resolve",
"BubbleTypes",
"-",
"Enum",
"."
] | def _demultiplex_bubbletype(bubble):
if bubble['is_user']:
t = 'user'
elif bubble['is_system']:
t = 'system'
elif bubble['is_info']:
t = 'info'
else:
t = 'status'
return t | [
"def",
"_demultiplex_bubbletype",
"(",
"bubble",
")",
":",
"if",
"bubble",
"[",
"'is_user'",
"]",
":",
"t",
"=",
"'user'",
"elif",
"bubble",
"[",
"'is_system'",
"]",
":",
"t",
"=",
"'system'",
"elif",
"bubble",
"[",
"'is_info'",
"]",
":",
"t",
"=",
"'info'",
"else",
":",
"t",
"=",
"'status'",
"return",
"t"
] | Use a single field to dispatch the type and resolve BubbleTypes-Enum. | [
"Use",
"a",
"single",
"field",
"to",
"dispatch",
"the",
"type",
"and",
"resolve",
"BubbleTypes",
"-",
"Enum",
"."
] | [
"\"\"\"\n Use a single field to dispatch the type and resolve BubbleTypes-Enum.\n\n :param bubble: Constructed bubble\n :return:\n \"\"\""
] | [
{
"param": "bubble",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "bubble",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _demultiplex_bubbletype(bubble):
if bubble['is_user']:
t = 'user'
elif bubble['is_system']:
t = 'system'
elif bubble['is_info']:
t = 'info'
else:
t = 'status'
return t | 610,425 | 916 |
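A quick check of the dispatch order in `_demultiplex_bubbletype` above: the flags are tested in user/system/info order, and everything else falls through to 'status'.

assert _demultiplex_bubbletype({'is_user': False, 'is_system': True, 'is_info': False}) == 'system'
assert _demultiplex_bubbletype({'is_user': False, 'is_system': False, 'is_info': False}) == 'status'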
95dd570df589ff6c558b8673ecc718de3125e390 | dlml/TextRecognitionDataGenerator | TextRecognitionDataGenerator/run.py | [
"MIT"
] | Python | load_dict | <not_specific> | def load_dict(lang):
"""
Read the dictionnary file and returns all words in it.
"""
lang_dict = []
with open(os.path.join('dicts', lang + '.txt'), 'r', encoding="utf8", errors='ignore') as d:
lang_dict = d.readlines()
return lang_dict |
Read the dictionnary file and returns all words in it.
| Read the dictionnary file and returns all words in it. | [
"Read",
"the",
"dictionnary",
"file",
"and",
"returns",
"all",
"words",
"in",
"it",
"."
] | def load_dict(lang):
lang_dict = []
with open(os.path.join('dicts', lang + '.txt'), 'r', encoding="utf8", errors='ignore') as d:
lang_dict = d.readlines()
return lang_dict | [
"def",
"load_dict",
"(",
"lang",
")",
":",
"lang_dict",
"=",
"[",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'dicts'",
",",
"lang",
"+",
"'.txt'",
")",
",",
"'r'",
",",
"encoding",
"=",
"\"utf8\"",
",",
"errors",
"=",
"'ignore'",
")",
"as",
"d",
":",
"lang_dict",
"=",
"d",
".",
"readlines",
"(",
")",
"return",
"lang_dict"
] | Read the dictionnary file and returns all words in it. | [
"Read",
"the",
"dictionnary",
"file",
"and",
"returns",
"all",
"words",
"in",
"it",
"."
] | [
"\"\"\"\n Read the dictionnary file and returns all words in it.\n \"\"\""
] | [
{
"param": "lang",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "lang",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def load_dict(lang):
lang_dict = []
with open(os.path.join('dicts', lang + '.txt'), 'r', encoding="utf8", errors='ignore') as d:
lang_dict = d.readlines()
return lang_dict | 610,426 | 908 |
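A usage sketch for `load_dict` above; it expects a wordlist at dicts/<lang>.txt relative to the working directory, and `readlines` keeps each trailing newline.

words = load_dict('en')  # assumes a dicts/en.txt file exists
print(words[0])          # entries are not stripped, so each ends with '\n'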
fc826ba87a5235948577c215a31ed9c8e46a7a7b | PariyaPm/foresite | venv/lib/python3.9/site-packages/spnego/_compat.py | [
"MIT"
] | Python | add_metaclass | <not_specific> | def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass. This has been copied from six under the MIT license. """
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper | Class decorator for creating a class with a metaclass. This has been copied from six under the MIT license. | Class decorator for creating a class with a metaclass. This has been copied from six under the MIT license. | [
"Class",
"decorator",
"for",
"creating",
"a",
"class",
"with",
"a",
"metaclass",
".",
"This",
"has",
"been",
"copied",
"from",
"six",
"under",
"the",
"MIT",
"license",
"."
] | def add_metaclass(metaclass):
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper | [
"def",
"add_metaclass",
"(",
"metaclass",
")",
":",
"def",
"wrapper",
"(",
"cls",
")",
":",
"orig_vars",
"=",
"cls",
".",
"__dict__",
".",
"copy",
"(",
")",
"slots",
"=",
"orig_vars",
".",
"get",
"(",
"'__slots__'",
")",
"orig_vars",
".",
"pop",
"(",
"'__dict__'",
",",
"None",
")",
"orig_vars",
".",
"pop",
"(",
"'__weakref__'",
",",
"None",
")",
"if",
"hasattr",
"(",
"cls",
",",
"'__qualname__'",
")",
":",
"orig_vars",
"[",
"'__qualname__'",
"]",
"=",
"cls",
".",
"__qualname__",
"return",
"metaclass",
"(",
"cls",
".",
"__name__",
",",
"cls",
".",
"__bases__",
",",
"orig_vars",
")",
"return",
"wrapper"
] | Class decorator for creating a class with a metaclass. | [
"Class",
"decorator",
"for",
"creating",
"a",
"class",
"with",
"a",
"metaclass",
"."
] | [
"\"\"\"Class decorator for creating a class with a metaclass. This has been copied from six under the MIT license. \"\"\""
] | [
{
"param": "metaclass",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "metaclass",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def add_metaclass(metaclass):
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper | 610,427 | 940 |
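A usage example for `add_metaclass` above (the six-style decorator): the decorated class is rebuilt with the given metaclass.

class Meta(type):
    def __new__(mcls, name, bases, ns):
        ns['tagged'] = True  # the metaclass stamps every class it builds
        return super().__new__(mcls, name, bases, ns)

@add_metaclass(Meta)
class Thing(object):
    pass

assert type(Thing) is Meta and Thing.tagged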
e9ea07b56e119b103813f8563798296bfe510cc3 | oktoshi/OpenBazaar-Server | dht/network.py | [
"MIT"
] | Python | _anyRespondSuccess | <not_specific> | def _anyRespondSuccess(responses):
"""
Given the result of a DeferredList of calls to peers, ensure that at least
one of them was contacted and responded with a Truthy result.
"""
for deferSuccess, result in responses:
peerReached, peerResponse = result
if deferSuccess and peerReached and peerResponse:
return True
return False |
Given the result of a DeferredList of calls to peers, ensure that at least
one of them was contacted and responded with a Truthy result.
| Given the result of a DeferredList of calls to peers, ensure that at least
one of them was contacted and responded with a Truthy result. | [
"Given",
"the",
"result",
"of",
"a",
"DeferredList",
"of",
"calls",
"to",
"peers",
"ensure",
"that",
"at",
"least",
"one",
"of",
"them",
"was",
"contacted",
"and",
"responded",
"with",
"a",
"Truthy",
"result",
"."
] | def _anyRespondSuccess(responses):
for deferSuccess, result in responses:
peerReached, peerResponse = result
if deferSuccess and peerReached and peerResponse:
return True
return False | [
"def",
"_anyRespondSuccess",
"(",
"responses",
")",
":",
"for",
"deferSuccess",
",",
"result",
"in",
"responses",
":",
"peerReached",
",",
"peerResponse",
"=",
"result",
"if",
"deferSuccess",
"and",
"peerReached",
"and",
"peerResponse",
":",
"return",
"True",
"return",
"False"
] | Given the result of a DeferredList of calls to peers, ensure that at least
one of them was contacted and responded with a Truthy result. | [
"Given",
"the",
"result",
"of",
"a",
"DeferredList",
"of",
"calls",
"to",
"peers",
"ensure",
"that",
"at",
"least",
"one",
"of",
"them",
"was",
"contacted",
"and",
"responded",
"with",
"a",
"Truthy",
"result",
"."
] | [
"\"\"\"\n Given the result of a DeferredList of calls to peers, ensure that at least\n one of them was contacted and responded with a Truthy result.\n \"\"\""
] | [
{
"param": "responses",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "responses",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _anyRespondSuccess(responses):
for deferSuccess, result in responses:
peerReached, peerResponse = result
if deferSuccess and peerReached and peerResponse:
return True
return False | 610,428 | 751 |
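Worked examples for `_anyRespondSuccess` above; each entry mirrors a Twisted DeferredList result of shape (deferredSucceeded, (peerReached, peerResponse)).

responses = [(True, (False, None)), (True, (True, 'stored'))]
assert _anyRespondSuccess(responses) is True
assert _anyRespondSuccess([(False, (True, True))]) is False  # failed deferreds never count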
b0e6df9949f628a7ef708fa984b2f64cedbf5c18 | Cordar/cogmods | propositional/student_projects/Kaltenbrunn2020/models/mSentential/model_builder.py | [
"MIT"
] | Python | merge_mental_and_full | <not_specific> | def merge_mental_and_full(mental, full):
"""Merge Mental Model and Fully Explicit Model.
Arguments:
mental {MentalModel} -- from this Model the Mental Model part is used
full {MentalModel} -- from this Model the Fully Explicit Model part is used
Returns:
MentalModel -- merged from inputs
"""
mental.full_clauses = full.full_clauses
mental.full_poss = full.full_poss
return mental | Merge Mental Model and Fully Explicit Model.
Arguments:
mental {MentalModel} -- from this Model the Mental Model part is used
full {MentalModel} -- from this Model the Fully Explicit Model part is used
Returns:
MentalModel -- merged from inputs
| Merge Mental Model and Fully Explicit Model.
Arguments:
mental {MentalModel} -- from this Model the Mental Model part is used
full {MentalModel} -- from this Model the Fully Explicit Model part is used
- merged from inputs | [
"Merge",
"Mental",
"Model",
"and",
"Fully",
"Explicit",
"Model",
".",
"Arguments",
":",
"mental",
"{",
"MentalModel",
"}",
"--",
"from",
"this",
"Model",
"the",
"Mental",
"Model",
"part",
"is",
"used",
"full",
"{",
"MentalModel",
"}",
"--",
"from",
"this",
"Model",
"the",
"Fully",
"Explicit",
"Model",
"part",
"is",
"used",
"-",
"merged",
"from",
"inputs"
] | def merge_mental_and_full(mental, full):
mental.full_clauses = full.full_clauses
mental.full_poss = full.full_poss
return mental | [
"def",
"merge_mental_and_full",
"(",
"mental",
",",
"full",
")",
":",
"mental",
".",
"full_clauses",
"=",
"full",
".",
"full_clauses",
"mental",
".",
"full_poss",
"=",
"full",
".",
"full_poss",
"return",
"mental"
] | Merge Mental Model and Fully Explicit Model. | [
"Merge",
"Mental",
"Model",
"and",
"Fully",
"Explicit",
"Model",
"."
] | [
"\"\"\"Merge Mental Model and Fully Explicit Model.\n\n Arguments:\n mental {MentalModel} -- from this Model the Mental Model part is used\n full {MentalModel} -- from this Model the Fully Explicit Model part is used\n\n Returns:\n MentalModel -- merged from inputs\n \"\"\""
] | [
{
"param": "mental",
"type": null
},
{
"param": "full",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "mental",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "full",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def merge_mental_and_full(mental, full):
mental.full_clauses = full.full_clauses
mental.full_poss = full.full_poss
return mental | 610,429 | 199 |
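A sketch for `merge_mental_and_full` above using `SimpleNamespace` stand-ins, since the project's MentalModel class is not shown; only the two `full_*` attributes are touched.

from types import SimpleNamespace
mental = SimpleNamespace(full_clauses=None, full_poss=None)
full = SimpleNamespace(full_clauses=['p', 'q'], full_poss=[('p', 'q')])
merged = merge_mental_and_full(mental, full)
assert merged is mental and merged.full_clauses == ['p', 'q']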
6dd6f4ea0121a88f9fafc30c25b46d2fd4e34abe | wardVD/tfx | tfx/tools/cli/container_builder/builder.py | [
"Apache-2.0"
] | Python | _get_image_repo | str | def _get_image_repo(image: str) -> str:
"""Extracts image name before ':' which is REPO part of the image name."""
image_fields = image.split(':')
if len(image_fields) > 2:
raise ValueError(f'Too many ":" in the image name: {image}')
return image_fields[0] | Extracts image name before ':' which is REPO part of the image name. | Extracts image name before ':' which is REPO part of the image name. | [
"Extracts",
"image",
"name",
"before",
"'",
":",
"'",
"which",
"is",
"REPO",
"part",
"of",
"the",
"image",
"name",
"."
] | def _get_image_repo(image: str) -> str:
image_fields = image.split(':')
if len(image_fields) > 2:
raise ValueError(f'Too many ":" in the image name: {image}')
return image_fields[0] | [
"def",
"_get_image_repo",
"(",
"image",
":",
"str",
")",
"->",
"str",
":",
"image_fields",
"=",
"image",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"image_fields",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"f'Too many \":\" in the image name: {image}'",
")",
"return",
"image_fields",
"[",
"0",
"]"
] | Extracts image name before ':' which is REPO part of the image name. | [
"Extracts",
"image",
"name",
"before",
"'",
":",
"'",
"which",
"is",
"REPO",
"part",
"of",
"the",
"image",
"name",
"."
] | [
"\"\"\"Extracts image name before ':' which is REPO part of the image name.\"\"\""
] | [
{
"param": "image",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "image",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _get_image_repo(image: str) -> str:
image_fields = image.split(':')
if len(image_fields) > 2:
raise ValueError(f'Too many ":" in the image name: {image}')
return image_fields[0] | 610,430 | 813 |
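A quick illustrative check of _get_image_repo as defined in the record above (the image names here are invented):

print(_get_image_repo('gcr.io/my-proj/tfx-image:latest'))  # -> 'gcr.io/my-proj/tfx-image'
print(_get_image_repo('tfx-image'))                        # -> 'tfx-image' (untagged name passes through)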
fbec0adcfd7cb61f6341ba20aa78eeb4acbb326d | Hanwant/reinforcement-learning-suite | rl_suite/NN.py | [
"MIT"
] | Python | conv_out_shape | <not_specific> | def conv_out_shape(in_shape, layers):
"""
Calculates output shape of input_shape going through a list of pytorch convolutional layers
in_shape: (H, W)
layers: list of convolution layers
"""
shape = in_shape
for layer in layers:
h_out = ((shape[0] + 2*layer.padding[0] - layer.dilation[0] * (layer.kernel_size[0] - 1)-1) / layer.stride[0])+1
w_out = ((shape[1] + 2*layer.padding[1] - layer.dilation[1] * (layer.kernel_size[1] - 1)-1) / layer.stride[1])+1
shape = (int(h_out), int(w_out))
return shape |
Calculates output shape of input_shape going through a list of pytorch convolutional layers
in_shape: (H, W)
layers: list of convolution layers
| Calculates output shape of input_shape going through a list of pytorch convolutional layers
in_shape: (H, W)
layers: list of convolution layers | [
"Calculates",
"output",
"shape",
"of",
"input_shape",
"going",
"through",
"a",
"list",
"of",
"pytorch",
"convolutional",
"layers",
"in_shape",
":",
"(",
"H",
"W",
")",
"layers",
":",
"list",
"of",
"convolution",
"layers"
] | def conv_out_shape(in_shape, layers):
shape = in_shape
for layer in layers:
h_out = ((shape[0] + 2*layer.padding[0] - layer.dilation[0] * (layer.kernel_size[0] - 1)-1) / layer.stride[0])+1
w_out = ((shape[1] + 2*layer.padding[1] - layer.dilation[1] * (layer.kernel_size[1] - 1)-1) / layer.stride[1])+1
shape = (int(h_out), int(w_out))
return shape | [
"def",
"conv_out_shape",
"(",
"in_shape",
",",
"layers",
")",
":",
"shape",
"=",
"in_shape",
"for",
"layer",
"in",
"layers",
":",
"h_out",
"=",
"(",
"(",
"shape",
"[",
"0",
"]",
"+",
"2",
"*",
"layer",
".",
"padding",
"[",
"0",
"]",
"-",
"layer",
".",
"dilation",
"[",
"0",
"]",
"*",
"(",
"layer",
".",
"kernel_size",
"[",
"0",
"]",
"-",
"1",
")",
"-",
"1",
")",
"/",
"layer",
".",
"stride",
"[",
"0",
"]",
")",
"+",
"1",
"w_out",
"=",
"(",
"(",
"shape",
"[",
"1",
"]",
"+",
"2",
"*",
"layer",
".",
"padding",
"[",
"1",
"]",
"-",
"layer",
".",
"dilation",
"[",
"1",
"]",
"*",
"(",
"layer",
".",
"kernel_size",
"[",
"1",
"]",
"-",
"1",
")",
"-",
"1",
")",
"/",
"layer",
".",
"stride",
"[",
"1",
"]",
")",
"+",
"1",
"shape",
"=",
"(",
"int",
"(",
"h_out",
")",
",",
"int",
"(",
"w_out",
")",
")",
"return",
"shape"
] | Calculates output shape of input_shape going through a list of pytorch convolutional layers
in_shape: (H, W)
layers: list of convolution layers | [
"Calculates",
"output",
"shape",
"of",
"input_shape",
"going",
"through",
"a",
"list",
"of",
"pytorch",
"convolutional",
"layers",
"in_shape",
":",
"(",
"H",
"W",
")",
"layers",
":",
"list",
"of",
"convolution",
"layers"
] | [
"\"\"\"\n Calculates output shape of input_shape going through a list of pytorch convolutional layers\n in_shape: (H, W)\n layers: list of convolution layers\n \"\"\""
] | [
{
"param": "in_shape",
"type": null
},
{
"param": "layers",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "in_shape",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "layers",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def conv_out_shape(in_shape, layers):
shape = in_shape
for layer in layers:
h_out = ((shape[0] + 2*layer.padding[0] - layer.dilation[0] * (layer.kernel_size[0] - 1)-1) / layer.stride[0])+1
w_out = ((shape[1] + 2*layer.padding[1] - layer.dilation[1] * (layer.kernel_size[1] - 1)-1) / layer.stride[1])+1
shape = (int(h_out), int(w_out))
return shape | 610,431 | 86 |
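A minimal usage sketch for conv_out_shape from the record above. It assumes PyTorch is installed and uses torch.nn.Conv2d, whose kernel_size/stride/padding/dilation attributes are the tuples the function indexes into; the layer sizes are arbitrary:

import torch.nn as nn

layers = [
    nn.Conv2d(3, 16, kernel_size=8, stride=4),   # (84 - 8)/4 + 1 = 20
    nn.Conv2d(16, 32, kernel_size=4, stride=2),  # (20 - 4)/2 + 1 = 9
]
print(conv_out_shape((84, 84), layers))  # -> (9, 9)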
5d0d5dce58e1d1547e9aeef2a4fecfe26b30f0cc | AdamCottrill/FishNet2DB | sqlite2mdb.py | [
"MIT"
] | Python | convert_to_float | <not_specific> | def convert_to_float(val):
"""given a string value val try to convert it to a float. Remove any
extraneous spaces or trailing periods that often appear in this field
"""
try:
return float(val.replace(" ", "").rstrip("."))
except:
return None | given a string value val try to convert it to a float. Remove any
extraneous spaces or trailing periods that often appear in this field
| given a string value val try to convert it to a float. Remove any
extraneous spaces or trailing periods that often appear in this field | [
"given",
"a",
"string",
"value",
"val",
"try",
"to",
"convert",
"it",
"to",
"a",
"float",
".",
"Remove",
"any",
"extraniouse",
"spaces",
"or",
"trailing",
"periods",
"that",
"often",
"appear",
"in",
"this",
"field"
] | def convert_to_float(val):
try:
return float(val.replace(" ", "").rstrip("."))
except:
return None | [
"def",
"convert_to_float",
"(",
"val",
")",
":",
"try",
":",
"return",
"float",
"(",
"val",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
".",
"rstrip",
"(",
"\".\"",
")",
")",
"except",
":",
"return",
"None"
] | given a string value val try to convert it to a float. | [
"given",
"a",
"string",
"value",
"val",
"try",
"to",
"convert",
"it",
"to",
"a",
"float",
"."
] | [
"\"\"\"given a string value val try to convert it to a float. Remove any\n extraniouse spaces or trailing periods that often appear in this field\n\n \"\"\""
] | [
{
"param": "val",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "val",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def convert_to_float(val):
try:
return float(val.replace(" ", "").rstrip("."))
except:
return None | 610,433 | 817 |
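An illustrative run of convert_to_float from the record above; the sample strings are made up to match the messiness the docstring describes:

print(convert_to_float("12. 5"))  # 12.5 -- internal space removed
print(convert_to_float("3.14."))  # 3.14 -- trailing period stripped
print(convert_to_float("n/a"))    # None -- unparseable input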
5dba5a4e87805fb2a6d4fb1d694f82977489ba87 | joyceho/abbr-norm | ppNote.py | [
"MIT"
] | Python | _replace_abbr | <not_specific> | def _replace_abbr(x, abbrDict):
'''
Replace an abbreviation using the dictionary
Assumes x is a single word
'''
# check the word directly
if x in abbrDict:
return abbrDict[x]
# check if it's ended by a period (end of sentence)
if x.rstrip(".") in abbrDict:
return abbrDict[x.rstrip(".")]
if x.rstrip("'d") in abbrDict:
return abbrDict[x.rstrip("'d")]
return x |
Replace an abbreviation using the dictionary
Assumes x is a single word
| Replace an abbreviation using the dictionary
Assumes x is a single word | [
"Replace",
"an",
"abbreviation",
"using",
"the",
"dictionary",
"Assumes",
"x",
"is",
"a",
"single",
"word"
] | def _replace_abbr(x, abbrDict):
if x in abbrDict:
return abbrDict[x]
if x.rstrip(".") in abbrDict:
return abbrDict[x.rstrip(".")]
if x.rstrip("'d") in abbrDict:
return abbrDict[x.rstrip("'d")]
return x | [
"def",
"_replace_abbr",
"(",
"x",
",",
"abbrDict",
")",
":",
"if",
"x",
"in",
"abbrDict",
":",
"return",
"abbrDict",
"[",
"x",
"]",
"if",
"x",
".",
"rstrip",
"(",
"\".\"",
")",
"in",
"abbrDict",
":",
"return",
"abbrDict",
"[",
"x",
".",
"rstrip",
"(",
"\".\"",
")",
"]",
"if",
"x",
".",
"rstrip",
"(",
"\"'d\"",
")",
"in",
"abbrDict",
":",
"return",
"abbrDict",
"[",
"x",
".",
"rstrip",
"(",
"\"'d\"",
")",
"]",
"return",
"x"
] | Replace an abbreviation using the dictionary
Assumes x is a single word | [
"Replace",
"an",
"abbreviation",
"using",
"the",
"dictionary",
"Assumes",
"x",
"is",
"a",
"single",
"word"
] | [
"'''\n Replace an abbreviation using the dictionary\n Assumes x is a single word\n '''",
"# check the word directly",
"# check if it's ended by a period (end of sentence)"
] | [
{
"param": "x",
"type": null
},
{
"param": "abbrDict",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "x",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "abbrDict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _replace_abbr(x, abbrDict):
if x in abbrDict:
return abbrDict[x]
if x.rstrip(".") in abbrDict:
return abbrDict[x.rstrip(".")]
if x.rstrip("'d") in abbrDict:
return abbrDict[x.rstrip("'d")]
return x | 610,434 | 133 |
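A small, hypothetical abbreviation map run through _replace_abbr from the record above. Note that str.rstrip("'d") strips any trailing ' or d characters rather than the literal suffix 'd -- a quirk inherited from the original code:

abbr = {"pt": "patient", "hx": "history"}
print(_replace_abbr("pt", abbr))    # patient
print(_replace_abbr("hx.", abbr))   # history -- sentence-final period handled
print(_replace_abbr("word", abbr))  # word -- no mapping, returned unchanged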
fb70c1574c344a41c7aefd18ff72a263b9a4afe1 | ndau/commands | automation/upgrade.py | [
"Apache-2.0"
] | Python | register_sha | null | def register_sha(network_name, sha):
"""
Upload a new current-<network>.txt to S3 that points to the given SHA.
This allows our local docker scripts to know which SHA to use when connecting to the network.
"""
print(f"Registering {sha} as the current one in use on {network_name}...")
current_file_name = f"current-{network_name}.txt"
current_file_path = f"./{current_file_name}"
with open(current_file_path, "w") as f:
f.write(f"{sha}\n")
r = subprocess.run(
["aws", "s3", "cp", current_file_path, f"s3://ndau-images/{current_file_name}"]
)
os.remove(current_file_path)
if r.returncode != 0:
sys.exit(f"aws s3 cp failed with code {r.returncode}") |
Upload a new current-<network>.txt to S3 that points to the given SHA.
This allows our local docker scripts to know which SHA to use when connecting to the network.
| Upload a new current-.txt to S3 that points to the given SHA.
This allows our local docker scripts to know which SHA to use when connecting to the network. | [
"Upload",
"a",
"new",
"current",
"-",
".",
"txt",
"to",
"S3",
"that",
"points",
"to",
"the",
"given",
"SHA",
".",
"This",
"allows",
"our",
"local",
"docker",
"scripts",
"to",
"know",
"which",
"SHA",
"to",
"use",
"when",
"connecting",
"to",
"the",
"network",
"."
] | def register_sha(network_name, sha):
print(f"Registering {sha} as the current one in use on {network_name}...")
current_file_name = f"current-{network_name}.txt"
current_file_path = f"./{current_file_name}"
with open(current_file_path, "w") as f:
f.write(f"{sha}\n")
r = subprocess.run(
["aws", "s3", "cp", current_file_path, f"s3://ndau-images/{current_file_name}"]
)
os.remove(current_file_path)
if r.returncode != 0:
sys.exit(f"aws s3 cp failed with code {r.returncode}") | [
"def",
"register_sha",
"(",
"network_name",
",",
"sha",
")",
":",
"print",
"(",
"f\"Registering {sha} as the current one in use on {network_name}...\"",
")",
"current_file_name",
"=",
"f\"current-{network_name}.txt\"",
"current_file_path",
"=",
"f\"./{current_file_name}\"",
"with",
"open",
"(",
"current_file_path",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"f\"{sha}\\n\"",
")",
"r",
"=",
"subprocess",
".",
"run",
"(",
"[",
"\"aws\"",
",",
"\"s3\"",
",",
"\"cp\"",
",",
"current_file_path",
",",
"f\"s3://ndau-images/{current_file_name}\"",
"]",
")",
"os",
".",
"remove",
"(",
"current_file_path",
")",
"if",
"r",
".",
"returncode",
"!=",
"0",
":",
"sys",
".",
"exit",
"(",
"f\"aws s3 cp failed with code {r.returncode}\"",
")"
] | Upload a new current-<network>.txt to S3 that points to the given SHA. | [
"Upload",
"a",
"new",
"current",
"-",
"<network",
">",
".",
"txt",
"to",
"S3",
"that",
"points",
"to",
"the",
"given",
"SHA",
"."
] | [
"\"\"\"\n Upload a new current-<network>.txt to S3 that points to the given SHA.\n This allows our local docker scripts to know which SHA to use when connecting to the network.\n \"\"\""
] | [
{
"param": "network_name",
"type": null
},
{
"param": "sha",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "network_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sha",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import sys
import subprocess
import os
def register_sha(network_name, sha):
print(f"Registering {sha} as the current one in use on {network_name}...")
current_file_name = f"current-{network_name}.txt"
current_file_path = f"./{current_file_name}"
with open(current_file_path, "w") as f:
f.write(f"{sha}\n")
r = subprocess.run(
["aws", "s3", "cp", current_file_path, f"s3://ndau-images/{current_file_name}"]
)
os.remove(current_file_path)
if r.returncode != 0:
sys.exit(f"aws s3 cp failed with code {r.returncode}") | 610,435 | 962 |
c7dc38d31224e34af8d968356ca8d39ee62480a7 | wwongkamjan/dipnet_press | diplomacy_research/utils/cluster_config/reinforcement.py | [
"MIT"
] | Python | _get_iterator_device | <not_specific> | def _get_iterator_device(job_name, task_id):
""" Returns the iterator device to use """
if job_name != 'learner':
return None
return '/job:%s/task:%d' % (job_name, task_id) | Returns the iterator device to use | Returns the iterator device to use | [
"Returns",
"the",
"iterator",
"device",
"to",
"use"
] | def _get_iterator_device(job_name, task_id):
if job_name != 'learner':
return None
return '/job:%s/task:%d' % (job_name, task_id) | [
"def",
"_get_iterator_device",
"(",
"job_name",
",",
"task_id",
")",
":",
"if",
"job_name",
"!=",
"'learner'",
":",
"return",
"None",
"return",
"'/job:%s/task:%d'",
"%",
"(",
"job_name",
",",
"task_id",
")"
] | Returns the iterator device to use | [
"Returns",
"the",
"iterator",
"device",
"to",
"use"
] | [
"\"\"\" Returns the iterator device to use \"\"\""
] | [
{
"param": "job_name",
"type": null
},
{
"param": "task_id",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "job_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "task_id",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _get_iterator_device(job_name, task_id):
if job_name != 'learner':
return None
return '/job:%s/task:%d' % (job_name, task_id) | 610,436 | 572 |
2f55d5959832e4e060cf88f9e4d1f8f812f25fc6 | taxpon/pyomni | pyomni/webdav/WebdavClient.py | [
"MIT"
] | Python | parseDigestAuthInfo | <not_specific> | def parseDigestAuthInfo(authInfo):
"""
Parses the authentication information returned from a server and returns
a dictionary containing realm, qop, and nonce.
@see: L{AuthorizationError<webdav.Connection.AuthorizationError>}
or the main function of this module.
"""
info = dict()
info["realm"] = re.search('realm="([^"]+)"', authInfo).group(1)
info["qop"] = re.search('qop="([^"]+)"', authInfo).group(1)
info["nonce"] = re.search('nonce="([^"]+)"', authInfo).group(1)
return info |
Parses the authentication information returned from a server and returns
a dictionary containing realm, qop, and nonce.
@see: L{AuthorizationError<webdav.Connection.AuthorizationError>}
or the main function of this module.
| Parses the authentication information returned from a server and returns
a dictionary containing realm, qop, and nonce. | [
"Parses",
"the",
"authentication",
"information",
"returned",
"from",
"a",
"server",
"and",
"returns",
"a",
"dictionary",
"containing",
"realm",
"qop",
"and",
"nonce",
"."
] | def parseDigestAuthInfo(authInfo):
info = dict()
info["realm"] = re.search('realm="([^"]+)"', authInfo).group(1)
info["qop"] = re.search('qop="([^"]+)"', authInfo).group(1)
info["nonce"] = re.search('nonce="([^"]+)"', authInfo).group(1)
return info | [
"def",
"parseDigestAuthInfo",
"(",
"authInfo",
")",
":",
"info",
"=",
"dict",
"(",
")",
"info",
"[",
"\"realm\"",
"]",
"=",
"re",
".",
"search",
"(",
"'realm=\"([^\"]+)\"'",
",",
"authInfo",
")",
".",
"group",
"(",
"1",
")",
"info",
"[",
"\"qop\"",
"]",
"=",
"re",
".",
"search",
"(",
"'qop=\"([^\"]+)\"'",
",",
"authInfo",
")",
".",
"group",
"(",
"1",
")",
"info",
"[",
"\"nonce\"",
"]",
"=",
"re",
".",
"search",
"(",
"'nonce=\"([^\"]+)\"'",
",",
"authInfo",
")",
".",
"group",
"(",
"1",
")",
"return",
"info"
] | Parses the authentication information returned from a server and returns
a dictionary containing realm, qop, and nonce. | [
"Parses",
"the",
"authentication",
"information",
"returned",
"from",
"a",
"server",
"and",
"returns",
"a",
"dictionary",
"containing",
"realm",
"qop",
"and",
"nonce",
"."
] | [
"\"\"\" \r\n Parses the authentication information returned from a server and returns\r\n a dictionary containing realm, qop, and nonce.\r\n \r\n @see: L{AuthorizationError<webdav.Connection.AuthorizationError>} \r\n or the main function of this module.\r\n \"\"\""
] | [
{
"param": "authInfo",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "authInfo",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "see",
"docstring": "L{AuthorizationError}\nor the main function of this module.",
"docstring_tokens": [
"L",
"{",
"AuthorizationError",
"}",
"or",
"the",
"main",
"function",
"of",
"this",
"module",
"."
]
}
]
} | import re
def parseDigestAuthInfo(authInfo):
info = dict()
info["realm"] = re.search('realm="([^"]+)"', authInfo).group(1)
info["qop"] = re.search('qop="([^"]+)"', authInfo).group(1)
info["nonce"] = re.search('nonce="([^"]+)"', authInfo).group(1)
return info | 610,438 | 28 |
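A quick demonstration of parseDigestAuthInfo from the record above on a made-up WWW-Authenticate payload (import re mirrors the record's code_with_imports field):

import re

header = 'Digest realm="webdav", qop="auth", nonce="abc123"'
print(parseDigestAuthInfo(header))
# -> {'realm': 'webdav', 'qop': 'auth', 'nonce': 'abc123'}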
1a366a08502d57b102560ff90f71b4b67f40c354 | Muterra/py_hypergolix | tests/trashtest/trashdemo.py | [
"Unlicense"
] | Python | tearDownClass | null | def tearDownClass(cls):
''' Kill errything and then remove the caches.
'''
try:
cls.hgxlink2.stop_threadsafe(timeout=.5)
cls.hgxlink1.stop_threadsafe(timeout=.5)
cls.hgxcore2.stop_threadsafe(timeout=.5)
cls.hgxcore1.stop_threadsafe(timeout=.5)
cls.server.stop_threadsafe(timeout=.5)
finally:
shutil.rmtree(cls.hgxcore2_cachedir)
shutil.rmtree(cls.hgxcore1_cachedir)
shutil.rmtree(cls.server_cachedir) | Kill errything and then remove the caches.
| Kill errything and then remove the caches. | [
"Kill",
"errything",
"and",
"then",
"remove",
"the",
"caches",
"."
] | def tearDownClass(cls):
try:
cls.hgxlink2.stop_threadsafe(timeout=.5)
cls.hgxlink1.stop_threadsafe(timeout=.5)
cls.hgxcore2.stop_threadsafe(timeout=.5)
cls.hgxcore1.stop_threadsafe(timeout=.5)
cls.server.stop_threadsafe(timeout=.5)
finally:
shutil.rmtree(cls.hgxcore2_cachedir)
shutil.rmtree(cls.hgxcore1_cachedir)
shutil.rmtree(cls.server_cachedir) | [
"def",
"tearDownClass",
"(",
"cls",
")",
":",
"try",
":",
"cls",
".",
"hgxlink2",
".",
"stop_threadsafe",
"(",
"timeout",
"=",
".5",
")",
"cls",
".",
"hgxlink1",
".",
"stop_threadsafe",
"(",
"timeout",
"=",
".5",
")",
"cls",
".",
"hgxcore2",
".",
"stop_threadsafe",
"(",
"timeout",
"=",
".5",
")",
"cls",
".",
"hgxcore1",
".",
"stop_threadsafe",
"(",
"timeout",
"=",
".5",
")",
"cls",
".",
"server",
".",
"stop_threadsafe",
"(",
"timeout",
"=",
".5",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"cls",
".",
"hgxcore2_cachedir",
")",
"shutil",
".",
"rmtree",
"(",
"cls",
".",
"hgxcore1_cachedir",
")",
"shutil",
".",
"rmtree",
"(",
"cls",
".",
"server_cachedir",
")"
] | Kill errything and then remove the caches. | [
"Kill",
"errything",
"and",
"then",
"remove",
"the",
"caches",
"."
] | [
"''' Kill errything and then remove the caches.\n '''"
] | [
{
"param": "cls",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import shutil
def tearDownClass(cls):
try:
cls.hgxlink2.stop_threadsafe(timeout=.5)
cls.hgxlink1.stop_threadsafe(timeout=.5)
cls.hgxcore2.stop_threadsafe(timeout=.5)
cls.hgxcore1.stop_threadsafe(timeout=.5)
cls.server.stop_threadsafe(timeout=.5)
finally:
shutil.rmtree(cls.hgxcore2_cachedir)
shutil.rmtree(cls.hgxcore1_cachedir)
shutil.rmtree(cls.server_cachedir) | 610,439 | 91 |
71af048bf31fc4b659e549ca6abcfbd7082dd5cb | davtsang/slimplectic | slimplectic_GGL.py | [
"MIT"
] | Python | Physical_Limit | <not_specific> | def Physical_Limit(q_list, q_p_list, q_m_list, expression):
""" Physical_Limit takes the physical limit of a function of
the doubled +/- variables that is taking q_- -> 0 and q_+ -> q
The q_lists are expected to be 1-d lists.
If you are passing in q_tables please flatten them using
something like:
q_list = [qval for qvallist in qtable for qval in qvallist]
Physical_Limit outputs PL_Expr a sympy object equivalent to
expression with the physical limit taken.
Inputs:
q_list[dof] - list of sympy objects that correspond to the dof
q_p_list[dof] - list of sympy objects that correspond to the + dof
q_m_list[dof] - list of sympy objects that correspond to the - dof
Physical_Limit assumes that the q_lists share the same ordering.
"""
dof_count = len(q_list)
sub_list = []
for dof in range(dof_count):
sub_list.append((q_p_list[dof], q_list[dof]))
sub_list.append((q_m_list[dof], 0))
PL_Expr = expression.subs(sub_list)
return PL_Expr | Physical_Limit takes the physical limit of a function of
the doubled +/- variables that is taking q_- -> 0 and q_+ -> q
The q_lists are expected to be 1-d lists.
If you are passing in q_tables please flatten them using
something like:
q_list = [qval for qvallist in qtable for qval in qvallist]
Physical_Limit outputs PL_Expr a sympy object equivalent to
expression with the physical limit taken.
Inputs:
q_list[dof] - list of sympy objects that correspond to the dof
q_p_list[dof] - list of sympy objects that correspond to the + dof
q_m_list[dof] - list of sympy objects that correspond to the - dof
Physical_Limit assumes that the q_lists share the same ordering.
|
Physical_Limit outputs PL_Expr a sympy object equivalent to
expression with the physical limit taken.
q_list[dof] - list of sympy objects that correspond to the dof
q_p_list[dof] - list of sympy objects that correspond to the + dof
q_m_list[dof] - list of sympy objects that correspond to the - dof
Physical_Limit assumes that the q_lists share the same ordering. | [
"Physical_Limit",
"outputs",
"PL_Expr",
"an",
"sympy",
"object",
"equivalent",
"to",
"expression",
"with",
"the",
"physical",
"limit",
"taken",
".",
"q_list",
"[",
"dof",
"]",
"-",
"list",
"of",
"sympy",
"objects",
"that",
"correspond",
"to",
"the",
"dof",
"q_p_list",
"[",
"dof",
"]",
"-",
"list",
"of",
"sympy",
"objects",
"that",
"correspond",
"to",
"the",
"+",
"dof",
"q_m_list",
"[",
"dof",
"]",
"-",
"list",
"of",
"sympy",
"objects",
"that",
"corerspond",
"to",
"the",
"-",
"dof",
"Physical_Limit",
"assumes",
"that",
"the",
"q_lists",
"share",
"the",
"same",
"ordering",
"."
] | def Physical_Limit(q_list, q_p_list, q_m_list, expression):
dof_count = len(q_list)
sub_list = []
for dof in range(dof_count):
sub_list.append((q_p_list[dof], q_list[dof]))
sub_list.append((q_m_list[dof], 0))
PL_Expr = expression.subs(sub_list)
return PL_Expr | [
"def",
"Physical_Limit",
"(",
"q_list",
",",
"q_p_list",
",",
"q_m_list",
",",
"expression",
")",
":",
"dof_count",
"=",
"len",
"(",
"q_list",
")",
"sub_list",
"=",
"[",
"]",
"for",
"dof",
"in",
"range",
"(",
"dof_count",
")",
":",
"sub_list",
".",
"append",
"(",
"(",
"q_p_list",
"[",
"dof",
"]",
",",
"q_list",
"[",
"dof",
"]",
")",
")",
"sub_list",
".",
"append",
"(",
"(",
"q_m_list",
"[",
"dof",
"]",
",",
"0",
")",
")",
"PL_Expr",
"=",
"expression",
".",
"subs",
"(",
"sub_list",
")",
"return",
"PL_Expr"
] | Physical_Limit takes the physical limit of a function of
the doubled +/- variables that is taking q_- -> 0 and q_+ -> q
The q_lists are expected to be 1-d lists. | [
"Physical_Limit",
"takes",
"the",
"physical",
"limit",
"of",
"a",
"function",
"of",
"the",
"doubled",
"+",
"/",
"-",
"variables",
"that",
"is",
"taking",
"q_",
"-",
"-",
">",
"0",
"and",
"q_",
"+",
"-",
">",
"q",
"The",
"q_lists",
"are",
"expected",
"to",
"be",
"1",
"-",
"d",
"lists",
"."
] | [
"\"\"\" Physical_Limit takes the physical limit of a function of\n the doubled +/- variables that is taking q_- -> 0 and q_+ -> q\n The q_lists are expected to be 1-d lists.\n If you are passing in q_tables please flatten them using\n something like:\n q_list = [qval for qvallist in qtable for qval in qvallist]\n\n Physical_Limit outputs PL_Expr an sympy object equivalent to\n expression with the physical limit taken.\n\n Inputs:\n q_list[dof] - list of sympy objects that correspond to the dof\n q_p_list[dof] - list of sympy objects that correspond to the + dof\n q_m_list[dof] - list of sympy objects that corerspond to the - dof\n\n Physical_Limit assumes that the q_lists share the same ordering.\n \"\"\""
] | [
{
"param": "q_list",
"type": null
},
{
"param": "q_p_list",
"type": null
},
{
"param": "q_m_list",
"type": null
},
{
"param": "expression",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "q_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "q_p_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "q_m_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "expression",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def Physical_Limit(q_list, q_p_list, q_m_list, expression):
dof_count = len(q_list)
sub_list = []
for dof in range(dof_count):
sub_list.append((q_p_list[dof], q_list[dof]))
sub_list.append((q_m_list[dof], 0))
PL_Expr = expression.subs(sub_list)
return PL_Expr | 610,440 | 343 |
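A tiny SymPy check of Physical_Limit from the record above; the symbols and the expression are arbitrary:

import sympy as sp

q, q_p, q_m = sp.symbols('q q_p q_m')
expr = q_p**2 + q_p*q_m                         # doubled-variable expression
print(Physical_Limit([q], [q_p], [q_m], expr))  # -> q**2 after q_m -> 0, q_p -> q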
e68dc4b2dfc8b1d2a90cd6513ff0e40b211648dd | MauroLuzzatto/algorithmic-explanations | src/explanation/CategoryMapper.py | [
"Apache-2.0"
] | Python | key_to_int | <not_specific> | def key_to_int(mapper):
"""
convert the key of the json file to an integer
Args:
mapper (TYPE): DESCRIPTION.
Returns:
dict: DESCRIPTION.
"""
return {int(k): v for k, v in mapper.items()} |
convert the key of the json file to an integer
Args:
mapper (TYPE): DESCRIPTION.
Returns:
dict: DESCRIPTION.
| convert the key of the json file to an integer | [
"convert",
"the",
"key",
"of",
"the",
"json",
"file",
"to",
"an",
"integer"
] | def key_to_int(mapper):
return {int(k): v for k, v in mapper.items()} | [
"def",
"key_to_int",
"(",
"mapper",
")",
":",
"return",
"{",
"int",
"(",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"mapper",
".",
"items",
"(",
")",
"}"
] | convert the key of the json file to an integer | [
"convert",
"the",
"key",
"of",
"the",
"json",
"file",
"to",
"an",
"integer"
] | [
"\"\"\"\n convert the key of the json file to an integer\n\n Args:\n mapper (TYPE): DESCRIPTION.\n\n Returns:\n dict: DESCRIPTION.\n\n \"\"\""
] | [
{
"param": "mapper",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "dict"
}
],
"raises": [],
"params": [
{
"identifier": "mapper",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def key_to_int(mapper):
return {int(k): v for k, v in mapper.items()} | 610,441 | 89 |
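key_to_int from the record above applied to a freshly parsed JSON mapping, whose keys are always strings (illustrative only):

import json

mapper = json.loads('{"0": "cat", "1": "dog"}')
print(key_to_int(mapper))  # -> {0: 'cat', 1: 'dog'}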
4e8f459b43a6fd6253cfddf56a7dee0022571028 | AnimeshRy/virus-killer-pygame | resources/game_functions.py | [
"MIT"
] | Python | check_high_score | null | def check_high_score(stats, sb):
""""check to see if there is a new score then replace it """
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_highscore() | check to see if there is a new score then replace it | check to see if there is a new score then replace it | [
"check",
"to",
"see",
"if",
"there",
"is",
"a",
"new",
"score",
"then",
"replace",
"it"
] | def check_high_score(stats, sb):
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_highscore() | [
"def",
"check_high_score",
"(",
"stats",
",",
"sb",
")",
":",
"if",
"stats",
".",
"score",
">",
"stats",
".",
"high_score",
":",
"stats",
".",
"high_score",
"=",
"stats",
".",
"score",
"sb",
".",
"prep_highscore",
"(",
")"
] | check to see if there is a new score then replace it | [
"check",
"to",
"see",
"if",
"there",
"is",
"a",
"new",
"score",
"then",
"replace",
"it"
] | [
"\"\"\"\"check to see if there is a new score then replace it \"\"\""
] | [
{
"param": "stats",
"type": null
},
{
"param": "sb",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "stats",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sb",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def check_high_score(stats, sb):
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_highscore() | 610,442 | 572 |
b91d2a4196aeb52f5b5b246b06ec59dede30d48d | amole-arup/eng_utilities | eng_utilities/geometry_utilities.py | [
"MIT"
] | Python | distNDx | <not_specific> | def distNDx(pt1, pt2, limit=0):
"""Returns distance between two nD points (as two n-tuples)
It ignores items if they are not numeric, and also has
an option to limit length of tuples to a certain
length defined by the `limit` argument"""
if limit > 0:
return (sum((vv2 - vv1)**2.0 for i, (vv1, vv2) in enumerate(zip(pt1, pt2))
if isinstance(vv1, (int, float))
and isinstance(vv2, (int, float))
and i < limit))**0.5
else:
return (sum((vv2 - vv1)**2.0 for vv1, vv2 in zip(pt1, pt2)
if isinstance(vv1, (int, float))
and isinstance(vv2, (int, float))))**0.5 | Returns distance between two nD points (as two n-tuples)
It ignores items if they are not numeric, and also has
an option to limit length of tuples to a certain
length defined by the `limit` argument | Returns distance between two nD points (as two n-tuples)
It ignores items if they are not numeric, and also has
an option to limit length of tuples to a certain
length defined by the `limit` argument | [
"Returns",
"distance",
"between",
"two",
"nD",
"points",
"(",
"as",
"two",
"n",
"-",
"tuples",
")",
"It",
"ignores",
"items",
"if",
"they",
"are",
"not",
"numeric",
"and",
"also",
"has",
"an",
"option",
"to",
"limit",
"length",
"of",
"tuples",
"to",
"a",
"certain",
"length",
"defined",
"by",
"the",
"`",
"limit",
"`",
"argument"
] | def distNDx(pt1, pt2, limit=0):
if limit > 0:
return (sum((vv2 - vv1)**2.0 for i, (vv1, vv2) in enumerate(zip(pt1, pt2))
if isinstance(vv1, (int, float))
and isinstance(vv2, (int, float))
and i < limit))**0.5
else:
return (sum((vv2 - vv1)**2.0 for vv1, vv2 in zip(pt1, pt2)
if isinstance(vv1, (int, float))
and isinstance(vv2, (int, float))))**0.5 | [
"def",
"distNDx",
"(",
"pt1",
",",
"pt2",
",",
"limit",
"=",
"0",
")",
":",
"if",
"limit",
">",
"0",
":",
"return",
"(",
"sum",
"(",
"(",
"vv2",
"-",
"vv1",
")",
"**",
"2.0",
"for",
"i",
",",
"(",
"vv1",
",",
"vv2",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"pt1",
",",
"pt2",
")",
")",
"if",
"isinstance",
"(",
"vv1",
",",
"(",
"int",
",",
"float",
")",
")",
"and",
"isinstance",
"(",
"vv2",
",",
"(",
"int",
",",
"float",
")",
")",
"and",
"i",
"<",
"limit",
")",
")",
"**",
"0.5",
"else",
":",
"return",
"(",
"sum",
"(",
"(",
"vv2",
"-",
"vv1",
")",
"**",
"2.0",
"for",
"vv1",
",",
"vv2",
"in",
"zip",
"(",
"pt1",
",",
"pt2",
")",
"if",
"isinstance",
"(",
"vv1",
",",
"(",
"int",
",",
"float",
")",
")",
"and",
"isinstance",
"(",
"vv2",
",",
"(",
"int",
",",
"float",
")",
")",
")",
")",
"**",
"0.5"
] | Returns distance between two nD points (as two n-tuples)
It ignores items if they are not numeric, and also has
an option to limit length of tuples to a certain
length defined by the `limit` argument | [
"Returns",
"distance",
"between",
"two",
"nD",
"points",
"(",
"as",
"two",
"n",
"-",
"tuples",
")",
"It",
"ignores",
"items",
"if",
"they",
"are",
"not",
"numeric",
"and",
"also",
"has",
"an",
"option",
"to",
"limit",
"length",
"of",
"tuples",
"to",
"a",
"certain",
"length",
"defined",
"by",
"the",
"`",
"limit",
"`",
"argument"
] | [
"\"\"\"Returns distance between two nD points (as two n-tuples)\n It ignores items if they are not numeric, and also has\n an option to limit length of tuples to a certain \n length defined by the `limit` argument\"\"\""
] | [
{
"param": "pt1",
"type": null
},
{
"param": "pt2",
"type": null
},
{
"param": "limit",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "pt1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "pt2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "limit",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def distNDx(pt1, pt2, limit=0):
if limit > 0:
return (sum((vv2 - vv1)**2.0 for i, (vv1, vv2) in enumerate(zip(pt1, pt2))
if isinstance(vv1, (int, float))
and isinstance(vv2, (int, float))
and i < limit))**0.5
else:
return (sum((vv2 - vv1)**2.0 for vv1, vv2 in zip(pt1, pt2)
if isinstance(vv1, (int, float))
and isinstance(vv2, (int, float))))**0.5 | 610,443 | 645 |
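An illustrative call of distNDx from the record above, showing both the non-numeric filtering and the limit argument; the points are made up:

p1 = (0.0, 0.0, None, 5.0)
p2 = (3.0, 4.0, None, 9.0)
print(distNDx(p1, p2))           # sqrt(9 + 16 + 16) ~= 6.403 -- None entries skipped
print(distNDx(p1, p2, limit=2))  # sqrt(9 + 16) = 5.0 -- only first two coordinates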
c75db0d1f700eef2f23a27d9b59e96e090dbe904 | MikalaiDrabovich/tensorflow | tensorflow/tensorboard/tensorboard.py | [
"Apache-2.0"
] | Python | ParseEventFilesFlag | <not_specific> | def ParseEventFilesFlag(flag_value):
"""Parses the logdir flag into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
flag_value: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If flag_value
is None, returns an empty dict, which is helpful for testing things that
don't require any valid runs.
"""
files = {}
if flag_value is None:
return files
for specification in flag_value.split(','):
# If the spec looks like /foo:bar/baz, then we assume it's a path with a
# colon.
if ':' in specification and specification[0] != '/':
# We split at most once so run_name:/path:with/a/colon will work.
run_name, path = specification.split(':', 1)
else:
run_name = None
path = specification
files[path] = run_name
return files | Parses the logdir flag into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
flag_value: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If flag_value
is None, returns an empty dict, which is helpful for testing things that
don't require any valid runs.
| Parses the logdir flag into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
Globs are not supported. | [
"Parses",
"the",
"logdir",
"flag",
"into",
"a",
"map",
"from",
"paths",
"to",
"run",
"group",
"names",
".",
"The",
"events",
"files",
"flag",
"format",
"is",
"a",
"comma",
"-",
"separated",
"list",
"of",
"path",
"specifications",
".",
"Globs",
"are",
"not",
"supported",
"."
] | def ParseEventFilesFlag(flag_value):
files = {}
if flag_value is None:
return files
for specification in flag_value.split(','):
if ':' in specification and specification[0] != '/':
run_name, path = specification.split(':', 1)
else:
run_name = None
path = specification
files[path] = run_name
return files | [
"def",
"ParseEventFilesFlag",
"(",
"flag_value",
")",
":",
"files",
"=",
"{",
"}",
"if",
"flag_value",
"is",
"None",
":",
"return",
"files",
"for",
"specification",
"in",
"flag_value",
".",
"split",
"(",
"','",
")",
":",
"if",
"':'",
"in",
"specification",
"and",
"specification",
"[",
"0",
"]",
"!=",
"'/'",
":",
"run_name",
",",
"path",
"=",
"specification",
".",
"split",
"(",
"':'",
",",
"1",
")",
"else",
":",
"run_name",
"=",
"None",
"path",
"=",
"specification",
"files",
"[",
"path",
"]",
"=",
"run_name",
"return",
"files"
] | Parses the logdir flag into a map from paths to run group names. | [
"Parses",
"the",
"logdir",
"flag",
"into",
"a",
"map",
"from",
"paths",
"to",
"run",
"group",
"names",
"."
] | [
"\"\"\"Parses the logdir flag into a map from paths to run group names.\n\n The events files flag format is a comma-separated list of path specifications.\n A path specification either looks like 'group_name:/path/to/directory' or\n '/path/to/directory'; in the latter case, the group is unnamed. Group names\n cannot start with a forward slash: /foo:bar/baz will be interpreted as a\n spec with no name and path '/foo:bar/baz'.\n\n Globs are not supported.\n\n Args:\n flag_value: A comma-separated list of run specifications.\n Returns:\n A dict mapping directory paths to names like {'/path/to/directory': 'name'}.\n Groups without an explicit name are named after their path. If flag_value\n is None, returns an empty dict, which is helpful for testing things that\n don't require any valid runs.\n \"\"\"",
"# If the spec looks like /foo:bar/baz, then we assume it's a path with a",
"# colon.",
"# We split at most once so run_name:/path:with/a/colon will work."
] | [
{
"param": "flag_value",
"type": null
}
] | {
"returns": [
{
"docstring": "A dict mapping directory paths to names like {'/path/to/directory': 'name'}.\nGroups without an explicit name are named after their path. If flag_value\nis None, returns an empty dict, which is helpful for testing things that\ndon't require any valid runs.",
"docstring_tokens": [
"A",
"dict",
"mapping",
"directory",
"paths",
"to",
"names",
"like",
"{",
"'",
"/",
"path",
"/",
"to",
"/",
"directory",
"'",
":",
"'",
"name",
"'",
"}",
".",
"Groups",
"without",
"an",
"explicit",
"name",
"are",
"named",
"after",
"their",
"path",
".",
"If",
"flag_value",
"is",
"None",
"returns",
"an",
"empty",
"dict",
"which",
"is",
"helpful",
"for",
"testing",
"things",
"that",
"don",
"'",
"t",
"require",
"any",
"valid",
"runs",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "flag_value",
"type": null,
"docstring": "A comma-separated list of run specifications.",
"docstring_tokens": [
"A",
"comma",
"-",
"separated",
"list",
"of",
"run",
"specifications",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def ParseEventFilesFlag(flag_value):
files = {}
if flag_value is None:
return files
for specification in flag_value.split(','):
if ':' in specification and specification[0] != '/':
run_name, path = specification.split(':', 1)
else:
run_name = None
path = specification
files[path] = run_name
return files | 610,444 | 987 |
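ParseEventFilesFlag from the record above on a sample spec mixing a named and an unnamed run (paths are hypothetical):

print(ParseEventFilesFlag('train:/tmp/runs/a,/tmp/runs/b'))
# -> {'/tmp/runs/a': 'train', '/tmp/runs/b': None}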
157e74966941c5bca413c59a7775d6254681941e | z/xonotic-map-manager | xmm/util.py | [
"MIT"
] | Python | replace_last | <not_specific> | def replace_last(string, old, new):
"""
Replace the last occurrence of a pattern in a string
:param string:
string
:type string: ``str``
:param old:
string to find
:type old: ``str``
:param new:
string to replace
:type new: ``str``
:returns: ``str``
"""
return string[::-1].replace(old[::-1], new[::-1], 1)[::-1] |
Replace the last occurrence of a pattern in a string
:param string:
string
:type string: ``str``
:param old:
string to find
:type old: ``str``
:param new:
string to replace
:type new: ``str``
:returns: ``str``
| Replace the last occurrence of a pattern in a string | [
"Replace",
"the",
"last",
"occurrence",
"of",
"a",
"pattern",
"in",
"a",
"string"
] | def replace_last(string, old, new):
return string[::-1].replace(old[::-1], new[::-1], 1)[::-1] | [
"def",
"replace_last",
"(",
"string",
",",
"old",
",",
"new",
")",
":",
"return",
"string",
"[",
":",
":",
"-",
"1",
"]",
".",
"replace",
"(",
"old",
"[",
":",
":",
"-",
"1",
"]",
",",
"new",
"[",
":",
":",
"-",
"1",
"]",
",",
"1",
")",
"[",
":",
":",
"-",
"1",
"]"
] | Replace the last occurrence of a pattern in a string | [
"Replace",
"the",
"last",
"occurrence",
"of",
"a",
"pattern",
"in",
"a",
"string"
] | [
"\"\"\"\n Replace the last occurrence of a pattern in a string\n\n :param string:\n string\n :type string: ``str``\n\n :param old:\n string to find\n :type old: ``str``\n\n :param new:\n string to replace\n :type new: ``str``\n\n :returns: ``str``\n \"\"\""
] | [
{
"param": "string",
"type": null
},
{
"param": "old",
"type": null
},
{
"param": "new",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "string",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "old",
"type": null,
"docstring": "string to find",
"docstring_tokens": [
"string",
"to",
"find"
],
"default": null,
"is_optional": null
},
{
"identifier": "new",
"type": null,
"docstring": "string to replace",
"docstring_tokens": [
"string",
"to",
"replace"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def replace_last(string, old, new):
return string[::-1].replace(old[::-1], new[::-1], 1)[::-1] | 610,445 | 541 |
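A one-line check of replace_last from the record above, which reverses the string so that str.replace's first hit is the original's last occurrence:

print(replace_last("a-b-c", "-", "+"))  # -> 'a-b+c' -- only the final '-' is rewritten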
1d116e6fac9c0b947be39da4941a814b9c508d6d | Binse-Park/lisa_ARM | external/trappy/trappy/plotter/Utils.py | [
"Apache-2.0"
] | Python | decolonize | <not_specific> | def decolonize(val):
"""Remove the colon at the end of the word
This will be used by the unique word of
template class to sanitize attr accesses
"""
return val.strip(":") | Remove the colon at the end of the word
This will be used by the unique word of
template class to sanitize attr accesses
| Remove the colon at the end of the word
This will be used by the unique word of
template class to sanitize attr accesses | [
"Remove",
"the",
"colon",
"at",
"the",
"end",
"of",
"the",
"word",
"This",
"will",
"be",
"used",
"by",
"the",
"unique",
"word",
"of",
"template",
"class",
"to",
"sanitize",
"attr",
"accesses"
] | def decolonize(val):
return val.strip(":") | [
"def",
"decolonize",
"(",
"val",
")",
":",
"return",
"val",
".",
"strip",
"(",
"\":\"",
")"
] | Remove the colon at the end of the word
This will be used by the unique word of
template class to sanitize attr accesses | [
"Remove",
"the",
"colon",
"at",
"the",
"end",
"of",
"the",
"word",
"This",
"will",
"be",
"used",
"by",
"the",
"unique",
"word",
"of",
"template",
"class",
"to",
"sanitize",
"attr",
"accesses"
] | [
"\"\"\"Remove the colon at the end of the word\n This will be used by the unique word of\n template class to sanitize attr accesses\n \"\"\""
] | [
{
"param": "val",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "val",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def decolonize(val):
return val.strip(":") | 610,446 | 1 |
de0ddce0e085082f0caf00fe2a8b80f06a505ef8 | DouglasAbrams/remixt | remixt/likelihood.py | [
"MIT"
] | Python | estimate_phi | <not_specific> | def estimate_phi(x):
""" Estimate proportion of genotypable reads.
Args:
x (numpy.array): major, minor, and total read counts
Returns:
numpy.array: estimate of proportion of genotypable reads.
"""
phi = x[:,0:2].sum(axis=1).astype(float) / (x[:,2].astype(float) + 1.0)
return phi | Estimate proportion of genotypable reads.
Args:
x (numpy.array): major, minor, and total read counts
Returns:
numpy.array: estimate of proportion of genotypable reads.
| Estimate proportion of genotypable reads. | [
"Estimate",
"proportion",
"of",
"genotypable",
"reads",
"."
] | def estimate_phi(x):
phi = x[:,0:2].sum(axis=1).astype(float) / (x[:,2].astype(float) + 1.0)
return phi | [
"def",
"estimate_phi",
"(",
"x",
")",
":",
"phi",
"=",
"x",
"[",
":",
",",
"0",
":",
"2",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"astype",
"(",
"float",
")",
"/",
"(",
"x",
"[",
":",
",",
"2",
"]",
".",
"astype",
"(",
"float",
")",
"+",
"1.0",
")",
"return",
"phi"
] | Estimate proportion of genotypable reads. | [
"Estimate",
"proportion",
"of",
"genotypable",
"reads",
"."
] | [
"\"\"\" Estimate proportion of genotypable reads.\n\n Args:\n x (numpy.array): major, minor, and total read counts\n\n Returns:\n numpy.array: estimate of proportion of genotypable reads.\n\n \"\"\""
] | [
{
"param": "x",
"type": null
}
] | {
"returns": [
{
"docstring": "estimate of proportion of genotypable reads.",
"docstring_tokens": [
"estimate",
"of",
"proportion",
"of",
"genotypable",
"reads",
"."
],
"type": "numpy.array"
}
],
"raises": [],
"params": [
{
"identifier": "x",
"type": null,
"docstring": "major, minor, and total read counts",
"docstring_tokens": [
"major",
"minor",
"and",
"total",
"read",
"counts"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def estimate_phi(x):
phi = x[:,0:2].sum(axis=1).astype(float) / (x[:,2].astype(float) + 1.0)
return phi | 610,447 | 836 |
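estimate_phi from the record above on a toy count matrix whose columns are major, minor, and total read counts (the numbers are invented):

import numpy as np

x = np.array([[4, 3, 9],
              [0, 0, 5]])
print(estimate_phi(x))  # -> [0.7 0. ] i.e. (4+3)/(9+1) and 0/(5+1)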
4a40a6c202563fcd69707d3a19a8ba3ba8747caa | tthyer/toil | attic/toil-sort-example.py | [
"Apache-2.0"
] | Python | copy_subrange_of_file | null | def copy_subrange_of_file(input_file, file_start, file_end, output_filehandle):
"""Copies the range (in bytes) between fileStart and fileEnd to the given
output file handle.
"""
with open(input_file, 'r') as fileHandle:
fileHandle.seek(file_start)
data = fileHandle.read(file_end - file_start)
assert len(data) == file_end - file_start
output_filehandle.write(data) | Copies the range (in bytes) between fileStart and fileEnd to the given
output file handle.
| Copies the range (in bytes) between fileStart and fileEnd to the given
output file handle. | [
"Copies",
"the",
"range",
"(",
"in",
"bytes",
")",
"between",
"fileStart",
"and",
"fileEnd",
"to",
"the",
"given",
"output",
"file",
"handle",
"."
] | def copy_subrange_of_file(input_file, file_start, file_end, output_filehandle):
with open(input_file, 'r') as fileHandle:
fileHandle.seek(file_start)
data = fileHandle.read(file_end - file_start)
assert len(data) == file_end - file_start
output_filehandle.write(data) | [
"def",
"copy_subrange_of_file",
"(",
"input_file",
",",
"file_start",
",",
"file_end",
",",
"output_filehandle",
")",
":",
"with",
"open",
"(",
"input_file",
",",
"'r'",
")",
"as",
"fileHandle",
":",
"fileHandle",
".",
"seek",
"(",
"file_start",
")",
"data",
"=",
"fileHandle",
".",
"read",
"(",
"file_end",
"-",
"file_start",
")",
"assert",
"len",
"(",
"data",
")",
"==",
"file_end",
"-",
"file_start",
"output_filehandle",
".",
"write",
"(",
"data",
")"
] | Copies the range (in bytes) between fileStart and fileEnd to the given
output file handle. | [
"Copies",
"the",
"range",
"(",
"in",
"bytes",
")",
"between",
"fileStart",
"and",
"fileEnd",
"to",
"the",
"given",
"output",
"file",
"handle",
"."
] | [
"\"\"\"Copies the range (in bytes) between fileStart and fileEnd to the given\n output file handle.\n \"\"\""
] | [
{
"param": "input_file",
"type": null
},
{
"param": "file_start",
"type": null
},
{
"param": "file_end",
"type": null
},
{
"param": "output_filehandle",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "input_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "file_start",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "file_end",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "output_filehandle",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def copy_subrange_of_file(input_file, file_start, file_end, output_filehandle):
with open(input_file, 'r') as fileHandle:
fileHandle.seek(file_start)
data = fileHandle.read(file_end - file_start)
assert len(data) == file_end - file_start
output_filehandle.write(data) | 610,448 | 233 |
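A self-contained sketch of copy_subrange_of_file from the record above, using a throwaway temp file and stdout as the output handle:

import os
import sys
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as src:
    src.write("hello world")
copy_subrange_of_file(src.name, 6, 11, sys.stdout)  # prints 'world' (characters 6..10)
os.remove(src.name)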
3a35c5189e95dd2b66c60e3a921b3aa3fc97b23a | ffahri/sudokusolver | main.py | [
"Apache-2.0"
] | Python | infer_grid | <not_specific> | def infer_grid(img):
"""Infers 81 cell grid from a square image."""
squares = []
side = img.shape[:1]
side = side[0] / 9
for i in range(9):
for j in range(9):
p1 = (i * side, j * side) # Top left corner of a bounding box
p2 = ((i + 1) * side, (j + 1) * side) # Bottom right corner of bounding box
squares.append((p1, p2))
#print(squares)
return squares | Infers 81 cell grid from a square image. | Infers 81 cell grid from a square image. | [
"Infers",
"81",
"cell",
"grid",
"from",
"a",
"square",
"image",
"."
] | def infer_grid(img):
squares = []
side = img.shape[:1]
side = side[0] / 9
for i in range(9):
for j in range(9):
p1 = (i * side, j * side)
p2 = ((i + 1) * side, (j + 1) * side)
squares.append((p1, p2))
return squares | [
"def",
"infer_grid",
"(",
"img",
")",
":",
"squares",
"=",
"[",
"]",
"side",
"=",
"img",
".",
"shape",
"[",
":",
"1",
"]",
"side",
"=",
"side",
"[",
"0",
"]",
"/",
"9",
"for",
"i",
"in",
"range",
"(",
"9",
")",
":",
"for",
"j",
"in",
"range",
"(",
"9",
")",
":",
"p1",
"=",
"(",
"i",
"*",
"side",
",",
"j",
"*",
"side",
")",
"p2",
"=",
"(",
"(",
"i",
"+",
"1",
")",
"*",
"side",
",",
"(",
"j",
"+",
"1",
")",
"*",
"side",
")",
"squares",
".",
"append",
"(",
"(",
"p1",
",",
"p2",
")",
")",
"return",
"squares"
] | Infers 81 cell grid from a square image. | [
"Infers",
"81",
"cell",
"grid",
"from",
"a",
"square",
"image",
"."
] | [
"\"\"\"Infers 81 cell grid from a square image.\"\"\"",
"# Top left corner of a bounding box",
"# Bottom right corner of bounding box",
"#print(squares)"
] | [
{
"param": "img",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "img",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def infer_grid(img):
squares = []
side = img.shape[:1]
side = side[0] / 9
for i in range(9):
for j in range(9):
p1 = (i * side, j * side)
p2 = ((i + 1) * side, (j + 1) * side)
squares.append((p1, p2))
return squares | 610,449 | 479 |
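infer_grid from the record above on a blank square array standing in for a cropped sudoku photo (illustrative only):

import numpy as np

img = np.zeros((450, 450), dtype=np.uint8)
squares = infer_grid(img)
print(len(squares))  # -> 81 cells
print(squares[0])    # -> ((0.0, 0.0), (50.0, 50.0)) -- first bounding box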
e0f6e82294c99f7c00ccb334d84f83ada8185ce4 | amunchet/cormorant | phase_one/generator/generator.py | [
"MIT"
] | Python | check | <not_specific> | def check(filename, combination):
"""Checks if a given combination is in the filename"""
if not os.path.exists(filename):
return False
with open(filename) as f:
return str(combination) in f.read() | Checks if a given combination is in the filename | Checks if a given combination is in the filename | [
"Checks",
"if",
"a",
"given",
"combination",
"is",
"in",
"the",
"filename"
] | def check(filename, combination):
if not os.path.exists(filename):
return False
with open(filename) as f:
return str(combination) in f.read() | [
"def",
"check",
"(",
"filename",
",",
"combination",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"return",
"False",
"with",
"open",
"(",
"filename",
")",
"as",
"f",
":",
"return",
"str",
"(",
"combination",
")",
"in",
"f",
".",
"read",
"(",
")"
] | Checks if a given combination is in the filename | [
"Checks",
"if",
"a",
"given",
"combination",
"is",
"in",
"the",
"filename"
] | [
"\"\"\"Checks if a given combination is in the filename\"\"\""
] | [
{
"param": "filename",
"type": null
},
{
"param": "combination",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "combination",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def check(filename, combination):
if not os.path.exists(filename):
return False
with open(filename) as f:
return str(combination) in f.read() | 610,450 | 218 |
8c42357a458a3906dec311f147cc5a2f046c1a64 | MichiKal/healthcare-resilience | Code/Dynamics_Final.py | [
"MIT"
] | Python | pick_doctors_to_remove | <not_specific> | def pick_doctors_to_remove(doctors, N, rng):
'''
Pick N random doctors to be removed.
'''
rem_index = rng.choice(range(len(doctors)), N, replace=False)
return rem_index |
Pick N random doctors to be removed.
| Pick N random doctors to be removed. | [
"Pick",
"N",
"random",
"doctors",
"to",
"be",
"removed",
"."
] | def pick_doctors_to_remove(doctors, N, rng):
rem_index = rng.choice(range(len(doctors)), N, replace=False)
return rem_index | [
"def",
"pick_doctors_to_remove",
"(",
"doctors",
",",
"N",
",",
"rng",
")",
":",
"rem_index",
"=",
"rng",
".",
"choice",
"(",
"range",
"(",
"len",
"(",
"doctors",
")",
")",
",",
"N",
",",
"replace",
"=",
"False",
")",
"return",
"rem_index"
] | Pick N random doctors to be removed. | [
"Pick",
"N",
"random",
"doctors",
"to",
"be",
"removed",
"."
] | [
"'''\r\n Pick N random doctors to be removed. \r\n '''"
] | [
{
"param": "doctors",
"type": null
},
{
"param": "N",
"type": null
},
{
"param": "rng",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "doctors",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "N",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "rng",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def pick_doctors_to_remove(doctors, N, rng):
rem_index = rng.choice(range(len(doctors)), N, replace=False)
return rem_index | 610,451 | 646 |
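pick_doctors_to_remove from the record above, assuming the rng argument is a NumPy Generator (consistent with the choice(..., replace=False) call, though the original caller is not shown):

import numpy as np

rng = np.random.default_rng(seed=0)
doctors = ['doc%d' % i for i in range(10)]
print(pick_doctors_to_remove(doctors, 3, rng))  # three distinct random indices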
c9966ca5931dd3e41fbdf8784ee318a5e4ddf520 | SpencerEricksen/informers | source/informer_functions.py | [
"MIT"
] | Python | inf_sel_max_prom_global | <not_specific> | def inf_sel_max_prom_global( n_informers, df_binary ):
''' given n_informers and df_binary, return a list of maximally promiscuous cpds to be
used as the promiscuous informer set. Note, rather than LOTO, a one-time global
list is generated '''
inf_list = df_binary.sum(axis=1).sort_values(ascending=False, na_position='last' ).head( n_informers ).index
return inf_list | given n_informers and df_binary, return a list of maximally promiscuous cpds to be
used as the promiscuous informer set. Note, rather than LOTO, a one-time global
list is generated | given n_informers and df_binary, return a list of maximally promiscuous cpds to be
used as the promiscuous informer set. Note, rather than LOTO, a one-time global
list is generated | [
"given",
"n_informers",
"and",
"df_binary",
"return",
"a",
"list",
"of",
"maximally",
"promiscuous",
"cpds",
"to",
"be",
"used",
"as",
"the",
"promiscuous",
"informer",
"set",
".",
"Note",
"rather",
"than",
"LOTO",
"a",
"one",
"-",
"time",
"global",
"list",
"is",
"generated"
] | def inf_sel_max_prom_global( n_informers, df_binary ):
inf_list = df_binary.sum(axis=1).sort_values(ascending=False, na_position='last' ).head( n_informers ).index
return inf_list | [
"def",
"inf_sel_max_prom_global",
"(",
"n_informers",
",",
"df_binary",
")",
":",
"inf_list",
"=",
"df_binary",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"sort_values",
"(",
"ascending",
"=",
"False",
",",
"na_position",
"=",
"'last'",
")",
".",
"head",
"(",
"n_informers",
")",
".",
"index",
"return",
"inf_list"
] | given n_informers and df_binary, return a list of maximally promiscuous cpds to be
used as the promiscuous informer set. | [
"given",
"n_informers",
"and",
"df_binary",
"return",
"a",
"list",
"of",
"maximally",
"promiscuous",
"cpds",
"to",
"be",
"used",
"as",
"the",
"promiscuous",
"informer",
"set",
"."
] | [
"''' given n_informers and df_binary, return a list of maximally promiscuous cpds to be\n used as the promiscuous informer set. Note, rather than LOTO, a one-time global \n list is generated '''"
] | [
{
"param": "n_informers",
"type": null
},
{
"param": "df_binary",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "n_informers",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "df_binary",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def inf_sel_max_prom_global( n_informers, df_binary ):
inf_list = df_binary.sum(axis=1).sort_values(ascending=False, na_position='last' ).head( n_informers ).index
return inf_list | 610,452 | 635 |
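inf_sel_max_prom_global from the record above on a toy binary activity matrix; rows are compounds, columns are assays, and all values are invented:

import pandas as pd

df_binary = pd.DataFrame({'a1': [1, 0, 1], 'a2': [1, 0, 1], 'a3': [0, 1, 1]},
                         index=['c1', 'c2', 'c3'])
print(list(inf_sel_max_prom_global(2, df_binary)))  # -> ['c3', 'c1'] -- most promiscuous first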
d18671df246d3a7112763c83aa53073dd781884f | curtislb/ProjectEuler | py/common/arrays.py | [
"MIT"
] | Python | argmax | int | def argmax(values: Sequence[Comparable]) -> int:
"""Finds the index of the maximum value in a sequence.
Args:
values: A sequence of values that can be compared to one another.
Returns:
The index (from 0 to ``len(values) - 1``) of the first element in
``values`` that is greater than or equal to all others.
"""
max_index = 0
max_value = values[0]
for i, value in enumerate(values):
if value > max_value:
max_index = i
max_value = value
return max_index | Finds the index of the maximum value in a sequence.
Args:
values: A sequence of values that can be compared to one another.
Returns:
The index (from 0 to ``len(values) - 1``) of the first element in
``values`` that is greater than or equal to all others.
| Finds the index of the maximum value in a sequence. | [
"Finds",
"the",
"index",
"of",
"the",
"maximum",
"value",
"in",
"a",
"sequence",
"."
] | def argmax(values: Sequence[Comparable]) -> int:
max_index = 0
max_value = values[0]
for i, value in enumerate(values):
if value > max_value:
max_index = i
max_value = value
return max_index | [
"def",
"argmax",
"(",
"values",
":",
"Sequence",
"[",
"Comparable",
"]",
")",
"->",
"int",
":",
"max_index",
"=",
"0",
"max_value",
"=",
"values",
"[",
"0",
"]",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"values",
")",
":",
"if",
"value",
">",
"max_value",
":",
"max_index",
"=",
"i",
"max_value",
"=",
"value",
"return",
"max_index"
] | Finds the index of the maximum value in a sequence. | [
"Finds",
"the",
"index",
"of",
"the",
"maximum",
"value",
"in",
"a",
"sequence",
"."
] | [
"\"\"\"Finds the index of the maximum value in a sequence.\n\n Args:\n values: A sequence of values that can be compared to one another.\n\n Returns:\n The index (from 0 to ``len(values) - 1``) of the first element in\n ``values`` that is greater than or equal to all others.\n \"\"\""
] | [
{
"param": "values",
"type": "Sequence[Comparable]"
}
] | {
"returns": [
{
"docstring": "The index (from 0 to ``len(values) - 1``) of the first element in\n``values`` that is greater than or equal to all others.",
"docstring_tokens": [
"The",
"index",
"(",
"from",
"0",
"to",
"`",
"`",
"len",
"(",
"values",
")",
"-",
"1",
"`",
"`",
")",
"of",
"the",
"first",
"element",
"in",
"`",
"`",
"values",
"`",
"`",
"that",
"is",
"greater",
"than",
"or",
"equal",
"to",
"all",
"others",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "values",
"type": "Sequence[Comparable]",
"docstring": "A sequence of values that can be compared to one another.",
"docstring_tokens": [
"A",
"sequence",
"of",
"values",
"that",
"can",
"be",
"compared",
"to",
"one",
"another",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def argmax(values: Sequence[Comparable]) -> int:
max_index = 0
max_value = values[0]
for i, value in enumerate(values):
if value > max_value:
max_index = i
max_value = value
return max_index | 610,453 | 245 |
0548c4dcce191494a91ad818f4d461af109c8a52 | cdetrio/octopus | octopus/arch/wasm/cfg.py | [
"MIT"
] | Python | enum_func_name_call_indirect | <not_specific> | def enum_func_name_call_indirect(functions):
''' return a list of function name if they used call_indirect
'''
func_name = list()
# iterate over functions
for func in functions:
for inst in func.instructions:
if inst.name == "call_indirect":
func_name.append(func.name)
func_name = list(set(func_name))
return func_name | return a list of function name if they used call_indirect
| return a list of function name if they used call_indirect | [
"return",
"a",
"list",
"of",
"function",
"name",
"if",
"they",
"used",
"call_indirect"
] | def enum_func_name_call_indirect(functions):
func_name = list()
for func in functions:
for inst in func.instructions:
if inst.name == "call_indirect":
func_name.append(func.name)
func_name = list(set(func_name))
return func_name | [
"def",
"enum_func_name_call_indirect",
"(",
"functions",
")",
":",
"func_name",
"=",
"list",
"(",
")",
"for",
"func",
"in",
"functions",
":",
"for",
"inst",
"in",
"func",
".",
"instructions",
":",
"if",
"inst",
".",
"name",
"==",
"\"call_indirect\"",
":",
"func_name",
".",
"append",
"(",
"func",
".",
"name",
")",
"func_name",
"=",
"list",
"(",
"set",
"(",
"func_name",
")",
")",
"return",
"func_name"
] | return a list of function name if they used call_indirect | [
"return",
"a",
"list",
"of",
"function",
"name",
"if",
"they",
"used",
"call_indirect"
] | [
"''' return a list of function name if they used call_indirect\n '''",
"# iterate over functions"
] | [
{
"param": "functions",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "functions",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def enum_func_name_call_indirect(functions):
func_name = list()
for func in functions:
for inst in func.instructions:
if inst.name == "call_indirect":
func_name.append(func.name)
func_name = list(set(func_name))
return func_name | 610,454 | 230 |
b6c5ffb2845a5142028cde07ccaa83a8c5d5307e | ymenghank/MinimalCFG | mincfg/mincfg.py | [
"MIT"
] | Python | minimize_rules_1 | <not_specific> | def minimize_rules_1(G):
'''
Perform the following (trivial) minimization:
- remove duplicated rules
- remove self-produced rule
'''
# remove duplicated rules
G2 = []
for rule1 in G:
duplicated = False
for rule2 in G2:
if rule1[0] == rule2[0] and rule1[1] == rule2[1]:
duplicated = True
break
if not duplicated:
G2.append(rule1)
G = G2
# remove self-produced rule
G2 = []
for nt, sub in G:
if len(sub) == 1 and nt == sub[0]:
continue
G2.append((nt, sub))
G = G2
return G |
Perform the following (trivial) minimization:
- remove duplicated rules
- remove self-produced rule
| Perform the following (trivial) minimization:
remove duplicated rules
remove self-produced rule | [
"Perform",
"the",
"following",
"(",
"trivial",
")",
"minimization",
":",
"remove",
"duplicated",
"rules",
"remove",
"self",
"-",
"produced",
"rule"
] | def minimize_rules_1(G):
G2 = []
for rule1 in G:
duplicated = False
for rule2 in G2:
if rule1[0] == rule2[0] and rule1[1] == rule2[1]:
duplicated = True
break
if not duplicated:
G2.append(rule1)
G = G2
G2 = []
for nt, sub in G:
if len(sub) == 1 and nt == sub[0]:
continue
G2.append((nt, sub))
G = G2
return G | [
"def",
"minimize_rules_1",
"(",
"G",
")",
":",
"G2",
"=",
"[",
"]",
"for",
"rule1",
"in",
"G",
":",
"duplicated",
"=",
"False",
"for",
"rule2",
"in",
"G2",
":",
"if",
"rule1",
"[",
"0",
"]",
"==",
"rule2",
"[",
"0",
"]",
"and",
"rule1",
"[",
"1",
"]",
"==",
"rule2",
"[",
"1",
"]",
":",
"duplicated",
"=",
"True",
"break",
"if",
"not",
"duplicated",
":",
"G2",
".",
"append",
"(",
"rule1",
")",
"G",
"=",
"G2",
"G2",
"=",
"[",
"]",
"for",
"nt",
",",
"sub",
"in",
"G",
":",
"if",
"len",
"(",
"sub",
")",
"==",
"1",
"and",
"nt",
"==",
"sub",
"[",
"0",
"]",
":",
"continue",
"G2",
".",
"append",
"(",
"(",
"nt",
",",
"sub",
")",
")",
"G",
"=",
"G2",
"return",
"G"
] | Perform the following (trivial) minimization:
remove duplicated rules
remove self-produced rule | [
"Perform",
"the",
"following",
"(",
"trivial",
")",
"minimization",
":",
"remove",
"duplicated",
"rules",
"remove",
"self",
"-",
"produced",
"rule"
] | [
"'''\r\n Perform the following (trivial) minimization:\r\n - remove duplicated rules\r\n - remove self-produced rule\r\n '''",
"# remove duplicated rules\r",
"# remove self-produced rule\r"
] | [
{
"param": "G",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "G",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def minimize_rules_1(G):
G2 = []
for rule1 in G:
duplicated = False
for rule2 in G2:
if rule1[0] == rule2[0] and rule1[1] == rule2[1]:
duplicated = True
break
if not duplicated:
G2.append(rule1)
G = G2
G2 = []
for nt, sub in G:
if len(sub) == 1 and nt == sub[0]:
continue
G2.append((nt, sub))
G = G2
return G | 610,455 | 613 |
32b67d2a93809833d40b5242b9a8f32eccd0589c | elgalu/iometrics | tasks.py | [
"Apache-2.0"
] | Python | lint | null | def lint(c): # type: ignore
"""Lint files and save report."""
c.run(
"""
poetry run pylint --rcfile pyproject.toml -j 0 \
iometrics > pylint-report.txt
""".strip()
) | Lint files and save report. | Lint files and save report. | [
"Lint",
"files",
"and",
"save",
"report",
"."
] | def lint(c):
c.run(
"""
poetry run pylint --rcfile pyproject.toml -j 0 \
iometrics > pylint-report.txt
""".strip()
) | [
"def",
"lint",
"(",
"c",
")",
":",
"c",
".",
"run",
"(",
"\"\"\"\n poetry run pylint --rcfile pyproject.toml -j 0 \\\n iometrics > pylint-report.txt\n \"\"\"",
".",
"strip",
"(",
")",
")"
] | Lint files and save report. | [
"Lint",
"files",
"and",
"save",
"report",
"."
] | [
"# type: ignore",
"\"\"\"Lint files and save report.\"\"\""
] | [
{
"param": "c",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "c",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def lint(c):
c.run(
"""
poetry run pylint --rcfile pyproject.toml -j 0 \
iometrics > pylint-report.txt
""".strip()
) | 610,456 | 795 |
6187f1ab3600cc93ea36dc168d819d8be2e9caf6 | BrianGallew/cassandra_range_repair | src/range_repair.py | [
"MIT"
] | Python | _build_repair_dict | <not_specific> | def _build_repair_dict(cmd, step, start, end, nodeposition, keyspace=None, column_families=None):
"""
Build a standard repair step dict.
:param cmd: Repair command.
:param step: Step number.
:param start: Start range.
:param end: End range.
:param nodeposition: Node position.
:param keyspace: Keyspace being repaired.
:param column_families: Column families being repaired.
:rtype: dict
:return: Dict of repair step info.
"""
return {
'time': datetime.now().isoformat(),
'step': step,
'start': start,
'end': end,
'nodeposition': nodeposition,
'keyspace': keyspace or '<all>',
'column_families': column_families or '<all>',
'cmd': ' '.join(map(str, cmd))
} |
Build a standard repair step dict.
:param cmd: Repair command.
:param step: Step number.
:param start: Start range.
:param end: End range.
:param nodeposition: Node position.
:param keyspace: Keyspace being repaired.
:param column_families: Column families being repaired.
:rtype: dict
:return: Dict of repair step info.
| Build a standard repair step dict. | [
"Build",
"a",
"standard",
"repair",
"step",
"dict",
"."
] | def _build_repair_dict(cmd, step, start, end, nodeposition, keyspace=None, column_families=None):
return {
'time': datetime.now().isoformat(),
'step': step,
'start': start,
'end': end,
'nodeposition': nodeposition,
'keyspace': keyspace or '<all>',
'column_families': column_families or '<all>',
'cmd': ' '.join(map(str, cmd))
} | [
"def",
"_build_repair_dict",
"(",
"cmd",
",",
"step",
",",
"start",
",",
"end",
",",
"nodeposition",
",",
"keyspace",
"=",
"None",
",",
"column_families",
"=",
"None",
")",
":",
"return",
"{",
"'time'",
":",
"datetime",
".",
"now",
"(",
")",
".",
"isoformat",
"(",
")",
",",
"'step'",
":",
"step",
",",
"'start'",
":",
"start",
",",
"'end'",
":",
"end",
",",
"'nodeposition'",
":",
"nodeposition",
",",
"'keyspace'",
":",
"keyspace",
"or",
"'<all>'",
",",
"'column_families'",
":",
"column_families",
"or",
"'<all>'",
",",
"'cmd'",
":",
"' '",
".",
"join",
"(",
"map",
"(",
"str",
",",
"cmd",
")",
")",
"}"
] | Build a standard repair step dict. | [
"Build",
"a",
"standard",
"repair",
"step",
"dict",
"."
] | [
"\"\"\"\n Build a standard repair step dict.\n\n :param cmd: Repair command.\n :param step: Step number.\n :param start: Start range.\n :param end: End range.\n :param nodeposition: Node position.\n :param keyspace: Keyspace being repaired.\n :param column_families: Column families being repaired.\n\n :rtype: dict\n :return: Dict of repair step info.\n \"\"\""
] | [
{
"param": "cmd",
"type": null
},
{
"param": "step",
"type": null
},
{
"param": "start",
"type": null
},
{
"param": "end",
"type": null
},
{
"param": "nodeposition",
"type": null
},
{
"param": "keyspace",
"type": null
},
{
"param": "column_families",
"type": null
}
] | {
"returns": [
{
"docstring": "Dict of repair step info.",
"docstring_tokens": [
"Dict",
"of",
"repair",
"step",
"info",
"."
],
"type": "dict"
}
],
"raises": [],
"params": [
{
"identifier": "cmd",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "step",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "start",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "end",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "nodeposition",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "keyspace",
"type": null,
"docstring": "Keyspace being repaired.",
"docstring_tokens": [
"Keyspace",
"being",
"repaired",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "column_families",
"type": null,
"docstring": "Column families being repaired.",
"docstring_tokens": [
"Column",
"families",
"being",
"repaired",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | from datetime import datetime
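# note: the body calls datetime.now() directly, so the datetime class (not just the module) must be imported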
def _build_repair_dict(cmd, step, start, end, nodeposition, keyspace=None, column_families=None):
return {
'time': datetime.now().isoformat(),
'step': step,
'start': start,
'end': end,
'nodeposition': nodeposition,
'keyspace': keyspace or '<all>',
'column_families': column_families or '<all>',
'cmd': ' '.join(map(str, cmd))
} | 610,457 | 439 |
143be6d4be6454f173865923bd1507ffa138f522 | mike-fmh/find-song | misc.py | [
"MIT"
] | Python | take_off_extra_chars | <not_specific> | def take_off_extra_chars(string):
"""Takes brackets, parentheses, stars out of strings"""
word = ""
for i in range(len(string)):
if string[i] != "[" and string[i] != "]" and string[i] != "(" and string[i] != ")" and string[i] != "*":
word += string[i]
return word | Takes brackets, parentheses, stars out of strings | Takes brackets, parentheses, stars out of strings | [
"Takes",
"brackets",
"parentheses",
"stars",
"out",
"of",
"strings"
] | def take_off_extra_chars(string):
word = ""
for i in range(len(string)):
if string[i] != "[" and string[i] != "]" and string[i] != "(" and string[i] != ")" and string[i] != "*":
word += string[i]
return word | [
"def",
"take_off_extra_chars",
"(",
"string",
")",
":",
"word",
"=",
"\"\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"string",
")",
")",
":",
"if",
"string",
"[",
"i",
"]",
"!=",
"\"[\"",
"and",
"string",
"[",
"i",
"]",
"!=",
"\"]\"",
"and",
"string",
"[",
"i",
"]",
"!=",
"\"(\"",
"and",
"string",
"[",
"i",
"]",
"!=",
"\")\"",
"and",
"string",
"[",
"i",
"]",
"!=",
"\"*\"",
":",
"word",
"+=",
"string",
"[",
"i",
"]",
"return",
"word"
] | Takes brackets, parentheses, stars out of strings | [
"Takes",
"brackets",
"parentheses",
"stars",
"out",
"of",
"strings"
] | [
"\"\"\"Takes brackets, parentheses, stars out of strings\"\"\""
] | [
{
"param": "string",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "string",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def take_off_extra_chars(string):
word = ""
for i in range(len(string)):
if string[i] != "[" and string[i] != "]" and string[i] != "(" and string[i] != ")" and string[i] != "*":
word += string[i]
return word | 610,458 | 286 |
bb91476d047373bfbc02ee6492656540574ea6e1 | clokman/KFIR | triplicator/bibTools.py | [
"MIT"
] | Python | updateFieldValuesRegistry | null | def updateFieldValuesRegistry(instance, entry_id, field_name, field_value):
"""
Updates instance registry each time an entry is added to the bibliography instance. The registry allows
fast searching entries in the bibliography.
Args:
entry_id (str): id to be assigned to entry (e.g., '2341230u9078').
field_name(str): name of field (e.g., 'author')
field_value(str or list): value of the field (e.g., 'John Doe' )
Returns:
Nothing, but updates the instance._field_values_registry
Examples:
>>> # preparation
>>> from triplicator.bibTools import Bibliography
>>> bibx = Bibliography()
>>> # add first entry and see how instance registry is updated afterwards
>>> bibx.setEntry("01", "author", "John Can Lokman")
>>> bibx.setEntry("01", "title", "Test Book 1")
>>> print(bibx._field_values_registry)
{'author': {'John Can Lokman': ['01']}, 'title': {'Test Book 1': ['01']}}
>>> # add second entry and see how instance registry is updated afterwards
>>> bibx.setEntry("02", "title", "Test Book 2")
>>> bibx.setEntry("02", "author", "Stefan Schlobach")
>>> print(bibx._field_values_registry)
{'author': {'John Can Lokman': ['01'], 'Stefan Schlobach': ['02']}, 'title': {'Test Book 1': ['01'], 'Test Book 2': ['02']}}
TODO:
- Input should be treated as a search string rather than an exact string, so, for instance, a partial
author name can also be searched.
"""
# function must be able to accept a list of items, as this is sometimes the case (e.g., multiple authors
# ...for author field).
# Therefore, strings inputs are converted to lists to be compatible with the list processing facilities
field_value_list = []
if type(field_value) == str:
field_value_list = [field_value]
# Debugger
#print("input is this string:")
#print(field_value_list)
# Explicit statement. If the parameter is already a list, take it as it is
elif type(field_value) == list:
field_value_list = field_value
# Debugger
# print("input is this list:")
# print(field_value_list)
elif type(field_value) is None:
pass
#else:
# #raise Exception("'field_value' must be string or list. It is currently: " + str(field_value))
if field_value_list != []:
for each_field_value in field_value_list:
# if field_name (e.g., author) has never been added to the registry
if field_name not in instance._field_values_registry:
# Debugger
#print("SCENARIO 1")
#print("field_values_registry is currently:")
#print(instance._field_values_registry)
# Add dictionary entry for the field name-value pair and the entry id (e.g., {author:{"john x":[124515152])}
# NOTE: Below line can instead use instance._field_type_registry for more efficient search. This has to be tested
instance._field_values_registry[field_name] = {each_field_value: [entry_id]}
# Debugger
#print("field_name '" + str(field_name) + "' is not in registry")
#print("the current field value is: '" + each_field_value + "' (and it is not in registry).")
#print("field name and current field value is now added to registry with the following command:")
#print("instance._field_values_registry[field_name] = {each_field_value: [entry_id]}")
#print("the field_values_registry has now become:")
#print(instance._field_values_registry)
# if field name (e.g., 'author' field) is previously added to the registry...
elif field_name in instance._field_values_registry:
# Debugger
#print("SCENARIO 2")
#print("field_values_registry is currently:")
#print(instance._field_values_registry)
# ...but if field_value (e.g., author's name) has never been added to the registry
if each_field_value not in instance._field_values_registry[field_name]:
# add this field value (e.g., author) and set its value to a LIST that contains current entry_id
# so that this list can later be appended with other entry_ids.
# an example operation performed by the line below would be equivalent to:
# instance._field_values_registry[author] = {"John x": ["14578436002"]}
# which creates this dictionary entry:
# _field_values_registry:{ author:{ "John x": ["14578436002"] } }
instance._field_values_registry[field_name][each_field_value] = [entry_id]
# Debugger
#print("field_name '" + str(field_name) + "' has been found in the registry")
#print("current field value '" + each_field_value + "' has NOT been found in the registry")
#print("field name and current field value is now added to registry with the following command:")
#print("instance._field_values_registry[field_name] = {each_field_value: [entry_id]}")
#print("the field_values_registry has now become:")
#print(instance._field_values_registry)
# if field_value (e.g., author's name) is previously added to the registry
elif each_field_value in instance._field_values_registry[field_name]:
# Debugger
#print("SCENARIO 3")
#print("field_values_registry is currently:")
#print(instance._field_values_registry)
# append entry id to corresponding field value (e.g.,add entry_id to author name)
# an example operation performed by the line below would be equivalent to:
# instance._field_values_registry[author]["John x"].append["14578436002"]
# which creates this dictionary entry:
# _field_values_registry:{ author:{ "John x": ["some_previous_id", "14578436002"] } }
instance._field_values_registry[field_name][each_field_value].append(entry_id)
# Debugger
#print("field_name '" + str(field_name) + "' has been found in the registry")
#print("current field value '" + each_field_value + "' HAS been found in the registry")
#print("field name and current field value is now added to registry with the following command:")
#print("instance._field_values_registry[field_name] = {each_field_value: [entry_id]}")
#print("the field_values_registry has now become:")
#print(instance._field_values_registry)
# Debugger
#print("instance._field_values_registry is")
#print(instance._field_values_registry)
#print("") |
Updates instance registry each time an entry is added to the bibliography instance. The registry allows
fast searching entries in the bibliography.
Args:
entry_id (str): id to be assigned to entry (e.g., '2341230u9078').
field_name(str): name of field (e.g., 'author')
field_value(str or list): value of the field (e.g., 'John Doe' )
Returns:
Nothing, but updates the instance._field_values_registry
Examples:
>>> # preparation
>>> from triplicator.bibTools import Bibliography
>>> bibx = Bibliography()
>>> # add first entry and see how instance registry is updated afterwards
>>> bibx.setEntry("01", "author", "John Can Lokman")
>>> bibx.setEntry("01", "title", "Test Book 1")
>>> print(bibx._field_values_registry)
{'author': {'John Can Lokman': ['01']}, 'title': {'Test Book 1': ['01']}}
>>> # add second entry and see how instance registry is updated afterwards
>>> bibx.setEntry("02", "title", "Test Book 2")
>>> bibx.setEntry("02", "author", "Stefan Schlobach")
>>> print(bibx._field_values_registry)
{'author': {'John Can Lokman': ['01'], 'Stefan Schlobach': ['02']}, 'title': {'Test Book 1': ['01'], 'Test Book 2': ['02']}}
TODO:
- Input should be treated as a search string rather than an exact string, so, for instance, a partial
author name can also be searched.
| Updates instance registry each time an entry is added to the bibliography instance. The registry allows
fast searching entries in the bibliography. | [
"Updates",
"instance",
"registry",
"each",
"time",
"an",
"entry",
"is",
"added",
"to",
"the",
"bibliography",
"instance",
".",
"The",
"registry",
"allows",
"fast",
"searching",
"entries",
"in",
"the",
"bibliography",
"."
] | def updateFieldValuesRegistry(instance, entry_id, field_name, field_value):
field_value_list = []
if type(field_value) == str:
field_value_list = [field_value]
elif type(field_value) == list:
field_value_list = field_value
elif type(field_value) is None:
pass
if field_value_list != []:
for each_field_value in field_value_list:
if field_name not in instance._field_values_registry:
instance._field_values_registry[field_name] = {each_field_value: [entry_id]}
elif field_name in instance._field_values_registry:
if each_field_value not in instance._field_values_registry[field_name]:
instance._field_values_registry[field_name][each_field_value] = [entry_id]
elif each_field_value in instance._field_values_registry[field_name]:
instance._field_values_registry[field_name][each_field_value].append(entry_id) | [
"def",
"updateFieldValuesRegistry",
"(",
"instance",
",",
"entry_id",
",",
"field_name",
",",
"field_value",
")",
":",
"field_value_list",
"=",
"[",
"]",
"if",
"type",
"(",
"field_value",
")",
"==",
"str",
":",
"field_value_list",
"=",
"[",
"field_value",
"]",
"elif",
"type",
"(",
"field_value",
")",
"==",
"list",
":",
"field_value_list",
"=",
"field_value",
"elif",
"type",
"(",
"field_value",
")",
"is",
"None",
":",
"pass",
"if",
"field_value_list",
"!=",
"[",
"]",
":",
"for",
"each_field_value",
"in",
"field_value_list",
":",
"if",
"field_name",
"not",
"in",
"instance",
".",
"_field_values_registry",
":",
"instance",
".",
"_field_values_registry",
"[",
"field_name",
"]",
"=",
"{",
"each_field_value",
":",
"[",
"entry_id",
"]",
"}",
"elif",
"field_name",
"in",
"instance",
".",
"_field_values_registry",
":",
"if",
"each_field_value",
"not",
"in",
"instance",
".",
"_field_values_registry",
"[",
"field_name",
"]",
":",
"instance",
".",
"_field_values_registry",
"[",
"field_name",
"]",
"[",
"each_field_value",
"]",
"=",
"[",
"entry_id",
"]",
"elif",
"each_field_value",
"in",
"instance",
".",
"_field_values_registry",
"[",
"field_name",
"]",
":",
"instance",
".",
"_field_values_registry",
"[",
"field_name",
"]",
"[",
"each_field_value",
"]",
".",
"append",
"(",
"entry_id",
")"
] | Updates instance registry each time an entry is added to the bibliography instance. | [
"Updates",
"instance",
"registry",
"each",
"time",
"an",
"entry",
"is",
"added",
"to",
"the",
"bibliography",
"instance",
"."
] | [
"\"\"\"\n Updates instance registry each time an entry is added to the bibliography instance. The registry allows\n fast searching entries in the bibliography.\n\n Args:\n entry_id (str): id to be assigned to entry (e.g., '2341230u9078').\n field_name(str): name of field (e.g., 'author')\n field_value(str or list): value of the field (e.g., 'John Doe' )\n\n Returns:\n Nothing, but updates the instance._field_values_registry\n\n Examples:\n >>> # preparation\n >>> from triplicator.bibTools import Bibliography\n >>> bibx = Bibliography()\n\n >>> # add first entry and see how instance registry is updated afterwards\n >>> bibx.setEntry(\"01\", \"author\", \"John Can Lokman\")\n >>> bibx.setEntry(\"01\", \"title\", \"Test Book 1\")\n >>> print(bibx._field_values_registry)\n {'author': {'John Can Lokman': ['01']}, 'title': {'Test Book 1': ['01']}}\n\n >>> # add second entry and see how instance registry is updated afterwards\n >>> bibx.setEntry(\"02\", \"title\", \"Test Book 2\")\n >>> bibx.setEntry(\"02\", \"author\", \"Stefan Schlobach\")\n >>> print(bibx._field_values_registry)\n {'author': {'John Can Lokman': ['01'], 'Stefan Schlobach': ['02']}, 'title': {'Test Book 1': ['01'], 'Test Book 2': ['02']}}\n\n TODO:\n - Input should be treated as a search string rather than an exact string, so, for instance, a partial\n author name can also be searched.\n \"\"\"",
"# function must be able to accept a list of items, as this is sometimes the case (e.g., multiple authors",
"# ...for author field).",
"# Therefore, strings inputs are converted to lists to be compatible with the list processing facilities",
"# Debugger",
"#print(\"input is this string:\")",
"#print(field_value_list)",
"# Explicit statement. If the parameter is already a list, take it as it is",
"# Debugger",
"# print(\"input is this list:\")",
"# print(field_value_list)",
"#else:",
"# #raise Exception(\"'field_value' must be string or list. It is currently: \" + str(field_value))",
"# if field_name (e.g., author) has never been added to the registry",
"# Debugger",
"#print(\"SCENARIO 1\")",
"#print(\"field_values_registry is currently:\")",
"#print(instance._field_values_registry)",
"# Add dictionary entry for the field name-value pair and the entry id (e.g., {author:{\"john x\":[124515152])}",
"# NOTE: Below line can instead use instance._field_type_registry for more efficient search. This has to be tested",
"# Debugger",
"#print(\"field_name '\" + str(field_name) + \"' is not in registry\")",
"#print(\"the current field value is: '\" + each_field_value + \"' (and it is not in registry).\")",
"#print(\"field name and current field value is now added to registry with the following command:\")",
"#print(\"instance._field_values_registry[field_name] = {each_field_value: [entry_id]}\")",
"#print(\"the field_values_registry has now become:\")",
"#print(instance._field_values_registry)",
"# if field name (e.g., 'author' field) is previously added to the registry...",
"# Debugger",
"#print(\"SCENARIO 2\")",
"#print(\"field_values_registry is currently:\")",
"#print(instance._field_values_registry)",
"# ...but if field_value (e.g., author's name) has never been added to the registry",
"# add this field value (e.g., author) and set its value to a LIST that contains current entry_id",
"# so that this list can later be appended with other entry_ids.",
"# an example operation performed by the line below would be equivalent to:",
"# instance._field_values_registry[author] = {\"John x\": [\"14578436002\"]}",
"# which creates this dictionary entry:",
"# _field_values_registry:{ author:{ \"John x\": [\"14578436002\"] } }",
"# Debugger",
"#print(\"field_name '\" + str(field_name) + \"' has been found in the registry\")",
"#print(\"current field value '\" + each_field_value + \"' has NOT been found in the registry\")",
"#print(\"field name and current field value is now added to registry with the following command:\")",
"#print(\"instance._field_values_registry[field_name] = {each_field_value: [entry_id]}\")",
"#print(\"the field_values_registry has now become:\")",
"#print(instance._field_values_registry)",
"# if field_value (e.g., author's name) is previously added to the registry",
"# Debugger",
"#print(\"SCENARIO 3\")",
"#print(\"field_values_registry is currently:\")",
"#print(instance._field_values_registry)",
"# append entry id to corresponding field value (e.g.,add entry_id to author name)",
"# an example operation performed by the line below would be equivalent to:",
"# instance._field_values_registry[author][\"John x\"].append[\"14578436002\"]",
"# which creates this dictionary entry:",
"# _field_values_registry:{ author:{ \"John x\": [\"some_previous_id\", \"14578436002\"] } }",
"# Debugger",
"#print(\"field_name '\" + str(field_name) + \"' has been found in the registry\")",
"#print(\"current field value '\" + each_field_value + \"' HAS been found in the registry\")",
"#print(\"field name and current field value is now added to registry with the following command:\")",
"#print(\"instance._field_values_registry[field_name] = {each_field_value: [entry_id]}\")",
"#print(\"the field_values_registry has now become:\")",
"#print(instance._field_values_registry)",
"# Debugger",
"#print(\"instance._field_values_registry is\")",
"#print(instance._field_values_registry)",
"#print(\"\")"
] | [
{
"param": "instance",
"type": null
},
{
"param": "entry_id",
"type": null
},
{
"param": "field_name",
"type": null
},
{
"param": "field_value",
"type": null
}
] | {
"returns": [
{
"docstring": "Nothing, but updates the instance._field_values_registry",
"docstring_tokens": [
"Nothing",
"but",
"updates",
"the",
"instance",
".",
"_field_values_registry"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "instance",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "entry_id",
"type": null,
"docstring": "id to be assigned to entry .",
"docstring_tokens": [
"id",
"to",
"be",
"assigned",
"to",
"entry",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "field_name",
"type": null,
"docstring": "name of field",
"docstring_tokens": [
"name",
"of",
"field"
],
"default": null,
"is_optional": false
},
{
"identifier": "field_value",
"type": null,
"docstring": "value of the field",
"docstring_tokens": [
"value",
"of",
"the",
"field"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": [
{
"identifier": "examples",
"docstring": ">>> # preparation\n>>> from triplicator.bibTools import Bibliography\n>>> bibx = Bibliography()\n\n>>> # add first entry and see how instance registry is updated afterwards\n>>> bibx.setEntry(\"01\", \"author\", \"John Can Lokman\")\n>>> bibx.setEntry(\"01\", \"title\", \"Test Book 1\")\n>>> print(bibx._field_values_registry)\n{'author': {'John Can Lokman': ['01']}, 'title': {'Test Book 1': ['01']}}\n\n>>> # add second entry and see how instance registry is updated afterwards\n>>> bibx.setEntry(\"02\", \"title\", \"Test Book 2\")\n>>> bibx.setEntry(\"02\", \"author\", \"Stefan Schlobach\")\n>>> print(bibx._field_values_registry)\n{'author': {'John Can Lokman': ['01'], 'Stefan Schlobach': ['02']}, 'title': {'Test Book 1': ['01'], 'Test Book 2': ['02']}}",
"docstring_tokens": [
">>>",
"#",
"preparation",
">>>",
"from",
"triplicator",
".",
"bibTools",
"import",
"Bibliography",
">>>",
"bibx",
"=",
"Bibliography",
"()",
">>>",
"#",
"add",
"first",
"entry",
"and",
"see",
"how",
"instance",
"registry",
"is",
"updated",
"afterwards",
">>>",
"bibx",
".",
"setEntry",
"(",
"\"",
"01",
"\"",
"\"",
"author",
"\"",
"\"",
"John",
"Can",
"Lokman",
"\"",
")",
">>>",
"bibx",
".",
"setEntry",
"(",
"\"",
"01",
"\"",
"\"",
"title",
"\"",
"\"",
"Test",
"Book",
"1",
"\"",
")",
">>>",
"print",
"(",
"bibx",
".",
"_field_values_registry",
")",
"{",
"'",
"author",
"'",
":",
"{",
"'",
"John",
"Can",
"Lokman",
"'",
":",
"[",
"'",
"01",
"'",
"]",
"}",
"'",
"title",
"'",
":",
"{",
"'",
"Test",
"Book",
"1",
"'",
":",
"[",
"'",
"01",
"'",
"]",
"}}",
">>>",
"#",
"add",
"second",
"entry",
"and",
"see",
"how",
"instance",
"registry",
"is",
"updated",
"afterwards",
">>>",
"bibx",
".",
"setEntry",
"(",
"\"",
"02",
"\"",
"\"",
"title",
"\"",
"\"",
"Test",
"Book",
"2",
"\"",
")",
">>>",
"bibx",
".",
"setEntry",
"(",
"\"",
"02",
"\"",
"\"",
"author",
"\"",
"\"",
"Stefan",
"Schlobach",
"\"",
")",
">>>",
"print",
"(",
"bibx",
".",
"_field_values_registry",
")",
"{",
"'",
"author",
"'",
":",
"{",
"'",
"John",
"Can",
"Lokman",
"'",
":",
"[",
"'",
"01",
"'",
"]",
"'",
"Stefan",
"Schlobach",
"'",
":",
"[",
"'",
"02",
"'",
"]",
"}",
"'",
"title",
"'",
":",
"{",
"'",
"Test",
"Book",
"1",
"'",
":",
"[",
"'",
"01",
"'",
"]",
"'",
"Test",
"Book",
"2",
"'",
":",
"[",
"'",
"02",
"'",
"]",
"}}"
]
}
]
} | def updateFieldValuesRegistry(instance, entry_id, field_name, field_value):
field_value_list = []
if type(field_value) == str:
field_value_list = [field_value]
elif type(field_value) == list:
field_value_list = field_value
elif type(field_value) is None:
pass
if field_value_list != []:
for each_field_value in field_value_list:
if field_name not in instance._field_values_registry:
instance._field_values_registry[field_name] = {each_field_value: [entry_id]}
elif field_name in instance._field_values_registry:
if each_field_value not in instance._field_values_registry[field_name]:
instance._field_values_registry[field_name][each_field_value] = [entry_id]
elif each_field_value in instance._field_values_registry[field_name]:
instance._field_values_registry[field_name][each_field_value].append(entry_id) | 610,459 | 368 |
87fc168d170457f62c249d67977430c0d5f5aca9 | Jianwei-Wang/python2.7_lib | dist-packages/dtk/ui/utils.py | [
"PSF-2.0"
] | Python | scroll_to_top | null | def scroll_to_top(scrolled_window):
'''
Scroll scrolled_window to top position.
@param scrolled_window: Gtk.ScrolledWindow instance.
'''
scrolled_window.get_vadjustment().set_value(0) |
Scroll scrolled_window to top position.
@param scrolled_window: Gtk.ScrolledWindow instance.
| Scroll scrolled_window to top position. | [
"Scroll",
"scrolled_window",
"to",
"top",
"position",
"."
] | def scroll_to_top(scrolled_window):
scrolled_window.get_vadjustment().set_value(0) | [
"def",
"scroll_to_top",
"(",
"scrolled_window",
")",
":",
"scrolled_window",
".",
"get_vadjustment",
"(",
")",
".",
"set_value",
"(",
"0",
")"
] | Scroll scrolled_window to top position. | [
"Scroll",
"scrolled_window",
"to",
"top",
"position",
"."
] | [
"'''\n Scroll scrolled_window to top position.\n\n @param scrolled_window: Gtk.ScrolledWindow instance.\n '''"
] | [
{
"param": "scrolled_window",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "scrolled_window",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def scroll_to_top(scrolled_window):
scrolled_window.get_vadjustment().set_value(0) | 610,460 | 651 |
2cd5341150b55db07868f4972692f2030069c7f7 | mnfienen/sfrmaker | sfrmaker/test/test_examples.py | [
"CC0-1.0"
] | Python | keep_cwd | null | def keep_cwd():
"""Reset the working directory after a test.
"""
wd = os.getcwd()
yield wd # provide the fixture value
print("reverting working directory from {} to {}".format(os.getcwd(), wd))
os.chdir(wd) | Reset the working directory after a test.
| Reset the working directory after a test. | [
"Reset",
"the",
"working",
"directory",
"after",
"a",
"test",
"."
] | def keep_cwd():
wd = os.getcwd()
yield wd
print("reverting working directory from {} to {}".format(os.getcwd(), wd))
os.chdir(wd) | [
"def",
"keep_cwd",
"(",
")",
":",
"wd",
"=",
"os",
".",
"getcwd",
"(",
")",
"yield",
"wd",
"print",
"(",
"\"reverting working directory from {} to {}\"",
".",
"format",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"wd",
")",
")",
"os",
".",
"chdir",
"(",
"wd",
")"
] | Reset the working directory after a test. | [
"Reset",
"the",
"working",
"directory",
"after",
"a",
"test",
"."
] | [
"\"\"\"Reset the working directory after a test.\n \"\"\"",
"# provide the fixture value"
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import os
def keep_cwd():
wd = os.getcwd()
yield wd
print("reverting working directory from {} to {}".format(os.getcwd(), wd))
os.chdir(wd) | 610,461 | 1,018 |
d1d19c31d7a08cd05475c969fbf2328d027248cd | zyndagj/zed-align | zed-align.py | [
"BSD-3-Clause"
] | Python | ParseFai | <not_specific> | def ParseFai(inFile):
'''
Parses a fa.fai into a python dictionary
Parameters
================================
inFile FILE fai file
'''
return dict(map(lambda y: (y[0], int(y[1])), map(lambda y: y.split('\t'), open(inFile,'r').readlines()))) |
Parses a fa.fai into a python dictionary
Parameters
================================
inFile FILE fai file
| Parses a fa.fai into a python dictionary
Parameters
inFile FILE fai file | [
"Parses",
"a",
"fa",
".",
"fai",
"into",
"a",
"python",
"dictionary",
"Paramteters",
"inFile",
"FILE",
"fai",
"file"
] | def ParseFai(inFile):
return dict(map(lambda y: (y[0], int(y[1])), map(lambda y: y.split('\t'), open(inFile,'r').readlines()))) | [
"def",
"ParseFai",
"(",
"inFile",
")",
":",
"return",
"dict",
"(",
"map",
"(",
"lambda",
"y",
":",
"(",
"y",
"[",
"0",
"]",
",",
"int",
"(",
"y",
"[",
"1",
"]",
")",
")",
",",
"map",
"(",
"lambda",
"y",
":",
"y",
".",
"split",
"(",
"'\\t'",
")",
",",
"open",
"(",
"inFile",
",",
"'r'",
")",
".",
"readlines",
"(",
")",
")",
")",
")"
] | Parses a fa.fai into a python dictionary
Parameters | [
"Parses",
"a",
"fa",
".",
"fai",
"into",
"a",
"python",
"dictionary",
"Paramteters"
] | [
"'''\n\tParses a fa.fai into a python dictionary\n\tParamteters\n\t================================\n\tinFile\tFILE\tfai file\n\t'''"
] | [
{
"param": "inFile",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "inFile",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def ParseFai(inFile):
return dict(map(lambda y: (y[0], int(y[1])), map(lambda y: y.split('\t'), open(inFile,'r').readlines()))) | 610,462 | 913 |
8a2dc6b641221d2efd51dab0dc853975f81d0734 | kanderso-nrel/pvfactors | pvfactors/geometry/plot.py | [
"BSD-3-Clause"
] | Python | plot_line | null | def plot_line(ax, ob, line_color):
"""Plot boundaries of shapely line
Parameters
----------
ax : ``matplotlib.pyplot.Axes`` object
Axes for plotting
ob : ``Shapely`` object
Geometry object whose boundaries should be plotted
line_color : str
matplotlib color to use for plotting the line
"""
try:
x, y = ob.xy
ax.plot(x, y, color=line_color, alpha=0.7,
linewidth=3, solid_capstyle='round', zorder=2)
except NotImplementedError:
for line in ob:
x, y = line.xy
ax.plot(x, y, color=line_color,
alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2) | Plot boundaries of shapely line
Parameters
----------
ax : ``matplotlib.pyplot.Axes`` object
Axes for plotting
ob : ``Shapely`` object
Geometry object whose boundaries should be plotted
line_color : str
matplotlib color to use for plotting the line
| Plot boundaries of shapely line
Parameters
| [
"Plot",
"boundaries",
"of",
"shapely",
"line",
"Parameters"
] | def plot_line(ax, ob, line_color):
try:
x, y = ob.xy
ax.plot(x, y, color=line_color, alpha=0.7,
linewidth=3, solid_capstyle='round', zorder=2)
except NotImplementedError:
for line in ob:
x, y = line.xy
ax.plot(x, y, color=line_color,
alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2) | [
"def",
"plot_line",
"(",
"ax",
",",
"ob",
",",
"line_color",
")",
":",
"try",
":",
"x",
",",
"y",
"=",
"ob",
".",
"xy",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"line_color",
",",
"alpha",
"=",
"0.7",
",",
"linewidth",
"=",
"3",
",",
"solid_capstyle",
"=",
"'round'",
",",
"zorder",
"=",
"2",
")",
"except",
"NotImplementedError",
":",
"for",
"line",
"in",
"ob",
":",
"x",
",",
"y",
"=",
"line",
".",
"xy",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"line_color",
",",
"alpha",
"=",
"0.7",
",",
"linewidth",
"=",
"3",
",",
"solid_capstyle",
"=",
"'round'",
",",
"zorder",
"=",
"2",
")"
] | Plot boundaries of shapely line
Parameters | [
"Plot",
"boundaries",
"of",
"shapely",
"line",
"Parameters"
] | [
"\"\"\"Plot boundaries of shapely line\n\n Parameters\n ----------\n ax : ``matplotlib.pyplot.Axes`` object\n Axes for plotting\n ob : ``Shapely`` object\n Geometry object whose boundaries should be plotted\n line_color : str\n matplotlib color to use for plotting the line\n\n \"\"\""
] | [
{
"param": "ax",
"type": null
},
{
"param": "ob",
"type": null
},
{
"param": "line_color",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "ax",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ob",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "line_color",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def plot_line(ax, ob, line_color):
try:
x, y = ob.xy
ax.plot(x, y, color=line_color, alpha=0.7,
linewidth=3, solid_capstyle='round', zorder=2)
except NotImplementedError:
for line in ob:
x, y = line.xy
ax.plot(x, y, color=line_color,
alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2) | 610,465 | 6 |
e79cf27380e036c9990f1c5757749c654c7225d2 | fxer/adventofcode | 2018/day06.py | [
"MIT"
] | Python | distance | <not_specific> | def distance(coord1, coord2):
"""
Return Manhattan Distance between two coordinates
"""
return abs(coord1[0] - coord2[0]) + abs(coord1[1] - coord2[1]) |
Return Manhattan Distance between two coordinates
| Return Manhattan Distance between two coordinates | [
"Return",
"Manhattan",
"Distance",
"between",
"two",
"coordinates"
] | def distance(coord1, coord2):
return abs(coord1[0] - coord2[0]) + abs(coord1[1] - coord2[1]) | [
"def",
"distance",
"(",
"coord1",
",",
"coord2",
")",
":",
"return",
"abs",
"(",
"coord1",
"[",
"0",
"]",
"-",
"coord2",
"[",
"0",
"]",
")",
"+",
"abs",
"(",
"coord1",
"[",
"1",
"]",
"-",
"coord2",
"[",
"1",
"]",
")"
] | Return Manhattan Distance between two coordinates | [
"Return",
"Manhattan",
"Distance",
"between",
"two",
"coordinates"
] | [
"\"\"\"\n Return Manhattan Distance between two coordinates\n \"\"\""
] | [
{
"param": "coord1",
"type": null
},
{
"param": "coord2",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "coord1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "coord2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def distance(coord1, coord2):
return abs(coord1[0] - coord2[0]) + abs(coord1[1] - coord2[1]) | 610,466 | 429 |
ea42caacd17a8a65ebcdadc86dc76af635fe0f6a | arpananand1/csp | peripheral/hemc_44121/config/hemc.py | [
"0BSD"
] | Python | hsmcConvertMaskToInt | <not_specific> | def hsmcConvertMaskToInt( aRegMask ):
""" function to convert bit field mask string to integer -- assumes mask is contiguous bits"""
numBits = 0;
aBinStr = '{0:32b}'.format(int( aRegMask, 16 )).strip().rstrip( "0" )
while len( aBinStr ):
aBinCh = aBinStr[-1]
aBinStr = aBinStr[0:-1]
if aBinCh == '1':
numBits += 1
else:
break
return ((2**numBits) - 1) # return max value field can contain | function to convert bit field mask string to integer -- assumes mask is contiguous bits | function to convert bit field mask string to integer -- assumes mask is contiguous bits | [
"function",
"to",
"convert",
"bit",
"field",
"mask",
"string",
"to",
"integer",
"--",
"assumes",
"mask",
"is",
"contiguous",
"bits"
] | def hsmcConvertMaskToInt( aRegMask ):
numBits = 0;
aBinStr = '{0:32b}'.format(int( aRegMask, 16 )).strip().rstrip( "0" )
while len( aBinStr ):
aBinCh = aBinStr[-1]
aBinStr = aBinStr[0:-1]
if aBinCh == '1':
numBits += 1
else:
break
return ((2**numBits) - 1) | [
"def",
"hsmcConvertMaskToInt",
"(",
"aRegMask",
")",
":",
"numBits",
"=",
"0",
";",
"aBinStr",
"=",
"'{0:32b}'",
".",
"format",
"(",
"int",
"(",
"aRegMask",
",",
"16",
")",
")",
".",
"strip",
"(",
")",
".",
"rstrip",
"(",
"\"0\"",
")",
"while",
"len",
"(",
"aBinStr",
")",
":",
"aBinCh",
"=",
"aBinStr",
"[",
"-",
"1",
"]",
"aBinStr",
"=",
"aBinStr",
"[",
"0",
":",
"-",
"1",
"]",
"if",
"aBinCh",
"==",
"'1'",
":",
"numBits",
"+=",
"1",
"else",
":",
"break",
"return",
"(",
"(",
"2",
"**",
"numBits",
")",
"-",
"1",
")"
] | function to convert bit field mask string to integer -- assumes mask is contiguous bits | [
"function",
"to",
"convert",
"bit",
"field",
"mask",
"string",
"to",
"integer",
"--",
"assumes",
"mask",
"is",
"contiguous",
"bits"
] | [
"\"\"\" function to convert bit field mask string to integer -- assumes mask is contiguous bits\"\"\"",
"# return max value field can contain"
] | [
{
"param": "aRegMask",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "aRegMask",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def hsmcConvertMaskToInt( aRegMask ):
numBits = 0;
aBinStr = '{0:32b}'.format(int( aRegMask, 16 )).strip().rstrip( "0" )
while len( aBinStr ):
aBinCh = aBinStr[-1]
aBinStr = aBinStr[0:-1]
if aBinCh == '1':
numBits += 1
else:
break
return ((2**numBits) - 1) | 610,467 | 95 |
e4d05018efef9330efc5213ff769d4cc844b53d3 | dylburger/cracking-the-coding-interview-prep | balanced-brackets/are_brackets_balanced.py | [
"MIT"
] | Python | is_right_bracket | <not_specific> | def is_right_bracket(char):
""" Given a character,
return the match object if the character is a right bracket, else None
"""
pattern = r'[\]})]'
return re.search(pattern, char) | Given a character,
return the match object if the character is a right bracket, else None
| Given a character,
return the match object if the character is a right bracket, else None | [
"Given",
"a",
"character",
"return",
"the",
"match",
"object",
"if",
"the",
"character",
"is",
"a",
"right",
"bracket",
"else",
"None"
] | def is_right_bracket(char):
pattern = r'[\]})]'
return re.search(pattern, char) | [
"def",
"is_right_bracket",
"(",
"char",
")",
":",
"pattern",
"=",
"r'[\\]})]'",
"return",
"re",
".",
"search",
"(",
"pattern",
",",
"char",
")"
] | Given a character,
return the match object if the character is a right bracket, else None | [
"Given",
"a",
"character",
"return",
"the",
"match",
"object",
"if",
"the",
"character",
"is",
"a",
"right",
"bracket",
"else",
"None"
] | [
"\"\"\" Given a character,\n return the match object if the character is a right bracket, else None\n \"\"\""
] | [
{
"param": "char",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "char",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def is_right_bracket(char):
pattern = r'[\]})]'
return re.search(pattern, char) | 610,468 | 441 |
1b2d7783f4730021bcc68d0554c2a4e143e4229f | latsdev/azure-linux-extensions | AzureMonitorAgent/agent.py | [
"Apache-2.0"
] | Python | is_arc_installed | <not_specific> | def is_arc_installed():
"""
Check if this is an Arc machine
"""
# Using systemctl to check this since Arc only supports VMs that have systemd
check_arc = os.system('systemctl status himdsd 1>/dev/null 2>&1')
return check_arc == 0 |
Check if this is an Arc machine
| Check if this is an Arc machine | [
"Check",
"if",
"this",
"is",
"an",
"Arc",
"machine"
] | def is_arc_installed():
check_arc = os.system('systemctl status himdsd 1>/dev/null 2>&1')
return check_arc == 0 | [
"def",
"is_arc_installed",
"(",
")",
":",
"check_arc",
"=",
"os",
".",
"system",
"(",
"'systemctl status himdsd 1>/dev/null 2>&1'",
")",
"return",
"check_arc",
"==",
"0"
] | Check if this is an Arc machine | [
"Check",
"if",
"this",
"is",
"an",
"Arc",
"machine"
] | [
"\"\"\"\n Check if this is an Arc machine\n \"\"\"",
"# Using systemctl to check this since Arc only supports VMs that have systemd"
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import os
def is_arc_installed():
check_arc = os.system('systemctl status himdsd 1>/dev/null 2>&1')
return check_arc == 0 | 610,469 | 321 |
0f4b923a67aeac1f64ea04ad6c59c19171867f3c | csm-aut/csm | csmserver/utils.py | [
"Apache-2.0"
] | Python | generate_ip_range | <not_specific> | def generate_ip_range(start_ip, end_ip):
"""
Given the start_ip and end_ip, generate all the IP addresses in between inclusively.
Example, generate_ip_range("192.168.1.0", "192.168.2.0")
"""
start = list(map(int, start_ip.split(".")))
end = list(map(int, end_ip.split(".")))
temp = start
ip_range = []
ip_range.append(start_ip)
while temp != end:
start[3] += 1
for i in (3, 2, 1):
if temp[i] == 256:
temp[i] = 0
temp[i - 1] += 1
ip_range.append(".".join(map(str, temp)))
return ip_range |
Given the start_ip and end_ip, generate all the IP addresses in between inclusively.
Example, generate_ip_range("192.168.1.0", "192.168.2.0")
| Given the start_ip and end_ip, generate all the IP addresses in between inclusively. | [
"Given",
"the",
"start_ip",
"and",
"end_ip",
"generate",
"all",
"the",
"IP",
"addresses",
"in",
"between",
"inclusively",
"."
] | def generate_ip_range(start_ip, end_ip):
start = list(map(int, start_ip.split(".")))
end = list(map(int, end_ip.split(".")))
temp = start
ip_range = []
ip_range.append(start_ip)
while temp != end:
start[3] += 1
for i in (3, 2, 1):
if temp[i] == 256:
temp[i] = 0
temp[i - 1] += 1
ip_range.append(".".join(map(str, temp)))
return ip_range | [
"def",
"generate_ip_range",
"(",
"start_ip",
",",
"end_ip",
")",
":",
"start",
"=",
"list",
"(",
"map",
"(",
"int",
",",
"start_ip",
".",
"split",
"(",
"\".\"",
")",
")",
")",
"end",
"=",
"list",
"(",
"map",
"(",
"int",
",",
"end_ip",
".",
"split",
"(",
"\".\"",
")",
")",
")",
"temp",
"=",
"start",
"ip_range",
"=",
"[",
"]",
"ip_range",
".",
"append",
"(",
"start_ip",
")",
"while",
"temp",
"!=",
"end",
":",
"start",
"[",
"3",
"]",
"+=",
"1",
"for",
"i",
"in",
"(",
"3",
",",
"2",
",",
"1",
")",
":",
"if",
"temp",
"[",
"i",
"]",
"==",
"256",
":",
"temp",
"[",
"i",
"]",
"=",
"0",
"temp",
"[",
"i",
"-",
"1",
"]",
"+=",
"1",
"ip_range",
".",
"append",
"(",
"\".\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"temp",
")",
")",
")",
"return",
"ip_range"
] | Given the start_ip and end_ip, generate all the IP addresses in between inclusively. | [
"Given",
"the",
"start_ip",
"and",
"end_ip",
"generate",
"all",
"the",
"IP",
"addresses",
"in",
"between",
"inclusively",
"."
] | [
"\"\"\"\n Given the start_ip and end_ip, generate all the IP addresses in between inclusively.\n Example, generate_ip_range(\"192.168.1.0\", \"192.168.2.0\")\n \"\"\""
] | [
{
"param": "start_ip",
"type": null
},
{
"param": "end_ip",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "start_ip",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "end_ip",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def generate_ip_range(start_ip, end_ip):
start = list(map(int, start_ip.split(".")))
end = list(map(int, end_ip.split(".")))
temp = start
ip_range = []
ip_range.append(start_ip)
while temp != end:
start[3] += 1
for i in (3, 2, 1):
if temp[i] == 256:
temp[i] = 0
temp[i - 1] += 1
ip_range.append(".".join(map(str, temp)))
return ip_range | 610,470 | 750 |
7672ee3112088ecc2567e4dd476d88dbd50daaab | kjbracey-arm/scripts-pelion-edge | ostree/ostree-delta.py | [
"Apache-2.0"
] | Python | ensure_is_directory | null | def ensure_is_directory(path):
"""
Check that a file exists and is a directory.
Raises an exception on failure and does nothing on success
Args:
* path (PathLike): path to check.
"""
path = pathlib.Path(path)
if not path.exists():
raise ValueError('"{}" does not exist'.format(path))
if not path.is_dir():
raise ValueError('"{}" is not a directory'.format(path)) |
Check that a file exists and is a directory.
Raises an exception on failure and does nothing on success
Args:
* path (PathLike): path to check.
| Check that a file exists and is a directory.
Raises an exception on failure and does nothing on success
path (PathLike): path to check. | [
"Check",
"that",
"a",
"file",
"exists",
"and",
"is",
"a",
"directory",
".",
"Raises",
"an",
"exception",
"on",
"failure",
"and",
"does",
"nothing",
"on",
"success",
"path",
"(",
"PathLike",
")",
":",
"path",
"to",
"check",
"."
] | def ensure_is_directory(path):
path = pathlib.Path(path)
if not path.exists():
raise ValueError('"{}" does not exist'.format(path))
if not path.is_dir():
raise ValueError('"{}" is not a directory'.format(path)) | [
"def",
"ensure_is_directory",
"(",
"path",
")",
":",
"path",
"=",
"pathlib",
".",
"Path",
"(",
"path",
")",
"if",
"not",
"path",
".",
"exists",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'\"{}\" does not exist'",
".",
"format",
"(",
"path",
")",
")",
"if",
"not",
"path",
".",
"is_dir",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'\"{}\" is not a directory'",
".",
"format",
"(",
"path",
")",
")"
] | Check that a file exists and is a directory. | [
"Check",
"that",
"a",
"file",
"exists",
"and",
"is",
"a",
"directory",
"."
] | [
"\"\"\"\n Check that a file exists and is a directory.\n\n Raises an exception on failure and does nothing on success\n\n Args:\n * path (PathLike): path to check.\n\n \"\"\""
] | [
{
"param": "path",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import pathlib
def ensure_is_directory(path):
path = pathlib.Path(path)
if not path.exists():
raise ValueError('"{}" does not exist'.format(path))
if not path.is_dir():
raise ValueError('"{}" is not a directory'.format(path)) | 610,471 | 495 |
130486e3065d0dea780047044660f17cc7a9db5d | displayn/sistercities | sistercities/parser/de_parser.py | [
"MIT"
] | Python | add_graph_nodes | <not_specific> | def add_graph_nodes(data_graph, root_city, wikicities, root_city_attributes):
"""
add_graph_nodes adds nodes to a graph and returns the new graph
@param data_graph: the current graph
@param root_city: the root city
@param wikicities: all caught cities
@param root_city_attributes: attributes of the root_city
@return: the updated data_graph
"""
data_graph.add_node(root_city.id, root_city_attributes)
for city in wikicities:
city.get()
url = ''
if city.sitelinks:
if 'dewiki' in city.sitelinks:
url = city.sitelinks['dewiki']
wiki = 'dewiki'
elif 'enwiki' in city.sitelinks:
url = city.sitelinks['enwiki']
wiki = 'enwiki'
else:
url = next(city.sitelinks.__iter__())
attr_child = {
'url': url,
'wiki': wiki
}
# add connection between root city and sister city
data_graph.add_edge(root_city.id, city.id)
# create or update node of the sister city
data_graph.add_node(city.id, attr_child)
return data_graph |
add_graph_nodes adds nodes to a graph and returns the new graph
@param data_graph: the current graph
@param root_city: the root city
@param wikicities: all caught cities
@param root_city_attributes: attributes of the root_city
@return: the updated data_graph
| add_graph_nodes adds nodes to a graph and returns the new graph | [
"add_graph_nodes",
"adds",
"nodes",
"to",
"a",
"graph",
"and",
"returns",
"the",
"new",
"graph"
] | def add_graph_nodes(data_graph, root_city, wikicities, root_city_attributes):
data_graph.add_node(root_city.id, root_city_attributes)
for city in wikicities:
city.get()
url = ''
if city.sitelinks:
if 'dewiki' in city.sitelinks:
url = city.sitelinks['dewiki']
wiki = 'dewiki'
elif 'enwiki' in city.sitelinks:
url = city.sitelinks['enwiki']
wiki = 'enwiki'
else:
url = next(city.sitelinks.__iter__())
attr_child = {
'url': url,
'wiki': wiki
}
data_graph.add_edge(root_city.id, city.id)
data_graph.add_node(city.id, attr_child)
return data_graph | [
"def",
"add_graph_nodes",
"(",
"data_graph",
",",
"root_city",
",",
"wikicities",
",",
"root_city_attributes",
")",
":",
"data_graph",
".",
"add_node",
"(",
"root_city",
".",
"id",
",",
"root_city_attributes",
")",
"for",
"city",
"in",
"wikicities",
":",
"city",
".",
"get",
"(",
")",
"url",
"=",
"''",
"if",
"city",
".",
"sitelinks",
":",
"if",
"'dewiki'",
"in",
"city",
".",
"sitelinks",
":",
"url",
"=",
"city",
".",
"sitelinks",
"[",
"'dewiki'",
"]",
"wiki",
"=",
"'dewiki'",
"elif",
"'enwiki'",
"in",
"city",
".",
"sitelinks",
":",
"url",
"=",
"city",
".",
"sitelinks",
"[",
"'enwiki'",
"]",
"wiki",
"=",
"'enwiki'",
"else",
":",
"url",
"=",
"next",
"(",
"city",
".",
"sitelinks",
".",
"__iter__",
"(",
")",
")",
"attr_child",
"=",
"{",
"'url'",
":",
"url",
",",
"'wiki'",
":",
"wiki",
"}",
"data_graph",
".",
"add_edge",
"(",
"root_city",
".",
"id",
",",
"city",
".",
"id",
")",
"data_graph",
".",
"add_node",
"(",
"city",
".",
"id",
",",
"attr_child",
")",
"return",
"data_graph"
] | add_graph_nodes adds nodes to a graph and returns the new graph | [
"add_graph_nodes",
"adds",
"nodes",
"to",
"a",
"graph",
"and",
"returns",
"the",
"new",
"graph"
] | [
"\"\"\"\n add_graph_nodes adds nodes to a graph and returns the new graph\n @param data_graph: the current graph\n @param root_city: the root city\n @param wikicities: all catched cities\n @param root_city_attributes: attributes of the root_city\n @return: the updated data_graph\n \"\"\"",
"# add connection between root city an sister city",
"# create or update node of the sister city"
] | [
{
"param": "data_graph",
"type": null
},
{
"param": "root_city",
"type": null
},
{
"param": "wikicities",
"type": null
},
{
"param": "root_city_attributes",
"type": null
}
] | {
"returns": [
{
"docstring": "the updated data_graph",
"docstring_tokens": [
"the",
"updated",
"data_graph"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "data_graph",
"type": null,
"docstring": "the current graph",
"docstring_tokens": [
"the",
"current",
"graph"
],
"default": null,
"is_optional": false
},
{
"identifier": "root_city",
"type": null,
"docstring": "the root city",
"docstring_tokens": [
"the",
"root",
"city"
],
"default": null,
"is_optional": false
},
{
"identifier": "wikicities",
"type": null,
"docstring": "all catched cities",
"docstring_tokens": [
"all",
"catched",
"cities"
],
"default": null,
"is_optional": false
},
{
"identifier": "root_city_attributes",
"type": null,
"docstring": "attributes of the root_city",
"docstring_tokens": [
"attributes",
"of",
"the",
"root_city"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def add_graph_nodes(data_graph, root_city, wikicities, root_city_attributes):
data_graph.add_node(root_city.id, root_city_attributes)
for city in wikicities:
city.get()
url = ''
if city.sitelinks:
if 'dewiki' in city.sitelinks:
url = city.sitelinks['dewiki']
wiki = 'dewiki'
elif 'enwiki' in city.sitelinks:
url = city.sitelinks['enwiki']
wiki = 'enwiki'
else:
url = next(city.sitelinks.__iter__())
attr_child = {
'url': url,
'wiki': wiki
}
data_graph.add_edge(root_city.id, city.id)
data_graph.add_node(city.id, attr_child)
return data_graph | 610,472 | 834 |
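A hedged sketch of calling add_graph_nodes with stub objects; _Graph and _Item below are hypothetical stand-ins for the NetworkX graph and pywikibot items the source repo actually uses. Note a latent pitfall in the record: wiki is only assigned in the dewiki/enwiki branches, so an item whose sitelinks contain neither would reach attr_child with wiki unbound.

    class _Graph:                       # stub exposing the two methods the function needs
        def __init__(self): self.nodes, self.edges = {}, []
        def add_node(self, n, attrs): self.nodes[n] = attrs
        def add_edge(self, a, b): self.edges.append((a, b))

    class _Item:                        # stub wikidata-style item
        def __init__(self, id, links): self.id, self.sitelinks = id, links
        def get(self): pass

    g = add_graph_nodes(_Graph(), _Item("Q64", {}),
                        [_Item("Q90", {"dewiki": "Paris"})],
                        {"url": "Berlin", "wiki": "dewiki"})
    print(g.edges)                      # [('Q64', 'Q90')]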
1e8f4c5bf145bfb1b2d366fe526771e8bb7502ae | praveenkuttappan/azure-sdk-tools | packages/python-packages/protocol-stub-generator/autorest_vendor/autorest/codegen/models/enum_schema.py | [
"MIT"
] | Python | from_yaml | "EnumValue" | def from_yaml(cls, yaml_data: Dict[str, Any]) -> "EnumValue":
"""Constructs an EnumValue from yaml data.
:param yaml_data: the yaml data from which we will construct this object
:type yaml_data: dict[str, Any]
:return: A created EnumValue
:rtype: ~autorest.models.EnumValue
"""
return cls(
name=yaml_data["language"]["python"]["name"],
value=yaml_data["value"],
description=yaml_data["language"]["python"].get("description"),
) | Constructs an EnumValue from yaml data.
:param yaml_data: the yaml data from which we will construct this object
:type yaml_data: dict[str, Any]
:return: A created EnumValue
:rtype: ~autorest.models.EnumValue
| Constructs an EnumValue from yaml data. | [
"Constructs",
"an",
"EnumValue",
"from",
"yaml",
"data",
"."
] | def from_yaml(cls, yaml_data: Dict[str, Any]) -> "EnumValue":
return cls(
name=yaml_data["language"]["python"]["name"],
value=yaml_data["value"],
description=yaml_data["language"]["python"].get("description"),
) | [
"def",
"from_yaml",
"(",
"cls",
",",
"yaml_data",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"\"EnumValue\"",
":",
"return",
"cls",
"(",
"name",
"=",
"yaml_data",
"[",
"\"language\"",
"]",
"[",
"\"python\"",
"]",
"[",
"\"name\"",
"]",
",",
"value",
"=",
"yaml_data",
"[",
"\"value\"",
"]",
",",
"description",
"=",
"yaml_data",
"[",
"\"language\"",
"]",
"[",
"\"python\"",
"]",
".",
"get",
"(",
"\"description\"",
")",
",",
")"
] | Constructs an EnumValue from yaml data. | [
"Constructs",
"an",
"EnumValue",
"from",
"yaml",
"data",
"."
] | [
"\"\"\"Constructs an EnumValue from yaml data.\n\n :param yaml_data: the yaml data from which we will construct this object\n :type yaml_data: dict[str, Any]\n\n :return: A created EnumValue\n :rtype: ~autorest.models.EnumValue\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "yaml_data",
"type": "Dict[str, Any]"
}
] | {
"returns": [
{
"docstring": "A created EnumValue",
"docstring_tokens": [
"A",
"created",
"EnumValue"
],
"type": "~autorest.models.EnumValue"
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "yaml_data",
"type": "Dict[str, Any]",
"docstring": "the yaml data from which we will construct this object",
"docstring_tokens": [
"the",
"yaml",
"data",
"from",
"which",
"we",
"will",
"construct",
"this",
"object"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def from_yaml(cls, yaml_data: Dict[str, Any]) -> "EnumValue":
return cls(
name=yaml_data["language"]["python"]["name"],
value=yaml_data["value"],
description=yaml_data["language"]["python"].get("description"),
) | 610,473 | 673 |
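Given the nested shape from_yaml expects, a minimal call looks like this (the enum payload is made up):

    yaml_data = {"value": "failed", "language": {"python": {"name": "FAILED"}}}
    ev = EnumValue.from_yaml(yaml_data)
    # -> name="FAILED", value="failed", description=None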
60efd97c7a3e9ece44b479c773e442b1355b68e3 | omerk2511/dropbox | server/controllers/validators.py | [
"MIT"
] | Python | is_payload_valid | <not_specific> | def is_payload_valid(payload, rules):
"""
Checks if a payload is valid based on some predefined rules
args: payload, rules
ret: is_valid
"""
if type(payload) != dict:
return False
for rule in rules:
if type(rule) == list:
count = 0
for nested_rule in rule:
count += 1 if (nested_rule[0] in payload and type(
payload[nested_rule[0]]) in nested_rule[1]) else 0
if count == 0:
return False
else:
if (rule[0] not in payload or type(payload[rule[0]])
not in rule[1]) and (len(rule) == 2 or not rule[2]):
return False
return True |
Checks if a payload is valid based on some predefined rules
args: payload, rules
ret: is_valid
| Checks if a payload is valid based on some predefined rules
args: payload, rules
ret: is_valid | [
"Checks",
"if",
"a",
"payload",
"is",
"valid",
"based",
"on",
"some",
"predefined",
"rules",
"args",
":",
"payload",
"rules",
"ret",
":",
"is_valid"
] | def is_payload_valid(payload, rules):
if type(payload) != dict:
return False
for rule in rules:
if type(rule) == list:
count = 0
for nested_rule in rule:
count += 1 if (nested_rule[0] in payload and type(
payload[nested_rule[0]]) in nested_rule[1]) else 0
if count == 0:
return False
else:
if (rule[0] not in payload or type(payload[rule[0]])
not in rule[1]) and (len(rule) == 2 or not rule[2]):
return False
return True | [
"def",
"is_payload_valid",
"(",
"payload",
",",
"rules",
")",
":",
"if",
"type",
"(",
"payload",
")",
"!=",
"dict",
":",
"return",
"False",
"for",
"rule",
"in",
"rules",
":",
"if",
"type",
"(",
"rule",
")",
"==",
"list",
":",
"count",
"=",
"0",
"for",
"nested_rule",
"in",
"rule",
":",
"count",
"+=",
"1",
"if",
"(",
"nested_rule",
"[",
"0",
"]",
"in",
"payload",
"and",
"type",
"(",
"payload",
"[",
"nested_rule",
"[",
"0",
"]",
"]",
")",
"in",
"nested_rule",
"[",
"1",
"]",
")",
"else",
"0",
"if",
"count",
"==",
"0",
":",
"return",
"False",
"else",
":",
"if",
"(",
"rule",
"[",
"0",
"]",
"not",
"in",
"payload",
"or",
"type",
"(",
"payload",
"[",
"rule",
"[",
"0",
"]",
"]",
")",
"not",
"in",
"rule",
"[",
"1",
"]",
")",
"and",
"(",
"len",
"(",
"rule",
")",
"==",
"2",
"or",
"not",
"rule",
"[",
"2",
"]",
")",
":",
"return",
"False",
"return",
"True"
] | Checks if a payload is valid based on some predefined rules
args: payload, rules
ret: is_valid | [
"Checks",
"if",
"a",
"payload",
"is",
"valid",
"based",
"on",
"some",
"predefined",
"rules",
"args",
":",
"payload",
"rules",
"ret",
":",
"is_valid"
] | [
"\"\"\"\n Checks if a payload is valid based on some predefined rules\n args: payload, rules\n ret: is_valid\n \"\"\""
] | [
{
"param": "payload",
"type": null
},
{
"param": "rules",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "payload",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "rules",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def is_payload_valid(payload, rules):
if type(payload) != dict:
return False
for rule in rules:
if type(rule) == list:
count = 0
for nested_rule in rule:
count += 1 if (nested_rule[0] in payload and type(
payload[nested_rule[0]]) in nested_rule[1]) else 0
if count == 0:
return False
else:
if (rule[0] not in payload or type(payload[rule[0]])
not in rule[1]) and (len(rule) == 2 or not rule[2]):
return False
return True | 610,474 | 980 |
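The rules format is easiest to see by example; a small sketch with invented field names:

    rules = [
        ("title", [str]),                        # required, must be a str
        ("size", [int, float], True),            # a truthy third element marks the field optional
        [("email", [str]), ("phone", [str])],    # nested list: at least one must be present
    ]
    print(is_payload_valid({"title": "a", "email": "x@y.z"}, rules))   # True
    print(is_payload_valid({"email": "x@y.z"}, rules))                 # False: title missing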
c8ff855b30887173e2744561a1525efa83ff4478 | covid-19-impact-lab/sid-germany | src/manfred/shared.py | [
"MIT"
] | Python | hash_array | <not_specific> | def hash_array(arr):
"""Create a hashsum for fast comparison of numpy arrays."""
# make the array exactly representable as float
arr = 1 + arr - 1
return hashlib.sha1(arr.tobytes()).hexdigest() | Create a hashsum for fast comparison of numpy arrays. | Create a hashsum for fast comparison of numpy arrays. | [
"Create",
"a",
"hashsum",
"for",
"fast",
"comparison",
"of",
"numpy",
"arrays",
"."
] | def hash_array(arr):
arr = 1 + arr - 1
return hashlib.sha1(arr.tobytes()).hexdigest() | [
"def",
"hash_array",
"(",
"arr",
")",
":",
"arr",
"=",
"1",
"+",
"arr",
"-",
"1",
"return",
"hashlib",
".",
"sha1",
"(",
"arr",
".",
"tobytes",
"(",
")",
")",
".",
"hexdigest",
"(",
")"
] | Create a hashsum for fast comparison of numpy arrays. | [
"Create",
"a",
"hashsum",
"for",
"fast",
"comparison",
"of",
"numpy",
"arrays",
"."
] | [
"\"\"\"Create a hashsum for fast comparison of numpy arrays.\"\"\"",
"# make the array exactly representable as float"
] | [
{
"param": "arr",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "arr",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import hashlib
def hash_array(arr):
arr = 1 + arr - 1
return hashlib.sha1(arr.tobytes()).hexdigest() | 610,475 | 871 |
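Quick check of hash_array (dtype matters, since the digest is over the raw bytes):

    import numpy as np

    a = np.array([0, 1, 2, 3], dtype=np.int64)
    b = np.arange(4, dtype=np.int64)
    print(hash_array(a) == hash_array(b))   # True: equal contents and dtype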
a09c36020328055e635ba6202602b4e2af9e7090 | nehal96/Deep-Learning-ND-Exercises | MiniFlow/6 - Gradient Descent/gd.py | [
"MIT"
] | Python | gradient_descent_update | <not_specific> | def gradient_descent_update(x, gradx, learning_rate):
"""
Performs a gradient descent update.
"""
# TODO: Implement gradient descent.
# Return the new value for x
return x - (learning_rate * gradx) |
Performs a gradient descent update.
| Performs a gradient descent update. | [
"Performs",
"a",
"gradient",
"descent",
"update",
"."
] | def gradient_descent_update(x, gradx, learning_rate):
return x - (learning_rate * gradx) | [
"def",
"gradient_descent_update",
"(",
"x",
",",
"gradx",
",",
"learning_rate",
")",
":",
"return",
"x",
"-",
"(",
"learning_rate",
"*",
"gradx",
")"
] | Performs a gradient descent update. | [
"Performs",
"a",
"gradient",
"descent",
"update",
"."
] | [
"\"\"\"\n Performs a gradient descent update.\n \"\"\"",
"# TODO: Implement gradient descent.",
"# Return the new value for x"
] | [
{
"param": "x",
"type": null
},
{
"param": "gradx",
"type": null
},
{
"param": "learning_rate",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "x",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "gradx",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "learning_rate",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def gradient_descent_update(x, gradx, learning_rate):
return x - (learning_rate * gradx) | 610,476 | 347 |
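Worked example: three update steps on f(x) = x**2, whose gradient is 2x:

    x = 5.0
    for _ in range(3):
        x = gradient_descent_update(x, 2 * x, learning_rate=0.1)
    print(x)   # ~2.56, i.e. 5.0 * 0.8**3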
524730ad60c3fe64c42d1dca2a9595390020ee7a | vishalkal/huxley | huxley/core/models.py | [
"Unlicense"
] | Python | shuffle | <not_specific> | def shuffle(countries):
""" Returns a list of countries (or IDs) in shuffled order
for double columning, i.e. [1, 6, 2, 7, 3, 8, ...]. Before
shuffling, the list is padded to length 10. """
countries += [None]*(10 - len(countries))
c1, c2 = countries[:5], countries[5:]
return zip(c1, c2) | Returns a list of countries (or IDs) in shuffled order
for double columning, i.e. [1, 6, 2, 7, 3, 8, ...]. Before
shuffling, the list is padded to length 10. | Returns a list of countries (or IDs) in shuffled order
for double columning, i.e. | [
"Returns",
"a",
"list",
"of",
"countries",
"(",
"or",
"IDs",
")",
"in",
"shuffled",
"order",
"for",
"double",
"columning",
"i",
".",
"e",
"."
] | def shuffle(countries):
countries += [None]*(10 - len(countries))
c1, c2 = countries[:5], countries[5:]
return zip(c1, c2) | [
"def",
"shuffle",
"(",
"countries",
")",
":",
"countries",
"+=",
"[",
"None",
"]",
"*",
"(",
"10",
"-",
"len",
"(",
"countries",
")",
")",
"c1",
",",
"c2",
"=",
"countries",
"[",
":",
"5",
"]",
",",
"countries",
"[",
"5",
":",
"]",
"return",
"zip",
"(",
"c1",
",",
"c2",
")"
] | Returns a list of countries (or IDs) in shuffled order
for double columning, i.e. | [
"Returns",
"a",
"list",
"of",
"countries",
"(",
"or",
"IDs",
")",
"in",
"shuffled",
"order",
"for",
"double",
"columning",
"i",
".",
"e",
"."
] | [
"\"\"\" Returns a list of countries (or IDs) in shuffled order\n for double columning, i.e. [1, 6, 2, 7, 3, 8, ...]. Before\n shuffling, the list is padded to length 10. \"\"\""
] | [
{
"param": "countries",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "countries",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def shuffle(countries):
countries += [None]*(10 - len(countries))
c1, c2 = countries[:5], countries[5:]
return zip(c1, c2) | 610,478 | 167 |
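Example of the interleaving (note that += pads the caller's list in place):

    print(list(shuffle(list(range(1, 8)))))
    # [(1, 6), (2, 7), (3, None), (4, None), (5, None)]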
31f473a611caebd6ee9782aa181f4ca1a35dab28 | jhonattanrgc21/python-exercises | Subprogramas/calculadora.py | [
"MIT"
] | Python | read | <not_specific> | def read():
''' Reads and validates the value of the two numbers '''
while True:
try:
a = float(input('Valor uno: '))
b = float(input('Valor dos: '))
break
except:
print('\nError, debe ingresar numeros!')
return a, b | Reads and validates the value of the two numbers | Reads and validates the value of the two numbers | [
"Lee",
"y",
"valida",
"el",
"valor",
"de",
"los",
"dos",
"numeros"
] | def read():
while True:
try:
a = float(input('Valor uno: '))
b = float(input('Valor dos: '))
break
except:
print('\nError, debe ingresar numeros!')
return a, b | [
"def",
"read",
"(",
")",
":",
"while",
"True",
":",
"try",
":",
"a",
"=",
"float",
"(",
"input",
"(",
"'Valor uno: '",
")",
")",
"b",
"=",
"float",
"(",
"input",
"(",
"'Valor dos: '",
")",
")",
"break",
"except",
":",
"print",
"(",
"'\\nError, debe ingresar numeros!'",
")",
"return",
"a",
",",
"b"
] | Reads and validates the value of the two numbers | [
"Lee",
"y",
"valida",
"el",
"valor",
"de",
"los",
"dos",
"numeros"
] | [
"''' Lee y valida el valor de los dos numeros '''"
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def read():
while True:
try:
a = float(input('Valor uno: '))
b = float(input('Valor dos: '))
break
except:
print('\nError, debe ingresar numeros!')
return a, b | 610,479 | 792 |
b1a184fa755305176e4efba2bd7fb842edabc412 | Kate-Willett/HadISDH_Marine_Build | EUSTACE_SST_MAT/qc.py | [
"CC0-1.0"
] | Python | month_lengths | <not_specific> | def month_lengths(year):
'''
Return a list holding the lengths of the months in a given year
:param year: Year for which you want month lengths
:type year: int
:return: list of month lengths
:rtype: list of int
'''
if calendar.isleap(year):
month_lengths = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
else:
month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
return month_lengths |
Return a list holding the lengths of the months in a given year
:param year: Year for which you want month lengths
:type year: int
:return: list of month lengths
:rtype: list of int
| Return a list holding the lengths of the months in a given year | [
"Return",
"a",
"list",
"holding",
"the",
"lengths",
"of",
"the",
"months",
"in",
"a",
"given",
"year"
] | def month_lengths(year):
if calendar.isleap(year):
month_lengths = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
else:
month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
return month_lengths | [
"def",
"month_lengths",
"(",
"year",
")",
":",
"if",
"calendar",
".",
"isleap",
"(",
"year",
")",
":",
"month_lengths",
"=",
"[",
"31",
",",
"29",
",",
"31",
",",
"30",
",",
"31",
",",
"30",
",",
"31",
",",
"31",
",",
"30",
",",
"31",
",",
"30",
",",
"31",
"]",
"else",
":",
"month_lengths",
"=",
"[",
"31",
",",
"28",
",",
"31",
",",
"30",
",",
"31",
",",
"30",
",",
"31",
",",
"31",
",",
"30",
",",
"31",
",",
"30",
",",
"31",
"]",
"return",
"month_lengths"
] | Return a list holding the lengths of the months in a given year | [
"Return",
"a",
"list",
"holding",
"the",
"lengths",
"of",
"the",
"months",
"in",
"a",
"given",
"year"
] | [
"'''\n Return a list holding the lengths of the months in a given year\n \n :param year: Year for which you want month lengths\n :type year: int\n :return: list of month lengths\n :rtype: int\n '''"
] | [
{
"param": "year",
"type": null
}
] | {
"returns": [
{
"docstring": "list of month lengths",
"docstring_tokens": [
"list",
"of",
"month",
"lengths"
],
"type": "int"
}
],
"raises": [],
"params": [
{
"identifier": "year",
"type": null,
"docstring": "Year for which you want month lengths",
"docstring_tokens": [
"Year",
"for",
"which",
"you",
"want",
"month",
"lengths"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import calendar
def month_lengths(year):
if calendar.isleap(year):
month_lengths = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
else:
month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
return month_lengths | 610,480 | 149 |
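Leap-year check:

    print(month_lengths(2020)[1])   # 29
    print(month_lengths(2021)[1])   # 28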
c844115b6826ac4d63460916ef51a2aea3ced79b | dbellavista/script-stuff | python_lib/pyutils.py | [
"Apache-2.0"
] | Python | iter_lines | null | def iter_lines(cmd, wait_on_exit=True):
"""Generator for iterate line of a process output
Keyword arguments:
cmd -- The command to execute
wait_on_exit -- wait for the process on EOF (default True)
"""
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for l in iter(process.stdout.readline, b''):
yield l
if wait_on_exit:
process.wait() | Generator for iterating over lines of a process output
Keyword arguments:
cmd -- The command to execute
wait_on_exit -- wait for the process on EOF (default True)
| Generator for iterating over lines of a process output
Keyword arguments:
cmd -- The command to execute
wait_on_exit -- wait for the process on EOF (default True) | [
"Generator",
"for",
"iterate",
"line",
"of",
"a",
"process",
"output",
"Keyword",
"arguments",
":",
"cmd",
"--",
"The",
"command",
"to",
"execute",
"wait_on_exit",
"--",
"wait",
"the",
"process",
"on",
"EOF",
"(",
"default",
"True",
")"
] | def iter_lines(cmd, wait_on_exit=True):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for l in iter(process.stdout.readline, b''):
yield l
if wait_on_exit:
process.wait() | [
"def",
"iter_lines",
"(",
"cmd",
",",
"wait_on_exit",
"=",
"True",
")",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"for",
"l",
"in",
"iter",
"(",
"process",
".",
"stdout",
".",
"readline",
",",
"b''",
")",
":",
"yield",
"l",
"if",
"wait_on_exit",
":",
"process",
".",
"wait",
"(",
")"
] | Generator for iterating over lines of a process output
Keyword arguments:
cmd -- The command to execute
wait_on_exit -- wait for the process on EOF (default True) | [
"Generator",
"for",
"iterate",
"line",
"of",
"a",
"process",
"output",
"Keyword",
"arguments",
":",
"cmd",
"--",
"The",
"command",
"to",
"execute",
"wait_on_exit",
"--",
"wait",
"the",
"process",
"on",
"EOF",
"(",
"default",
"True",
")"
] | [
"\"\"\"Generator for iterate line of a process output\n\n Keyword arguments:\n cmd -- The command to execute\n wait_on_exit -- wait the process on EOF (default True)\n \"\"\""
] | [
{
"param": "cmd",
"type": null
},
{
"param": "wait_on_exit",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cmd",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "wait_on_exit",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import subprocess
def iter_lines(cmd, wait_on_exit=True):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for l in iter(process.stdout.readline, b''):
yield l
if wait_on_exit:
process.wait() | 610,481 | 345 |
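Usage sketch (POSIX shell assumed; lines arrive as bytes because stdout is not opened in text mode):

    for line in iter_lines("echo one; echo two"):
        print(line.rstrip())   # b'one' then b'two'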
ec65d798a503835395e6fb8661b002c25408999b | KshitijKarthick/tvecs | tests/test_vector_space_mapper.py | [
"MIT"
] | Python | teardown_class | null | def teardown_class(cls):
"""
Delete temp files generated for tests.
| *Test Suite ID* : V
|
| *Test Case Number* : 02
|
| *Description* : Delete models after construction to remove
| residual of setup_test [Test Case Number 01]
|
| *Preconditions* : model1 and model2 exist
|
| *Test Parameters* : model1 and model2 file paths
|
| *Test Data* :
| model1 file path = 'tests/resources/model1'
| model2 file path = 'tests/resources/model2'
|
| *Expected Result* : Models deleted
|
| *Actual Result* : Models deleted
|
| **Status : Pass**
|
"""
try:
os.remove(
os.path.join('tests', 'resources', 'model_1')
)
os.remove(
os.path.join('tests', 'resources', 'model_2')
)
except (OSError, IOError):
pass |
Delete temp files generated for tests.
| *Test Suite ID* : V
|
| *Test Case Number* : 02
|
| *Description* : Delete models after construction to remove
| residual of setup_test [Test Case Number 01]
|
| *Preconditions* : model1 and model2 exist
|
| *Test Parameters* : model1 and model2 file paths
|
| *Test Data* :
| model1 file path = 'tests/resources/model1'
| model2 file path = 'tests/resources/model2'
|
| *Expected Result* : Models deleted
|
| *Actual Result* : Models deleted
|
| **Status : Pass**
|
| Delete temp files generated for tests. | [
"Delete",
"temp",
"files",
"generated",
"for",
"tests",
"."
] | def teardown_class(cls):
try:
os.remove(
os.path.join('tests', 'resources', 'model_1')
)
os.remove(
os.path.join('tests', 'resources', 'model_2')
)
except (OSError, IOError):
pass | [
"def",
"teardown_class",
"(",
"cls",
")",
":",
"try",
":",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'tests'",
",",
"'resources'",
",",
"'model_1'",
")",
")",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'tests'",
",",
"'resources'",
",",
"'model_2'",
")",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
":",
"pass"
] | Delete temp files generated for tests. | [
"Delete",
"temp",
"files",
"generated",
"for",
"tests",
"."
] | [
"\"\"\"\n Delete temp files generated for tests.\n\n | *Test Suite ID* : V\n |\n | *Test Case Number* : 02\n |\n | *Description* : Delete models after construction to remove\n | residual of setup_test [Test Case Number 01]\n |\n | *Preconditions* : model1 and model2 exist\n |\n | *Test Parameters* : model1 and model2 file paths\n |\n | *Test Data* :\n | model1 file path = 'tests/resources/model1'\n | model2 file path = 'tests/resources/model2'\n |\n | *Expected Result* : Models deleted\n |\n | *Actual Result* : Models deleted\n |\n | **Status : Pass**\n |\n\n \"\"\""
] | [
{
"param": "cls",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def teardown_class(cls):
try:
os.remove(
os.path.join('tests', 'resources', 'model_1')
)
os.remove(
os.path.join('tests', 'resources', 'model_2')
)
except (OSError, IOError):
pass | 610,482 | 296 |
c3c71769c86b54863a7f48f318aea7063828f56f | rdagnelie/devops-bot | compute.py | [
"Apache-2.0"
] | Python | _compute_clear_passwordlist | <not_specific> | def _compute_clear_passwordlist(count):
"""internal unique routine to compute passwords"""
return pwd.genword(
length=16,
entropy=56,
chars="aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ0123456789&~\#{[|^@]}$%*,?.;/:!=+)(-'",
returns=count,
) | internal unique routine to compute passwords | internal unique routine to compute passwords | [
"internal",
"unique",
"routine",
"to",
"compute",
"passwords"
] | def _compute_clear_passwordlist(count):
return pwd.genword(
length=16,
entropy=56,
chars="aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ0123456789&~\#{[|^@]}$%*,?.;/:!=+)(-'",
returns=count,
) | [
"def",
"_compute_clear_passwordlist",
"(",
"count",
")",
":",
"return",
"pwd",
".",
"genword",
"(",
"length",
"=",
"16",
",",
"entropy",
"=",
"56",
",",
"chars",
"=",
"\"aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ0123456789&~\\#{[|^@]}$%*,?.;/:!=+)(-'\"",
",",
"returns",
"=",
"count",
",",
")"
] | internal unique routine to compute passwords | [
"internal",
"unique",
"routine",
"to",
"compute",
"passwords"
] | [
"\"\"\"internal unique routine to compute passwords\"\"\""
] | [
{
"param": "count",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "count",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import pwd
def _compute_clear_passwordlist(count):
return pwd.genword(
length=16,
entropy=56,
chars="aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ0123456789&~\#{[|^@]}$%*,?.;/:!=+)(-'",
returns=count,
) | 610,483 | 160 |
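With passlib assumed as the source of pwd (see the corrected import in the record above), a usage sketch; the printed values are illustrative:

    from passlib import pwd

    print(_compute_clear_passwordlist(2))   # e.g. ['x7#Kq...', 'Qm9!t...'], two 16-character passwords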
534146f8def9815e3d8190130fb453ff96ac98f9 | cancervariants/metakb | tests/conftest.py | [
"MIT"
] | Python | check_method | <not_specific> | def check_method():
"""Create a test fixture to compare methods."""
def check_method(actual, test):
"""Check that methods match."""
assert actual == test
return check_method | Create a test fixture to compare methods. | Create a test fixture to compare methods. | [
"Create",
"a",
"test",
"fixture",
"to",
"compare",
"methods",
"."
] | def check_method():
def check_method(actual, test):
assert actual == test
return check_method | [
"def",
"check_method",
"(",
")",
":",
"def",
"check_method",
"(",
"actual",
",",
"test",
")",
":",
"\"\"\"Check that methods match.\"\"\"",
"assert",
"actual",
"==",
"test",
"return",
"check_method"
] | Create a test fixture to compare methods. | [
"Create",
"a",
"test",
"fixture",
"to",
"compare",
"methods",
"."
] | [
"\"\"\"Create a test fixture to compare methods.\"\"\"",
"\"\"\"Check that methods match.\"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def check_method():
def check_method(actual, test):
assert actual == test
return check_method | 610,484 | 625 |
1947defb4e86064ec0df6b359059f0dfa55a953e | jperez999/NVTabular | nvtabular/io.py | [
"Apache-2.0"
] | Python | _merge_general_metadata | <not_specific> | def _merge_general_metadata(meta_list):
""" Combine list of "general" metadata dicts into
a single dict
"""
if not meta_list:
return {}
meta = None
for md in meta_list:
if meta:
meta["data_paths"] += md["data_paths"]
meta["file_stats"] += md["file_stats"]
else:
meta = md.copy()
return meta | Combine list of "general" metadata dicts into
a single dict
| Combine list of "general" metadata dicts into
a single dict | [
"Combine",
"list",
"of",
"\"",
"general",
"\"",
"metadata",
"dicts",
"into",
"a",
"single",
"dict"
] | def _merge_general_metadata(meta_list):
if not meta_list:
return {}
meta = None
for md in meta_list:
if meta:
meta["data_paths"] += md["data_paths"]
meta["file_stats"] += md["file_stats"]
else:
meta = md.copy()
return meta | [
"def",
"_merge_general_metadata",
"(",
"meta_list",
")",
":",
"if",
"not",
"meta_list",
":",
"return",
"{",
"}",
"meta",
"=",
"None",
"for",
"md",
"in",
"meta_list",
":",
"if",
"meta",
":",
"meta",
"[",
"\"data_paths\"",
"]",
"+=",
"md",
"[",
"\"data_paths\"",
"]",
"meta",
"[",
"\"file_stats\"",
"]",
"+=",
"md",
"[",
"\"file_stats\"",
"]",
"else",
":",
"meta",
"=",
"md",
".",
"copy",
"(",
")",
"return",
"meta"
] | Combine list of "general" metadata dicts into
a single dict | [
"Combine",
"list",
"of",
"\"",
"general",
"\"",
"metadata",
"dicts",
"into",
"a",
"single",
"dict"
] | [
"\"\"\" Combine list of \"general\" metadata dicts into\n a single dict\n \"\"\""
] | [
{
"param": "meta_list",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "meta_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _merge_general_metadata(meta_list):
if not meta_list:
return {}
meta = None
for md in meta_list:
if meta:
meta["data_paths"] += md["data_paths"]
meta["file_stats"] += md["file_stats"]
else:
meta = md.copy()
return meta | 610,485 | 407 |
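Small example; note that md.copy() is shallow, so the first dict's lists are extended in place:

    m1 = {"data_paths": ["a.parquet"], "file_stats": [{"rows": 10}]}
    m2 = {"data_paths": ["b.parquet"], "file_stats": [{"rows": 20}]}
    print(_merge_general_metadata([m1, m2])["data_paths"])   # ['a.parquet', 'b.parquet']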
2c6d18ec63e9dd506732425a2e7ac845db2530f7 | kaspermunch/argweaver | argweaver/deps/compbio/phylo.py | [
"MIT"
] | Python | add_implied_spec_nodes_brecon | null | def add_implied_spec_nodes_brecon(tree, brecon):
"""
adds speciation nodes to tree that are implied but are not present
because of gene losses
"""
for node, events in brecon.items():
for sp, event in events:
if event == "specloss":
parent = node.parent
children = parent.children
node2 = tree.new_node()
node2.parent = parent
children[children.index(node)] = node2
node.parent = node2
node2.children.append(node)
brecon[node2] = [[sp, "spec"]]
elif event == "transloss":
parent = node.parent
children = parent.children
node2 = tree.new_node()
node2.parent = parent
children[children.index(node)] = node2
node.parent = node2
node2.children.append(node)
brecon[node2] = [[sp, "trans"]]
brecon[node] = events[-1:] |
adds speciation nodes to tree that are implied but are not present
because of gene losses
| adds speciation nodes to tree that are implied but are not present
because of gene losses | [
"adds",
"speciation",
"nodes",
"to",
"tree",
"that",
"are",
"implied",
"but",
"are",
"not",
"present",
"because",
"of",
"gene",
"losses"
] | def add_implied_spec_nodes_brecon(tree, brecon):
for node, events in brecon.items():
for sp, event in events:
if event == "specloss":
parent = node.parent
children = parent.children
node2 = tree.new_node()
node2.parent = parent
children[children.index(node)] = node2
node.parent = node2
node2.children.append(node)
brecon[node2] = [[sp, "spec"]]
elif event == "transloss":
parent = node.parent
children = parent.children
node2 = tree.new_node()
node2.parent = parent
children[children.index(node)] = node2
node.parent = node2
node2.children.append(node)
brecon[node2] = [[sp, "trans"]]
brecon[node] = events[-1:] | [
"def",
"add_implied_spec_nodes_brecon",
"(",
"tree",
",",
"brecon",
")",
":",
"for",
"node",
",",
"events",
"in",
"brecon",
".",
"items",
"(",
")",
":",
"for",
"sp",
",",
"event",
"in",
"events",
":",
"if",
"event",
"==",
"\"specloss\"",
":",
"parent",
"=",
"node",
".",
"parent",
"children",
"=",
"parent",
".",
"children",
"node2",
"=",
"tree",
".",
"new_node",
"(",
")",
"node2",
".",
"parent",
"=",
"parent",
"children",
"[",
"children",
".",
"index",
"(",
"node",
")",
"]",
"=",
"node2",
"node",
".",
"parent",
"=",
"node2",
"node2",
".",
"children",
".",
"append",
"(",
"node",
")",
"brecon",
"[",
"node2",
"]",
"=",
"[",
"[",
"sp",
",",
"\"spec\"",
"]",
"]",
"elif",
"event",
"==",
"\"transloss\"",
":",
"parent",
"=",
"node",
".",
"parent",
"children",
"=",
"parent",
".",
"children",
"node2",
"=",
"tree",
".",
"new_node",
"(",
")",
"node2",
".",
"parent",
"=",
"parent",
"children",
"[",
"children",
".",
"index",
"(",
"node",
")",
"]",
"=",
"node2",
"node",
".",
"parent",
"=",
"node2",
"node2",
".",
"children",
".",
"append",
"(",
"node",
")",
"brecon",
"[",
"node2",
"]",
"=",
"[",
"[",
"sp",
",",
"\"trans\"",
"]",
"]",
"brecon",
"[",
"node",
"]",
"=",
"events",
"[",
"-",
"1",
":",
"]"
] | adds speciation nodes to tree that are implied but are not present
because of gene losses | [
"adds",
"speciation",
"nodes",
"to",
"tree",
"that",
"are",
"implied",
"but",
"are",
"not",
"present",
"because",
"of",
"gene",
"losses"
] | [
"\"\"\"\n adds speciation nodes to tree that are implied but are not present\n because of gene losses\n \"\"\""
] | [
{
"param": "tree",
"type": null
},
{
"param": "brecon",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "tree",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "brecon",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def add_implied_spec_nodes_brecon(tree, brecon):
for node, events in brecon.items():
for sp, event in events:
if event == "specloss":
parent = node.parent
children = parent.children
node2 = tree.new_node()
node2.parent = parent
children[children.index(node)] = node2
node.parent = node2
node2.children.append(node)
brecon[node2] = [[sp, "spec"]]
elif event == "transloss":
parent = node.parent
children = parent.children
node2 = tree.new_node()
node2.parent = parent
children[children.index(node)] = node2
node.parent = node2
node2.children.append(node)
brecon[node2] = [[sp, "trans"]]
brecon[node] = events[-1:] | 610,486 | 600 |
f3c85fe2fb8453855b8e8fe65bce6d78c286e0d6 | xingmimfl/pytorch_FPN | faster_rcnn/rpn_msr/proposal_layer.py | [
"MIT"
] | Python | _clip_pad | <not_specific> | def _clip_pad(tensor, pad_shape):
"""
Clip boxes of the pad area.
:param tensor: [n, c, H, W]
:param pad_shape: [h, w]
:return: [n, c, h, w]
"""
H, W = tensor.shape[2:]
h, w = pad_shape
if h < H or w < W:
tensor = tensor[:, :, :h, :w].copy()
return tensor |
Clip boxes of the pad area.
:param tensor: [n, c, H, W]
:param pad_shape: [h, w]
:return: [n, c, h, w]
| Clip boxes of the pad area. | [
"Clip",
"boxes",
"of",
"the",
"pad",
"area",
"."
] | def _clip_pad(tensor, pad_shape):
H, W = tensor.shape[2:]
h, w = pad_shape
if h < H or w < W:
tensor = tensor[:, :, :h, :w].copy()
return tensor | [
"def",
"_clip_pad",
"(",
"tensor",
",",
"pad_shape",
")",
":",
"H",
",",
"W",
"=",
"tensor",
".",
"shape",
"[",
"2",
":",
"]",
"h",
",",
"w",
"=",
"pad_shape",
"if",
"h",
"<",
"H",
"or",
"w",
"<",
"W",
":",
"tensor",
"=",
"tensor",
"[",
":",
",",
":",
",",
":",
"h",
",",
":",
"w",
"]",
".",
"copy",
"(",
")",
"return",
"tensor"
] | Clip boxes of the pad area. | [
"Clip",
"boxes",
"of",
"the",
"pad",
"area",
"."
] | [
"\"\"\"\n Clip boxes of the pad area.\n :param tensor: [n, c, H, W]\n :param pad_shape: [h, w]\n :return: [n, c, h, w]\n \"\"\""
] | [
{
"param": "tensor",
"type": null
},
{
"param": "pad_shape",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "tensor",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "pad_shape",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _clip_pad(tensor, pad_shape):
H, W = tensor.shape[2:]
h, w = pad_shape
if h < H or w < W:
tensor = tensor[:, :, :h, :w].copy()
return tensor | 610,487 | 891 |
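Shape check with NumPy:

    import numpy as np

    t = np.zeros((1, 3, 8, 8))
    print(_clip_pad(t, (5, 6)).shape)   # (1, 3, 5, 6)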
c7718e223572a42e1a181bd81fbaedfc33ecddaf | Vozak16/Gmail-Manager | my_charts/web_app/modules/gmail_manager.py | [
"MIT"
] | Python | write_json | null | def write_json(inbox_info_dict):
"""
This function writes the data from the dictionary to a JSON file.
Namely, it writes the data obtained from GUser.get_inbox_info method.
:param inbox_info_dict: dict
:return: None
"""
with open("output_data_examples.inbox_info.json", 'w') as json_file:
json.dump(inbox_info_dict, json_file, indent=4, ensure_ascii=False) |
This function writes the data from the dictionary to a JSON file.
Namely, it writes the data obtained from GUser.get_inbox_info method.
:param inbox_info_dict: dict
:return: None
| This function writes the data from the dictionary to a JSON file.
Namely, it writes the data obtained from GUser.get_inbox_info method. | [
"This",
"function",
"loads",
"the",
"data",
"from",
"the",
"dictionary",
"to",
"json",
"file",
".",
"Namely",
"it",
"loads",
"the",
"data",
"got",
"from",
"GUser",
".",
"get_inbox_info",
"method",
"."
] | def write_json(inbox_info_dict):
with open("output_data_examples.inbox_info.json", 'w') as json_file:
json.dump(inbox_info_dict, json_file, indent=4, ensure_ascii=False) | [
"def",
"write_json",
"(",
"inbox_info_dict",
")",
":",
"with",
"open",
"(",
"\"output_data_examples.inbox_info.json\"",
",",
"'w'",
")",
"as",
"json_file",
":",
"json",
".",
"dump",
"(",
"inbox_info_dict",
",",
"json_file",
",",
"indent",
"=",
"4",
",",
"ensure_ascii",
"=",
"False",
")"
] | This function writes the data from the dictionary to a JSON file. | [
"This",
"function",
"loads",
"the",
"data",
"from",
"the",
"dictionary",
"to",
"json",
"file",
"."
] | [
"\"\"\"\n This function loads the data from the dictionary to json file.\n Namely, it loads the data got from GUser.get_inbox_info method.\n :param inbox_info_dict: dict\n :return: None\n \"\"\""
] | [
{
"param": "inbox_info_dict",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "inbox_info_dict",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import json
def write_json(inbox_info_dict):
with open("output_data_examples.inbox_info.json", 'w') as json_file:
json.dump(inbox_info_dict, json_file, indent=4, ensure_ascii=False) | 610,488 | 681 |
3dde067be6115db582a4fe1459b79e9234ea53b7 | ONSdigital/eq-google-slides-to-schema | process.py | [
"MIT"
] | Python | _clean_join | <not_specific> | def _clean_join(content):
"""
Joins a list of values together and cleans (removes newlines)
:param content: A str or list of str to process
:return: The joined/cleaned str
"""
if not isinstance(content, str):
content = ''.join(content) if content else ''
return content.replace('\n', '') |
Joins a list of values together and cleans (removes newlines)
:param content: A str or list of str to process
:return: The joined/cleaned str
| Joins a list of values together and cleans (removes newlines) | [
"Joins",
"a",
"list",
"of",
"values",
"together",
"and",
"cleans",
"(",
"removes",
"newlines",
")"
] | def _clean_join(content):
if not isinstance(content, str):
content = ''.join(content) if content else ''
return content.replace('\n', '') | [
"def",
"_clean_join",
"(",
"content",
")",
":",
"if",
"not",
"isinstance",
"(",
"content",
",",
"str",
")",
":",
"content",
"=",
"''",
".",
"join",
"(",
"content",
")",
"if",
"content",
"else",
"''",
"return",
"content",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")"
] | Joins a list of values together and cleans (removes newlines) | [
"Joins",
"a",
"list",
"of",
"values",
"together",
"and",
"cleans",
"(",
"removes",
"newlines",
")"
] | [
"\"\"\"\n Joins a list of values together and cleans (removes newlines)\n :param content: A str or list of str to process\n :return: The joined/cleaned str\n \"\"\""
] | [
{
"param": "content",
"type": null
}
] | {
"returns": [
{
"docstring": "The joined/cleaned str",
"docstring_tokens": [
"The",
"joined",
"/",
"cleaned",
"str"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "content",
"type": null,
"docstring": "A str or list of str to process",
"docstring_tokens": [
"A",
"str",
"or",
"list",
"of",
"str",
"to",
"process"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _clean_join(content):
if not isinstance(content, str):
content = ''.join(content) if content else ''
return content.replace('\n', '') | 610,489 | 47 |
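Behaviour at a glance:

    print(_clean_join(["foo\n", "bar"]))   # 'foobar'
    print(_clean_join("a\nb"))             # 'ab'
    print(_clean_join(None))               # '' (falsy input collapses to an empty string)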
40a6b496835a0898768a847930b40ebe9e8f4995 | rbhoot/us_census_tools | census_downloader/census_api_data_downloader.py | [
"Apache-2.0"
] | Python | url_filter | list | def url_filter(url_list: list) -> list:
"""Filters out URLs that are to be queried, skipping URLs that responded with 204 HTTP code.
Args:
url_list: List of URL with metadata dict object.
Returns:
List of URL with metadata dict object that need to be queried.
"""
ret_list = []
for cur_url in url_list:
if cur_url['status'] == 'pending' or cur_url['status'].startswith('fail'):
if 'http_code' in cur_url:
if cur_url['http_code'] != '204':
ret_list.append(cur_url)
else:
ret_list.append(cur_url)
return ret_list | Filters out URLs that are to be queried, skipping URLs that responded with 204 HTTP code.
Args:
url_list: List of URL with metadata dict object.
Returns:
List of URL with metadata dict object that need to be queried.
| Filters out URLs that are to be queried, skipping URLs that responded with 204 HTTP code. | [
"Filters",
"out",
"URLs",
"that",
"are",
"to",
"be",
"queried",
"skipping",
"URLs",
"that",
"responded",
"with",
"204",
"HTTP",
"code",
"."
] | def url_filter(url_list: list) -> list:
ret_list = []
for cur_url in url_list:
if cur_url['status'] == 'pending' or cur_url['status'].startswith('fail'):
if 'http_code' in cur_url:
if cur_url['http_code'] != '204':
ret_list.append(cur_url)
else:
ret_list.append(cur_url)
return ret_list | [
"def",
"url_filter",
"(",
"url_list",
":",
"list",
")",
"->",
"list",
":",
"ret_list",
"=",
"[",
"]",
"for",
"cur_url",
"in",
"url_list",
":",
"if",
"cur_url",
"[",
"'status'",
"]",
"==",
"'pending'",
"or",
"cur_url",
"[",
"'status'",
"]",
".",
"startswith",
"(",
"'fail'",
")",
":",
"if",
"'http_code'",
"in",
"cur_url",
":",
"if",
"cur_url",
"[",
"'http_code'",
"]",
"!=",
"'204'",
":",
"ret_list",
".",
"append",
"(",
"cur_url",
")",
"else",
":",
"ret_list",
".",
"append",
"(",
"cur_url",
")",
"return",
"ret_list"
] | Filters out URLs that are to be queried, skipping URLs that responded with 204 HTTP code. | [
"Filters",
"out",
"URLs",
"that",
"are",
"to",
"be",
"queried",
"skipping",
"URLs",
"that",
"responded",
"with",
"204",
"HTTP",
"code",
"."
] | [
"\"\"\"Filters out URLs that are to be queried, skipping URLs that responded with 204 HTTP code.\n\n Args:\n url_list: List of URL with metadata dict object.\n \n Returns:\n List of URL with metadata dict object that need to be queried.\n \"\"\""
] | [
{
"param": "url_list",
"type": "list"
}
] | {
"returns": [
{
"docstring": "List of URL with metadata dict object that need to be queried.",
"docstring_tokens": [
"List",
"of",
"URL",
"with",
"metadata",
"dict",
"object",
"that",
"need",
"to",
"be",
"queried",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "url_list",
"type": "list",
"docstring": "List of URL with metadata dict object.",
"docstring_tokens": [
"List",
"of",
"URL",
"with",
"metadata",
"dict",
"object",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def url_filter(url_list: list) -> list:
ret_list = []
for cur_url in url_list:
if cur_url['status'] == 'pending' or cur_url['status'].startswith('fail'):
if 'http_code' in cur_url:
if cur_url['http_code'] != '204':
ret_list.append(cur_url)
else:
ret_list.append(cur_url)
return ret_list | 610,490 | 215 |
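Example: entries are re-queried unless they already succeeded or came back with HTTP 204:

    urls = [
        {"url": "u1", "status": "pending"},
        {"url": "u2", "status": "fail_http", "http_code": "204"},   # empty response: dropped
        {"url": "u3", "status": "ok"},
    ]
    print([u["url"] for u in url_filter(urls)])   # ['u1']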
c012da5bad2776d31c92fa0293f66ac532be7da9 | tpDcc/tpDcc-libs-math | tpDcc/libs/math/core/scalar.py | [
"MIT"
] | Python | snap_value | <not_specific> | def snap_value(input, snap_value):
"""
Returns snap value given an input and a base snap value
:param input: float
:param snap_value: float
:return: float
"""
return round((float(input) / snap_value)) * snap_value |
Returns snap value given an input and a base snap value
:param input: float
:param snap_value: float
:return: float
| Returns snap value given an input and a base snap value | [
"Returns",
"snap",
"value",
"given",
"an",
"input",
"and",
"a",
"base",
"snap",
"value"
] | def snap_value(input, snap_value):
return round((float(input) / snap_value)) * snap_value | [
"def",
"snap_value",
"(",
"input",
",",
"snap_value",
")",
":",
"return",
"round",
"(",
"(",
"float",
"(",
"input",
")",
"/",
"snap_value",
")",
")",
"*",
"snap_value"
] | Returns snap value given an input and a base snap value | [
"Returns",
"snap",
"value",
"given",
"an",
"input",
"and",
"a",
"base",
"snap",
"value"
] | [
"\"\"\"\n Returns snap value given an input and a base snap value\n :param input: float\n :param snap_value: float\n :return: float\n \"\"\""
] | [
{
"param": "input",
"type": null
},
{
"param": "snap_value",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "input",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "snap_value",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def snap_value(input, snap_value):
return round((float(input) / snap_value)) * snap_value | 610,491 | 743 |
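Numeric check:

    print(snap_value(7.3, 2.5))   # 7.5 (7.3 / 2.5 = 2.92, rounds to 3)
    print(snap_value(1.2, 0.5))   # 1.0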
17f7decf9db6080d50fb798c509fa43cd8443674 | Luk-kar/Show_Images_Differences | Show_Images_Differences/utils.py | [
"MIT"
] | Python | error_check_path_is_empty_string | null | def error_check_path_is_empty_string(path):
"""
Used to fail fast rather than too late.
Also to avoid empty string in os path (when path is empty it takes script directory)
"""
if path == "":
raise ValueError("Error: variable is empty string") |
Used to fail fast rather than too late.
Also to avoid empty string in os path (when path is empty it takes script directory)
| Used to fail fast rather than too late.
Also to avoid empty string in os path (when path is empty it takes script directory) | [
"Used",
"to",
"fail",
"fast",
"than",
"too",
"late",
".",
"Also",
"to",
"avoid",
"empty",
"string",
"in",
"os",
"path",
"(",
"when",
"path",
"is",
"empty",
"it",
"takes",
"script",
"directory",
")"
] | def error_check_path_is_empty_string(path):
if path == "":
raise ValueError("Error: variable is empty string") | [
"def",
"error_check_path_is_empty_string",
"(",
"path",
")",
":",
"if",
"path",
"==",
"\"\"",
":",
"raise",
"ValueError",
"(",
"\"Error: variable is empty string\"",
")"
] | Used to fail fast rather than too late. | [
"Used",
"to",
"fail",
"fast",
"than",
"too",
"late",
"."
] | [
"\"\"\"\n Used to fail fast than too late.\n Also to avoid empty string in os path (when path is empty it takes script directory)\n \"\"\""
] | [
{
"param": "path",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def error_check_path_is_empty_string(path):
if path == "":
raise ValueError("Error: variable is empty string") | 610,492 | 60 |
521cad80828ba2e7cf1a2a14bc5934e6ccef6db6 | IMTMarburg/mbf_heatmap | src/mbf_heatmap/chipseq/norm.py | [
"MIT"
] | Python | _apply_tpm | null | def _apply_tpm(lanes_to_draw, raw_data):
"""Convert read counts in raw_data into TPMs - in situ"""
for lane in lanes_to_draw:
norm_factor = 1e6 / lane.mapped_reads()
raw_data[lane.name] = raw_data[lane.name] * norm_factor | Convert read counts in raw_data into TPMs - in situ | Convert read counts in raw_data into TPMs - in situ | [
"Convert",
"read",
"counts",
"in",
"raw_data",
"into",
"TPMs",
"-",
"in",
"situ"
] | def _apply_tpm(lanes_to_draw, raw_data):
for lane in lanes_to_draw:
norm_factor = 1e6 / lane.mapped_reads()
raw_data[lane.name] = raw_data[lane.name] * norm_factor | [
"def",
"_apply_tpm",
"(",
"lanes_to_draw",
",",
"raw_data",
")",
":",
"for",
"lane",
"in",
"lanes_to_draw",
":",
"norm_factor",
"=",
"1e6",
"/",
"lane",
".",
"mapped_reads",
"(",
")",
"raw_data",
"[",
"lane",
".",
"name",
"]",
"=",
"raw_data",
"[",
"lane",
".",
"name",
"]",
"*",
"norm_factor"
] | Convert read counts in raw_data into TPMs - in situ | [
"Convert",
"read",
"counts",
"in",
"raw_data",
"into",
"TPMs",
"-",
"in",
"situ"
] | [
"\"\"\"Convert read counts in raw_data into TPMs - in situ\"\"\""
] | [
{
"param": "lanes_to_draw",
"type": null
},
{
"param": "raw_data",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "lanes_to_draw",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "raw_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _apply_tpm(lanes_to_draw, raw_data):
for lane in lanes_to_draw:
norm_factor = 1e6 / lane.mapped_reads()
raw_data[lane.name] = raw_data[lane.name] * norm_factor | 610,493 | 497 |
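In-place scaling sketch with a stub lane; _Lane is a hypothetical stand-in for the project's lane objects:

    import pandas as pd

    class _Lane:
        name = "input"
        def mapped_reads(self): return 2_000_000

    df = pd.DataFrame({"input": [4.0, 10.0]})
    _apply_tpm([_Lane()], df)
    print(df["input"].tolist())   # [2.0, 5.0], counts scaled by 1e6 / mapped reads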
44d826988159d97647cf70b6769bc722aaf63772 | edwinkost/wflow | wflow-py/WflowDeltashell/wflib.py | [
"MIT"
] | Python | GetItemByPartialName | <not_specific> | def GetItemByPartialName(list, name):
""" Returns the first item in the list
that has the provided name"""
for item in list :
if name.upper() in item.Name.upper():
return item | Returns the first item in the list
that contains the provided name | Returns the first item in the list
that contains the provided name | [
"Returns",
"the",
"first",
"item",
"in",
"the",
"list",
"that",
"has",
"the",
"provided",
"name"
] | def GetItemByPartialName(list, name):
for item in list :
if name.upper() in item.Name.upper():
return item | [
"def",
"GetItemByPartialName",
"(",
"list",
",",
"name",
")",
":",
"for",
"item",
"in",
"list",
":",
"if",
"name",
".",
"upper",
"(",
")",
"in",
"item",
".",
"Name",
".",
"upper",
"(",
")",
":",
"return",
"item"
] | Returns the first item in the list
that contains the provided name | [
"Returns",
"the",
"first",
"item",
"in",
"the",
"list",
"that",
"has",
"the",
"provided",
"name"
] | [
"\"\"\" Returns the first item in the list\n that has the provided name\"\"\""
] | [
{
"param": "list",
"type": null
},
{
"param": "name",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def GetItemByPartialName(list, name):
for item in list :
if name.upper() in item.Name.upper():
return item | 610,494 | 0 |
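Case-insensitive partial match in action (note the parameter shadows the built-in list):

    from types import SimpleNamespace

    items = [SimpleNamespace(Name="FlowFM_output"), SimpleNamespace(Name="Waves")]
    print(GetItemByPartialName(items, "flowfm").Name)   # 'FlowFM_output'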
77f82dfd2ad92542b3c58d24e8f5e962744b6042 | wspr-ncsu/visiblev8 | builder/tool.py | [
"BSD-3-Clause"
] | Python | docker_check_image | bool | def docker_check_image(image_name: str) -> bool:
'''Return True if <image_name> exists as a local Docker container image.
'''
# Should always succeed (if docker is there and the version test passed)
raw = subprocess.check_output(['docker', 'image', 'ls', '-q', image_name])
# Empty output means no such image
logging.debug("docker image ls '{0}' -> '{1}'".format(image_name, raw.decode('utf8').strip()))
return bool(raw) | Return True if <image_name> exists as a local Docker container image.
| Return True if exists as a local Docker container image. | [
"Return",
"True",
"if",
"exists",
"as",
"a",
"local",
"Docker",
"container",
"image",
"."
] | def docker_check_image(image_name: str) -> bool:
raw = subprocess.check_output(['docker', 'image', 'ls', '-q', image_name])
logging.debug("docker image ls '{0}' -> '{1}'".format(image_name, raw.decode('utf8').strip()))
return bool(raw) | [
"def",
"docker_check_image",
"(",
"image_name",
":",
"str",
")",
"->",
"bool",
":",
"raw",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'docker'",
",",
"'image'",
",",
"'ls'",
",",
"'-q'",
",",
"image_name",
"]",
")",
"logging",
".",
"debug",
"(",
"\"docker image ls '{0}' -> '{1}'\"",
".",
"format",
"(",
"image_name",
",",
"raw",
".",
"decode",
"(",
"'utf8'",
")",
".",
"strip",
"(",
")",
")",
")",
"return",
"bool",
"(",
"raw",
")"
] | Return True if <image_name> exists as a local Docker container image. | [
"Return",
"True",
"if",
"<image_name",
">",
"exists",
"as",
"a",
"local",
"Docker",
"container",
"image",
"."
] | [
"'''Return True if <image_name> exists as a local Docker container image.\n '''",
"# Should always succeed (if docker is there and the version test passed)",
"# Empty output means no such image"
] | [
{
"param": "image_name",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "image_name",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import logging
import subprocess
def docker_check_image(image_name: str) -> bool:
raw = subprocess.check_output(['docker', 'image', 'ls', '-q', image_name])
logging.debug("docker image ls '{0}' -> '{1}'".format(image_name, raw.decode('utf8').strip()))
return bool(raw) | 610,495 | 28 |
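Usage sketch (requires a local Docker daemon and the docker CLI on PATH; the image tag is arbitrary):

    if docker_check_image("ubuntu:20.04"):
        print("image already present locally")
    else:
        print("image not pulled yet")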
7adb16cae8e7b61ce5c585e274805090a5a0c2f0 | pwang724/deepethogram | deepethogram/losses.py | [
"FSFAP"
] | Python | should_decay_parameter | bool | def should_decay_parameter(name: str, param: torch.Tensor) -> bool:
"""Determines if L2 (or L2-SP) decay should be applied to parameter
BatchNorm and bias parameters are excluded. Uses both the name of the parameter and shape of parameter to determine
Helpful source:
https://github.com/rwightman/pytorch-image-models/blob/198f6ea0f3dae13f041f3ea5880dd79089b60d61/timm/optim/optim_factory.py
Parameters
----------
name : str
name of parameter
param : torch.Tensor
parameter
Returns
-------
bool
Whether or not to decay
"""
if not param.requires_grad:
return False
elif 'batchnorm' in name.lower() or 'bn' in name.lower() or 'bias' in name.lower():
return False
elif param.ndim == 1:
return False
else:
return True | Determines if L2 (or L2-SP) decay should be applied to parameter
BatchNorm and bias parameters are excluded. Uses both the name of the parameter and shape of parameter to determine
Helpful source:
https://github.com/rwightman/pytorch-image-models/blob/198f6ea0f3dae13f041f3ea5880dd79089b60d61/timm/optim/optim_factory.py
Parameters
----------
name : str
name of parameter
param : torch.Tensor
parameter
Returns
-------
bool
Whether or not to decay
| Determines if L2 (or L2-SP) decay should be applied to parameter
BatchNorm and bias parameters are excluded. Uses both the name of the parameter and shape of parameter to determine
Parameters
name : str
name of parameter
param : torch.Tensor
parameter
Returns
bool
Whether or not to decay | [
"Determines",
"if",
"L2",
"(",
"or",
"L2",
"-",
"SP",
")",
"decay",
"should",
"be",
"applied",
"to",
"parameter",
"BatchNorm",
"and",
"bias",
"parameters",
"are",
"excluded",
".",
"Uses",
"both",
"the",
"name",
"of",
"the",
"parameter",
"and",
"shape",
"of",
"parameter",
"to",
"determine",
"Parameters",
"name",
":",
"str",
"name",
"of",
"parameter",
"param",
":",
"torch",
".",
"Tensor",
"parameter",
"Returns",
"bool",
"Whether",
"or",
"not",
"to",
"decay"
] | def should_decay_parameter(name: str, param: torch.Tensor) -> bool:
if not param.requires_grad:
return False
elif 'batchnorm' in name.lower() or 'bn' in name.lower() or 'bias' in name.lower():
return False
elif param.ndim == 1:
return False
else:
return True | [
"def",
"should_decay_parameter",
"(",
"name",
":",
"str",
",",
"param",
":",
"torch",
".",
"Tensor",
")",
"->",
"bool",
":",
"if",
"not",
"param",
".",
"requires_grad",
":",
"return",
"False",
"elif",
"'batchnorm'",
"in",
"name",
".",
"lower",
"(",
")",
"or",
"'bn'",
"in",
"name",
".",
"lower",
"(",
")",
"or",
"'bias'",
"in",
"name",
".",
"lower",
"(",
")",
":",
"return",
"False",
"elif",
"param",
".",
"ndim",
"==",
"1",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | Determines if L2 (or L2-SP) decay should be applied to parameter
BatchNorm and bias parameters are excluded. | [
"Determines",
"if",
"L2",
"(",
"or",
"L2",
"-",
"SP",
")",
"decay",
"should",
"be",
"applied",
"to",
"parameter",
"BatchNorm",
"and",
"bias",
"parameters",
"are",
"excluded",
"."
] | [
"\"\"\"Determines if L2 (or L2-SP) decay should be applied to parameter\n\n BatchNorm and bias parameters are excluded. Uses both the name of the parameter and shape of parameter to determine\n\n Helpful source:\n https://github.com/rwightman/pytorch-image-models/blob/198f6ea0f3dae13f041f3ea5880dd79089b60d61/timm/optim/optim_factory.py\n \n Parameters\n ----------\n name : str\n name of parameter\n param : torch.Tensor\n parameter\n\n Returns\n -------\n bool\n Whether or not to decay\n \"\"\""
] | [
{
"param": "name",
"type": "str"
},
{
"param": "param",
"type": "torch.Tensor"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "name",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "param",
"type": "torch.Tensor",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import torch
def should_decay_parameter(name: str, param: torch.Tensor) -> bool:
if not param.requires_grad:
return False
elif 'batchnorm' in name.lower() or 'bn' in name.lower() or 'bias' in name.lower():
return False
elif param.ndim == 1:
return False
else:
return True | 610,496 | 603 |
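
Usage sketch for the record above — the classic parameter-group split this predicate enables, assuming PyTorch is available; the toy model and weight-decay value are illustrative only:

    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 4), nn.BatchNorm1d(4), nn.Linear(4, 2))

    decay, no_decay = [], []
    for name, param in model.named_parameters():
        (decay if should_decay_parameter(name, param) else no_decay).append(param)

    # 1-D parameters (BatchNorm weights/biases, Linear biases) land in no_decay.
    optimizer = torch.optim.SGD(
        [{"params": decay, "weight_decay": 1e-4},
         {"params": no_decay, "weight_decay": 0.0}],
        lr=0.01)
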
caca8c5716dbc35a3c55cdcf4dbc4976a40a7bc7 | jason-weirather/py-seq-tools | seqtools/statistics/__init__.py | [
"Apache-2.0"
] | Python | median | <not_specific> | def median(arr):
"""median of the values, must have more than 0 entries.
:param arr: list of numbers
:type arr: number[] a number array
:return: median
:rtype: float
"""
if len(arr) == 0:
sys.stderr.write("ERROR: no content in array to take average\n")
sys.exit()
if len(arr) == 1: return arr[0]
quot = len(arr)//2
rem = len(arr)%2
if rem != 0:
return sorted(arr)[quot]
return float(sum(sorted(arr)[quot-1:quot+1]))/float(2) | median of the values, must have more than 0 entries.
:param arr: list of numbers
:type arr: number[] a number array
:return: median
:rtype: float
| median of the values, must have more than 0 entries. | [
"median",
"of",
"the",
"values",
"must",
"have",
"more",
"than",
"0",
"entries",
"."
] | def median(arr):
if len(arr) == 0:
sys.stderr.write("ERROR: no content in array to take average\n")
sys.exit()
if len(arr) == 1: return arr[0]
quot = len(arr)//2
rem = len(arr)%2
if rem != 0:
return sorted(arr)[quot]
return float(sum(sorted(arr)[quot-1:quot+1]))/float(2) | [
"def",
"median",
"(",
"arr",
")",
":",
"if",
"len",
"(",
"arr",
")",
"==",
"0",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"ERROR: no content in array to take average\\n\"",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"len",
"(",
"arr",
")",
"==",
"1",
":",
"return",
"arr",
"[",
"0",
"]",
"quot",
"=",
"len",
"(",
"arr",
")",
"/",
"2",
"rem",
"=",
"len",
"(",
"arr",
")",
"%",
"2",
"if",
"rem",
"!=",
"0",
":",
"return",
"sorted",
"(",
"arr",
")",
"[",
"quot",
"]",
"return",
"float",
"(",
"sum",
"(",
"sorted",
"(",
"arr",
")",
"[",
"quot",
"-",
"1",
":",
"quot",
"+",
"1",
"]",
")",
")",
"/",
"float",
"(",
"2",
")"
] | median of the values, must have more than 0 entries. | [
"median",
"of",
"the",
"values",
"must",
"have",
"more",
"than",
"0",
"entries",
"."
] | [
"\"\"\"median of the values, must have more than 0 entries.\n\n :param arr: list of numbers\n :type arr: number[] a number array\n :return: median\n :rtype: float\n\n \"\"\""
] | [
{
"param": "arr",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "float"
}
],
"raises": [],
"params": [
{
"identifier": "arr",
"type": null,
"docstring": "list of numbers",
"docstring_tokens": [
"list",
"of",
"numbers"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import sys
def median(arr):
if len(arr) == 0:
sys.stderr.write("ERROR: no content in array to take average\n")
sys.exit()
if len(arr) == 1: return arr[0]
quot = len(arr)//2
rem = len(arr)%2
if rem != 0:
return sorted(arr)[quot]
return float(sum(sorted(arr)[quot-1:quot+1]))/float(2) | 610,497 | 185 |
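
Usage sketch for the record above — exercises both the odd- and even-length branches:

    assert median([5, 1, 3]) == 3        # odd length: middle of the sorted values
    assert median([4, 1, 3, 2]) == 2.5   # even length: mean of the two middle values
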
ac63af735dc7db2ecdd678836d54b71bf0baa871 | thkim1011/chess-ai | chess/chess.py | [
"MIT"
] | Python | piece_is_blocked_diagonal | <not_specific> | def piece_is_blocked_diagonal(piece, position, board):
"""
Returns True if the diagonal path of piece to position is blocked.
It does not check for whether there is a piece at the final destination
since a move may be a valid attack. In addition, if position is not a
diagonal movement of the initial, then True is returned since the piece
is "blocked"
>>> board = Board(empty=True)
>>> queen = Queen(WHITE, locate("d1"))
>>> board.add_piece(queen)
>>> board.add_piece(Pawn(BLACK, locate("h5")))
>>> board.add_piece(Pawn(WHITE, locate("c2"))) # Set up board
>>> piece_is_blocked_diagonal(queen, locate("d1"), board) # Not valid move
True
>>> piece_is_blocked_diagonal(queen, locate("e2"), board) # Valid move
False
>>> piece_is_blocked_diagonal(queen, locate("f3"), board) # Valid move
False
>>> piece_is_blocked_diagonal(queen, locate("h5"), board) # Valid (see above)
False
>>> piece_is_blocked_diagonal(queen, locate("c2"), board) # Valid (see above)
False
>>> piece_is_blocked_diagonal(queen, locate("a4"), board) # Not valid move
True
>>> piece_is_blocked_diagonal(queen, locate("h1"), board) # Wrong move
True
>>> piece_is_blocked_diagonal(queen, locate("a2"), board) # Wrong move
True
"""
row_change = position.row - piece.position.row
col_change = position.col - piece.position.col
n = abs(row_change)
m = abs(col_change)
if n == m and n != 0:
row_unit = row_change // n
col_unit = col_change // m
for i in range(1, n):
if board.get_piece(piece.position + (row_unit * i, col_unit * i)):
return True
return False
return True |
Returns True if the diagonal path of piece to position is blocked.
It does not check for whether there is a piece at the final destination
since a move may be a valid attack. In addition, if position is not a
diagonal movement of the initial, then True is returned since the piece
is "blocked"
>>> board = Board(empty=True)
>>> queen = Queen(WHITE, locate("d1"))
>>> board.add_piece(queen)
>>> board.add_piece(Pawn(BLACK, locate("h5")))
>>> board.add_piece(Pawn(WHITE, locate("c2"))) # Set up board
>>> piece_is_blocked_diagonal(queen, locate("d1"), board) # Not valid move
True
>>> piece_is_blocked_diagonal(queen, locate("e2"), board) # Valid move
False
>>> piece_is_blocked_diagonal(queen, locate("f3"), board) # Valid move
False
>>> piece_is_blocked_diagonal(queen, locate("h5"), board) # Valid (see above)
False
>>> piece_is_blocked_diagonal(queen, locate("c2"), board) # Valid (see above)
False
>>> piece_is_blocked_diagonal(queen, locate("a4"), board) # Not valid move
True
>>> piece_is_blocked_diagonal(queen, locate("h1"), board) # Wrong move
True
>>> piece_is_blocked_diagonal(queen, locate("a2"), board) # Wrong move
True
| Returns True if the diagonal path of piece to position is blocked.
It does not check for whether there is a piece at the final destination
since a move may be a valid attack. | [
"Returns",
"True",
"if",
"the",
"diagonal",
"path",
"of",
"piece",
"to",
"position",
"is",
"blocked",
".",
"It",
"does",
"not",
"check",
"for",
"whether",
"there",
"is",
"a",
"piece",
"at",
"the",
"final",
"destination",
"since",
"a",
"move",
"may",
"be",
"a",
"valid",
"attack",
"."
] | def piece_is_blocked_diagonal(piece, position, board):
row_change = position.row - piece.position.row
col_change = position.col - piece.position.col
n = abs(row_change)
m = abs(col_change)
if n == m and n != 0:
row_unit = row_change // n
col_unit = col_change // m
for i in range(1, n):
if board.get_piece(piece.position + (row_unit * i, col_unit * i)):
return True
return False
return True | [
"def",
"piece_is_blocked_diagonal",
"(",
"piece",
",",
"position",
",",
"board",
")",
":",
"row_change",
"=",
"position",
".",
"row",
"-",
"piece",
".",
"position",
".",
"row",
"col_change",
"=",
"position",
".",
"col",
"-",
"piece",
".",
"position",
".",
"col",
"n",
"=",
"abs",
"(",
"row_change",
")",
"m",
"=",
"abs",
"(",
"col_change",
")",
"if",
"n",
"==",
"m",
"and",
"n",
"!=",
"0",
":",
"row_unit",
"=",
"row_change",
"//",
"n",
"col_unit",
"=",
"col_change",
"//",
"m",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n",
")",
":",
"if",
"board",
".",
"get_piece",
"(",
"piece",
".",
"position",
"+",
"(",
"row_unit",
"*",
"i",
",",
"col_unit",
"*",
"i",
")",
")",
":",
"return",
"True",
"return",
"False",
"return",
"True"
] | Returns True if the diagonal path of piece to position is blocked. | [
"Returns",
"True",
"if",
"the",
"diagonal",
"path",
"of",
"piece",
"to",
"position",
"is",
"blocked",
"."
] | [
"\"\"\"\n Returns True if the diagonal path of piece to position is blocked.\n It does not check for whether there is a piece at the final destination\n since a move may be a valid attack. In addition, if position is not a\n diagonal movement of the initial, then True is returned since the piece\n is \"blocked\"\n >>> board = Board(empty=True)\n >>> queen = Queen(WHITE, locate(\"d1\"))\n >>> board.add_piece(queen)\n >>> board.add_piece(Pawn(BLACK, locate(\"h5\")))\n >>> board.add_piece(Pawn(WHITE, locate(\"c2\"))) # Set up board\n >>> piece_is_blocked_diagonal(queen, locate(\"d1\"), board) # Not valid move\n True\n >>> piece_is_blocked_diagonal(queen, locate(\"e2\"), board) # Valid move\n False\n >>> piece_is_blocked_diagonal(queen, locate(\"f3\"), board) # Valid move\n False\n >>> piece_is_blocked_diagonal(queen, locate(\"h5\"), board) # Valid (see above)\n False\n >>> piece_is_blocked_diagonal(queen, locate(\"c2\"), board) # Valid (see above)\n False\n >>> piece_is_blocked_diagonal(queen, locate(\"a4\"), board) # Not valid move\n True\n >>> piece_is_blocked_diagonal(queen, locate(\"h1\"), board) # Wrong move\n True\n >>> piece_is_blocked_diagonal(queen, locate(\"a2\"), board) # Wrong move\n True\n \"\"\""
] | [
{
"param": "piece",
"type": null
},
{
"param": "position",
"type": null
},
{
"param": "board",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "piece",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "position",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "board",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def piece_is_blocked_diagonal(piece, position, board):
row_change = position.row - piece.position.row
col_change = position.col - piece.position.col
n = abs(row_change)
m = abs(col_change)
if n == m and n != 0:
row_unit = row_change // n
col_unit = col_change // m
for i in range(1, n):
if board.get_piece(piece.position + (row_unit * i, col_unit * i)):
return True
return False
return True | 610,498 | 726 |
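
The docstring above doubles as a doctest suite; assuming the surrounding chess module (Board, Queen, Pawn, locate, WHITE, BLACK from the record's chess/chess.py) is importable, the tests can be executed directly — the import path here is a guess from the record's path field:

    import doctest
    import chess.chess as chess_mod  # hypothetical import of the record's module

    doctest.testmod(chess_mod, verbose=False)
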
7825f9407cebc8526ba3171de879978497fff38d | dual75/athome | src/athome/lib/taskrunner.py | [
"Apache-2.0"
] | Python | _find_all | <not_specific> | def _find_all(plugins_dir):
"""Find all python modules in plugins dir"""
result = [(f, os.path.join(plugins_dir, f))
for f in os.listdir(plugins_dir)
if f.endswith('.py')
and os.path.isfile(os.path.join(plugins_dir, f))
and os.access(os.path.join(plugins_dir, f), os.R_OK)
]
return result | Find all python modules in plugins dir | Find all python modules in plugins dir | [
"Find",
"all",
"python",
"modules",
"in",
"plugins",
"dir"
] | def _find_all(plugins_dir):
result = [(f, os.path.join(plugins_dir, f))
for f in os.listdir(plugins_dir)
if f.endswith('.py')
and os.path.isfile(os.path.join(plugins_dir, f))
and os.access(os.path.join(plugins_dir, f), os.R_OK)
]
return result | [
"def",
"_find_all",
"(",
"plugins_dir",
")",
":",
"result",
"=",
"[",
"(",
"f",
",",
"os",
".",
"path",
".",
"join",
"(",
"plugins_dir",
",",
"f",
")",
")",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"plugins_dir",
")",
"if",
"f",
".",
"endswith",
"(",
"'.py'",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"plugins_dir",
",",
"f",
")",
")",
"and",
"os",
".",
"access",
"(",
"os",
".",
"path",
".",
"join",
"(",
"plugins_dir",
",",
"f",
")",
",",
"os",
".",
"R_OK",
")",
"]",
"return",
"result"
] | Find all python modules in plugins dir | [
"Find",
"all",
"python",
"modules",
"in",
"plugins",
"dir"
] | [
"\"\"\"Find all python modules in plugins dir\"\"\""
] | [
{
"param": "plugins_dir",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "plugins_dir",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def _find_all(plugins_dir):
result = [(f, os.path.join(plugins_dir, f))
for f in os.listdir(plugins_dir)
if f.endswith('.py')
and os.path.isfile(os.path.join(plugins_dir, f))
and os.access(os.path.join(plugins_dir, f), os.R_OK)
]
return result | 610,499 | 719 |
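
Usage sketch for the record above — builds a throwaway plugins directory; the file names are placeholders:

    import os
    import tempfile

    plugins = tempfile.mkdtemp()
    open(os.path.join(plugins, "demo.py"), "w").close()    # picked up: readable .py file
    open(os.path.join(plugins, "notes.txt"), "w").close()  # ignored: not a .py file

    print(_find_all(plugins))  # -> [('demo.py', '<plugins>/demo.py')]
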
7e391029030b51912765067150f7cf3060cc5a8d | spoonsandhangers/Python_basics | 2_2a_input_string_exercises.py | [
"CC0-1.0"
] | Python | conversation | null | def conversation():
"""
Asks the user to input their name and hair colour
gives the user a nickname by concatenating the first 3 letters
of their name with 'gsy'
:return:
"""
#store the name that the user inputs in the variable user_name
user_name = input("What is your name?: ")
#output hello and their name using the variable user_name
print("Hello", user_name)
#Ask the user what colour their hair is mentioning their name.
user_hair = input("What colour is your hair "+ user_name+ "? ")
#Give the user a nickname using the first three letters of their name followed by
#gsy.
print("Your nickname is", user_name[0:2]+"gsy") |
Asks the user to input their name and hair colour
gives the user a nickname by concatenating the first 3 letters
of their name with 'gsy'
:return:
| Asks the user to input their name and hair colour
gives the user a nickname by concatenating the first 3 letters
of their name with 'gsy' | [
"Asks",
"the",
"user",
"to",
"input",
"their",
"name",
"and",
"hair",
"colour",
"gives",
"the",
"user",
"a",
"nickname",
"by",
"concatenating",
"the",
"first",
"3",
"letters",
"of",
"their",
"name",
"with",
"'",
"gsy",
"'"
] | def conversation():
user_name = input("What is your name?: ")
print("Hello", user_name)
user_hair = input("What colour is your hair "+ user_name+ "? ")
print("Your nickname is", user_name[0:2]+"gsy") | [
"def",
"conversation",
"(",
")",
":",
"user_name",
"=",
"input",
"(",
"\"What is your name?: \"",
")",
"print",
"(",
"\"Hello\"",
",",
"user_name",
")",
"user_hair",
"=",
"input",
"(",
"\"What colour is your hair \"",
"+",
"user_name",
"+",
"\"? \"",
")",
"print",
"(",
"\"Your nickname is\"",
",",
"user_name",
"[",
"0",
":",
"2",
"]",
"+",
"\"gsy\"",
")"
] | Asks the user to input their name and hair colour
gives the user a nickname by concatenating the first 3 letters
of their name with 'gsy' | [
"Asks",
"the",
"user",
"to",
"input",
"their",
"name",
"and",
"hair",
"colour",
"gives",
"the",
"user",
"a",
"nickname",
"by",
"concatenating",
"the",
"first",
"3",
"letters",
"of",
"their",
"name",
"with",
"'",
"gsy",
"'"
] | [
"\"\"\"\n Asks the user to input their name and hair colour\n gives the user a nickname by concatenating the first 3 letters\n of their name with 'gsy'\n :return:\n \"\"\"",
"#store the name that the user inputs in the variable user_name",
"#output hello and their name using the variable user_name",
"#Ask the user what colour their hair is mentioning their name.",
"#Give the user a nickname using the first three letters of their name followed by",
"#gsy."
] | [] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def conversation():
user_name = input("What is your name?: ")
print("Hello", user_name)
user_hair = input("What colour is your hair "+ user_name+ "? ")
print("Your nickname is", user_name[0:2]+"gsy") | 610,500 | 525 |
fd3de8cdc195db2ebe56e733e39dc443f8193250 | peteroconnor-bc/artemis | artemis/remote/plotting/utils.py | [
"BSD-2-Clause-FreeBSD"
] | Python | _queue_get_all_no_wait | <not_specific> | def _queue_get_all_no_wait(q, max_items_to_retreive):
"""
Empties the queue, but takes maximally maxItemsToRetreive from the queue
:param q:
:param max_items_to_retreive:
:return:
"""
items = []
for numOfItemsRetrieved in range(0, max_items_to_retreive):
try:
items.append(q.get_nowait())
except queue.Empty:
break
return items |
Empties the queue, but takes maximally maxItemsToRetreive from the queue
:param q:
:param max_items_to_retreive:
:return:
| Empties the queue, but takes maximally maxItemsToRetreive from the queue | [
"Empties",
"the",
"queue",
"but",
"takes",
"maximally",
"maxItemsToRetreive",
"from",
"the",
"queue"
] | def _queue_get_all_no_wait(q, max_items_to_retreive):
items = []
for numOfItemsRetrieved in range(0, max_items_to_retreive):
try:
items.append(q.get_nowait())
except queue.Empty:
break
return items | [
"def",
"_queue_get_all_no_wait",
"(",
"q",
",",
"max_items_to_retreive",
")",
":",
"items",
"=",
"[",
"]",
"for",
"numOfItemsRetrieved",
"in",
"range",
"(",
"0",
",",
"max_items_to_retreive",
")",
":",
"try",
":",
"items",
".",
"append",
"(",
"q",
".",
"get_nowait",
"(",
")",
")",
"except",
"queue",
".",
"Empty",
":",
"break",
"return",
"items"
] | Empties the queue, but takes maximally maxItemsToRetreive from the queue | [
"Empties",
"the",
"queue",
"but",
"takes",
"maximally",
"maxItemsToRetreive",
"from",
"the",
"queue"
] | [
"\"\"\"\n Empties the queue, but takes maximally maxItemsToRetreive from the queue\n :param q:\n :param max_items_to_retreive:\n :return:\n \"\"\""
] | [
{
"param": "q",
"type": null
},
{
"param": "max_items_to_retreive",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "q",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "max_items_to_retreive",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import queue
def _queue_get_all_no_wait(q, max_items_to_retreive):
items = []
for numOfItemsRetrieved in range(0, max_items_to_retreive):
try:
items.append(q.get_nowait())
except queue.Empty:
break
return items | 610,501 | 35 |
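
Usage sketch for the record above — fill a queue, then drain it in bounded batches:

    import queue

    q = queue.Queue()
    for item in range(5):
        q.put(item)

    print(_queue_get_all_no_wait(q, 3))   # -> [0, 1, 2]
    print(_queue_get_all_no_wait(q, 10))  # -> [3, 4]  (stops early once the queue empties)
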
2538b97e7d5fae3a0b8be51347197320891bf3f2 | NikithaKrish/PaddleNLP | paddlenlp/trainer/trainer_utils.py | [
"Apache-2.0"
] | Python | default_compute_objective | float | def default_compute_objective(metrics: Dict[str, float]) -> float:
"""
The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the [`Trainer`], the sum of all metrics otherwise.
Args:
metrics (`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
# Remove speed metrics
speed_metrics = [
m for m in metrics.keys()
if m.endswith("_runtime") or m.endswith("_per_second")
]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values()) |
The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the [`Trainer`], the sum of all metrics otherwise.
Args:
metrics (`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
`float`: The objective to minimize or maximize
| The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the [`Trainer`], the sum of all metrics otherwise. | [
"The",
"default",
"objective",
"to",
"maximize",
"/",
"minimize",
"when",
"doing",
"an",
"hyperparameter",
"search",
".",
"It",
"is",
"the",
"evaluation",
"loss",
"if",
"no",
"metrics",
"are",
"provided",
"to",
"the",
"[",
"`",
"Trainer",
"`",
"]",
"the",
"sum",
"of",
"all",
"metrics",
"otherwise",
"."
] | def default_compute_objective(metrics: Dict[str, float]) -> float:
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
speed_metrics = [
m for m in metrics.keys()
if m.endswith("_runtime") or m.endswith("_per_second")
]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values()) | [
"def",
"default_compute_objective",
"(",
"metrics",
":",
"Dict",
"[",
"str",
",",
"float",
"]",
")",
"->",
"float",
":",
"metrics",
"=",
"copy",
".",
"deepcopy",
"(",
"metrics",
")",
"loss",
"=",
"metrics",
".",
"pop",
"(",
"\"eval_loss\"",
",",
"None",
")",
"_",
"=",
"metrics",
".",
"pop",
"(",
"\"epoch\"",
",",
"None",
")",
"speed_metrics",
"=",
"[",
"m",
"for",
"m",
"in",
"metrics",
".",
"keys",
"(",
")",
"if",
"m",
".",
"endswith",
"(",
"\"_runtime\"",
")",
"or",
"m",
".",
"endswith",
"(",
"\"_per_second\"",
")",
"]",
"for",
"sm",
"in",
"speed_metrics",
":",
"_",
"=",
"metrics",
".",
"pop",
"(",
"sm",
",",
"None",
")",
"return",
"loss",
"if",
"len",
"(",
"metrics",
")",
"==",
"0",
"else",
"sum",
"(",
"metrics",
".",
"values",
"(",
")",
")"
] | The default objective to maximize/minimize when doing a hyperparameter search. | [
"The",
"default",
"objective",
"to",
"maximize",
"/",
"minimize",
"when",
"doing",
"an",
"hyperparameter",
"search",
"."
] | [
"\"\"\"\n The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no\n metrics are provided to the [`Trainer`], the sum of all metrics otherwise.\n\n Args:\n metrics (`Dict[str, float]`): The metrics returned by the evaluate method.\n\n Return:\n `float`: The objective to minimize or maximize\n \"\"\"",
"# Remove speed metrics"
] | [
{
"param": "metrics",
"type": "Dict[str, float]"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "metrics",
"type": "Dict[str, float]",
"docstring": "The metrics returned by the evaluate method.",
"docstring_tokens": [
"The",
"metrics",
"returned",
"by",
"the",
"evaluate",
"method",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | import copy
from typing import Dict
def default_compute_objective(metrics: Dict[str, float]) -> float:
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
speed_metrics = [
m for m in metrics.keys()
if m.endswith("_runtime") or m.endswith("_per_second")
]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values()) | 610,502 | 644 |
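
Usage sketch for the record above (run against the code_with_imports variant) — the metric names follow the eval_* convention the function expects:

    print(default_compute_objective({"eval_loss": 0.42, "epoch": 3.0, "eval_runtime": 12.5}))
    # -> 0.42  (loss wins; epoch and speed metrics are stripped)

    print(default_compute_objective({"eval_accuracy": 0.5, "eval_f1": 0.25, "epoch": 3.0}))
    # -> 0.75  (no loss key, so the remaining metrics are summed)
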
20d2cd4dc0ee9969f6fe085d56e1b09cf55461b0 | yihangx/Medical_Image_Server | core/files.py | [
"MIT"
] | Python | b64s_to_b | bytes | def b64s_to_b(b64s: str) -> bytes:
"""convert base 64 strting to bytes
:param b64s: input base 64 string
:type b64s: str
:return: output bytes
:rtype: bytes
"""
b = base64.b64decode(b64s.encode('utf8'))
return b | convert base 64 string to bytes
:param b64s: input base 64 string
:type b64s: str
:return: output bytes
:rtype: bytes
| convert base 64 string to bytes | [
"convert",
"base",
"64",
"strting",
"to",
"bytes"
] | def b64s_to_b(b64s: str) -> bytes:
b = base64.b64decode(b64s.encode('utf8'))
return b | [
"def",
"b64s_to_b",
"(",
"b64s",
":",
"str",
")",
"->",
"bytes",
":",
"b",
"=",
"base64",
".",
"b64decode",
"(",
"b64s",
".",
"encode",
"(",
"'utf8'",
")",
")",
"return",
"b"
] | convert base 64 string to bytes | [
"convert",
"base",
"64",
"strting",
"to",
"bytes"
] | [
"\"\"\"convert base 64 strting to bytes\n\n :param b64s: input base 64 string\n :type b64s: str\n :return: output bytes\n :rtype: bytes\n \"\"\""
] | [
{
"param": "b64s",
"type": "str"
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "bytes"
}
],
"raises": [],
"params": [
{
"identifier": "b64s",
"type": "str",
"docstring": "input base 64 string",
"docstring_tokens": [
"input",
"base",
"64",
"string"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import base64
def b64s_to_b(b64s: str) -> bytes:
b = base64.b64decode(b64s.encode('utf8'))
return b | 610,503 | 1,011 |
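
Usage sketch for the record above — round-trips a string through base64 and back to bytes:

    import base64

    b64s = base64.b64encode("hello, world".encode("utf8")).decode("utf8")
    assert b64s_to_b(b64s) == b"hello, world"
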
bbaa865b04d98245316263daee881bfdeb2d57b2 | vedant-jain03/NeoAlgo | Python/ds/SwapNodes-LinkedList.py | [
"MIT"
] | Python | swapNodes | <not_specific> | def swapNodes(head, m, n):
'''
Summary Line:
This function helps us to swap the
particular nodes of the Linked List.
Args:
head- Head of our Linked List
m- index of the first node being swapped.
n- index of the second node being swapped.
Returns:
Head of the new Linked list with
swapped nodes.
'''
if head is None:
return head
x = m
y = n
m = min(x, y)
n = max(x, y)
i = 0
temp = head
prev1 = None
while(i < m):
if temp is None:
break
prev1 = temp
temp = temp.next
i = i+1
current1 = temp # current1 set at the first node
i = 0
temp = head
prev2 = None
while i < n:
if temp is None:
break
prev2 = temp
temp = temp.next
i = i+1
current2 = temp # current2 set to the other node
temp = current2.next
'''
We iterate over the prev and the next nodes
of our current1 and current2 and manually change
their links to swap the nodes.
'''
if prev1 is not None and prev2 is not None:
prev1.next = current2
prev2.next = current1
current2.next = current1.next
current1.next = temp
if prev1 is None:
prev2.next = current1
head = current2
current2.next = current1.next
current1.next = temp
return head |
Summary Line:
This function helps us to swap the
particular nodes of the Linked List.
Args:
head- Head of our Linked List
m- index of the first node being swapped.
n- index of the second node being swapped.
Returns:
Head of the new Linked list with
swapped nodes.
| Summary Line:
This function helps us to swap the
particular nodes of the Linked List.
head- Head of our Linked List
m- index of the first node being swapped.
n- index of the second node being swapped.
Head of the new Linked list with
swapped nodes. | [
"Summary",
"Line",
":",
"This",
"function",
"helps",
"us",
"to",
"swap",
"the",
"particular",
"nodes",
"of",
"the",
"Linked",
"List",
".",
"head",
"-",
"Head",
"of",
"our",
"Linked",
"List",
"m",
"-",
"index",
"of",
"the",
"first",
"node",
"being",
"swapped",
".",
"n",
"-",
"index",
"of",
"the",
"second",
"node",
"being",
"swapped",
".",
"Head",
"of",
"the",
"new",
"Linked",
"list",
"with",
"swapped",
"nodes",
"."
] | def swapNodes(head, m, n):
if head is None:
return head
x = m
y = n
m = min(x, y)
n = max(x, y)
i = 0
temp = head
prev1 = None
while(i < m):
if temp is None:
break
prev1 = temp
temp = temp.next
i = i+1
current1 = temp
i = 0
temp = head
prev2 = None
while i < n:
if temp is None:
break
prev2 = temp
temp = temp.next
i = i+1
current2 = temp
temp = current2.next
if prev1 is not None and prev2 is not None:
prev1.next = current2
prev2.next = current1
current2.next = current1.next
current1.next = temp
if prev1 is None:
prev2.next = current1
head = current2
current2.next = current1.next
current1.next = temp
return head | [
"def",
"swapNodes",
"(",
"head",
",",
"m",
",",
"n",
")",
":",
"if",
"head",
"is",
"None",
":",
"return",
"head",
"x",
"=",
"m",
"y",
"=",
"n",
"m",
"=",
"min",
"(",
"x",
",",
"y",
")",
"n",
"=",
"max",
"(",
"x",
",",
"y",
")",
"i",
"=",
"0",
"temp",
"=",
"head",
"prev1",
"=",
"None",
"while",
"(",
"i",
"<",
"m",
")",
":",
"if",
"temp",
"is",
"None",
":",
"break",
"prev1",
"=",
"temp",
"temp",
"=",
"temp",
".",
"next",
"i",
"=",
"i",
"+",
"1",
"current1",
"=",
"temp",
"i",
"=",
"0",
"temp",
"=",
"head",
"prev2",
"=",
"None",
"while",
"i",
"<",
"n",
":",
"if",
"temp",
"is",
"None",
":",
"break",
"prev2",
"=",
"temp",
"temp",
"=",
"temp",
".",
"next",
"i",
"=",
"i",
"+",
"1",
"current2",
"=",
"temp",
"temp",
"=",
"current2",
".",
"next",
"'''\n We iterate over the prev and the next nodes\n of our current1 and current2 and manually change\n their links to swap the nodes.\n '''",
"if",
"prev1",
"is",
"not",
"None",
"and",
"prev2",
"is",
"not",
"None",
":",
"prev1",
".",
"next",
"=",
"current2",
"prev2",
".",
"next",
"=",
"current1",
"current2",
".",
"next",
"=",
"current1",
".",
"next",
"current1",
".",
"next",
"=",
"temp",
"if",
"prev1",
"is",
"None",
":",
"prev2",
".",
"next",
"=",
"current1",
"head",
"=",
"current2",
"current2",
".",
"next",
"=",
"current1",
".",
"next",
"current1",
".",
"next",
"=",
"temp",
"return",
"head"
] | Summary Line:
This function helps us to swap the
particular nodes of the Linked List. | [
"Summary",
"Line",
":",
"This",
"function",
"helps",
"us",
"to",
"swap",
"the",
"particular",
"nodes",
"of",
"the",
"Linked",
"List",
"."
] | [
"'''\n Summary Line:\n This function helps us to swap the\n particular nodes of the Linked List.\n\n Args:\n head- Head of our Linked List\n m- index of the first node being swapped.\n n- index of the second node being swapped.\n\n Returns:\n Head of the new Linked list with\n swapped nodes.\n '''",
"# current1 set at the first node",
"# current2 set to the other node",
"'''\n We iterate over the prev and the next nodes\n of our current1 and current2 and manually change\n their links to swap the nodes.\n '''"
] | [
{
"param": "head",
"type": null
},
{
"param": "m",
"type": null
},
{
"param": "n",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "head",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "m",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "n",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def swapNodes(head, m, n):
if head is None:
return head
x = m
y = n
m = min(x, y)
n = max(x, y)
i = 0
temp = head
prev1 = None
while(i < m):
if temp is None:
break
prev1 = temp
temp = temp.next
i = i+1
current1 = temp
i = 0
temp = head
prev2 = None
while i < n:
if temp is None:
break
prev2 = temp
temp = temp.next
i = i+1
current2 = temp
temp = current2.next
if prev1 is not None and prev2 is not None:
prev1.next = current2
prev2.next = current1
current2.next = current1.next
current1.next = temp
if prev1 is None:
prev2.next = current1
head = current2
current2.next = current1.next
current1.next = temp
return head | 610,504 | 840 |
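
Usage sketch for the record above — the record ships no node class, so a minimal singly linked node (a payload plus .next) is assumed here:

    class Node:  # hypothetical stand-in; any object with a mutable .next works
        def __init__(self, val):
            self.val, self.next = val, None

    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(3)
    head.next.next.next = Node(4)

    head = swapNodes(head, 0, 2)  # swap the nodes at indices 0 and 2

    out, cur = [], head
    while cur:
        out.append(cur.val)
        cur = cur.next
    print(out)  # -> [3, 2, 1, 4]
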
b3139209806a2e23ca4795481bbf4a7995454236 | neu-spiral/LPSforLifelong | utils.py | [
"MIT"
] | Python | mask_joint | <not_specific> | def mask_joint(args,mask1,mask2):
'''
mask1 has more 1s than mask2
return: new mask with only 0s and 1s
'''
masks = copy.deepcopy(mask1)
if not mask2:
return mask1
for name in mask1:
if name not in args.fixed_layer and name in args.pruned_layer:
non_zeros1,non_zeros2 = mask1[name], mask2[name]
non_zeros = non_zeros1 + non_zeros2
# Fake float version of |
under_threshold = non_zeros < 0.5
above_threshold = non_zeros > 0.5
non_zeros[above_threshold] = 1
non_zeros[under_threshold] = 0
masks[name] = non_zeros
return masks |
mask1 has more 1s than mask2
return: new mask with only 0s and 1s
| mask1 has more 1s than mask2
return: new mask with only 0s and 1s | [
"mask1",
"has",
"more",
"1",
"than",
"mask2",
"return",
":",
"new",
"mask",
"with",
"only",
"0s",
"and",
"1s"
] | def mask_joint(args,mask1,mask2):
masks = copy.deepcopy(mask1)
if not mask2:
return mask1
for name in mask1:
if name not in args.fixed_layer and name in args.pruned_layer:
non_zeros1,non_zeros2 = mask1[name], mask2[name]
non_zeros = non_zeros1 + non_zeros2
under_threshold = non_zeros < 0.5
above_threshold = non_zeros > 0.5
non_zeros[above_threshold] = 1
non_zeros[under_threshold] = 0
masks[name] = non_zeros
return masks | [
"def",
"mask_joint",
"(",
"args",
",",
"mask1",
",",
"mask2",
")",
":",
"masks",
"=",
"copy",
".",
"deepcopy",
"(",
"mask1",
")",
"if",
"not",
"mask2",
":",
"return",
"mask1",
"for",
"name",
"in",
"mask1",
":",
"if",
"name",
"not",
"in",
"args",
".",
"fixed_layer",
"and",
"name",
"in",
"args",
".",
"pruned_layer",
":",
"non_zeros1",
",",
"non_zeros2",
"=",
"mask1",
"[",
"name",
"]",
",",
"mask2",
"[",
"name",
"]",
"non_zeros",
"=",
"non_zeros1",
"+",
"non_zeros2",
"under_threshold",
"=",
"non_zeros",
"<",
"0.5",
"above_threshold",
"=",
"non_zeros",
">",
"0.5",
"non_zeros",
"[",
"above_threshold",
"]",
"=",
"1",
"non_zeros",
"[",
"under_threshold",
"]",
"=",
"0",
"masks",
"[",
"name",
"]",
"=",
"non_zeros",
"return",
"masks"
] | mask1 has more 1s than mask2
return: new mask with only 0s and 1s | [
"mask1",
"has",
"more",
"1",
"than",
"mask2",
"return",
":",
"new",
"mask",
"with",
"only",
"0s",
"and",
"1s"
] | [
"'''\n mask1 has more 1 than mask2\n return: new mask with only 0s and 1s\n '''",
"# Fake float version of |"
] | [
{
"param": "args",
"type": null
},
{
"param": "mask1",
"type": null
},
{
"param": "mask2",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "mask1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "mask2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import copy
def mask_joint(args,mask1,mask2):
masks = copy.deepcopy(mask1)
if not mask2:
return mask1
for name in mask1:
if name not in args.fixed_layer and name in args.pruned_layer:
non_zeros1,non_zeros2 = mask1[name], mask2[name]
non_zeros = non_zeros1 + non_zeros2
under_threshold = non_zeros < 0.5
above_threshold = non_zeros > 0.5
non_zeros[above_threshold] = 1
non_zeros[under_threshold] = 0
masks[name] = non_zeros
return masks | 610,505 | 930 |
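
Usage sketch for the record above — torch tensors and the layer name are assumptions; the args object only needs fixed_layer and pruned_layer attributes:

    import torch
    from types import SimpleNamespace

    args = SimpleNamespace(fixed_layer=[], pruned_layer=["fc.weight"])  # illustrative names
    m1 = {"fc.weight": torch.tensor([1., 0., 1., 0.])}
    m2 = {"fc.weight": torch.tensor([0., 0., 1., 1.])}

    print(mask_joint(args, m1, m2)["fc.weight"])  # -> tensor([1., 0., 1., 1.]), an elementwise OR
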
dd940c679cf4d9f5d185b397a0a233d6cce9701f | zimolzak/toddler-language-acquisition | main.py | [
"MIT"
] | Python | filename2frame | <not_specific> | def filename2frame(fn):
"""Make two lists a shorthand CSV.
In other words, unroll the CSV. It's a slightly unusual CSV in
that column 0 is always a date, and then there are a *variable*
number of subsequent columns that you can think of as word0,
word1, ... wordn. This compact / rolled-up structure simply allows
me to type the CSV faster.
"""
rows = open(fn, 'r').read().splitlines()
date_list = []
word_list = []
for r in rows:
columns = r.split(',')
# Assume columns[0] is always date and columns[1..end] are words.
for i in range(1,len(columns)):
if columns[i] not in word_list: # dedup assumes CSV chronological.
date_list.append(columns[0])
word_list.append(columns[i])
return date_list, word_list | Make two lists from a shorthand CSV.
In other words, unroll the CSV. It's a slightly unusual CSV in
that column 0 is always a date, and then there are a *variable*
number of subsequent columns that you can think of as word0,
word1, ... wordn. This compact / rolled-up structure simply allows
me to type the CSV faster.
| Make two lists from a shorthand CSV.
In other words, unroll the CSV. It's a slightly unusual CSV in
that column 0 is always a date, and then there are a *variable
number of subsequent columns that you can think of as word0,
word1, ... wordn. This compact / rolled-up structure simply allows
me to type the CSV faster. | [
"Make",
"two",
"lists",
"a",
"shorthand",
"CSV",
".",
"In",
"other",
"words",
"unroll",
"the",
"CSV",
".",
"It",
"'",
"s",
"a",
"slightly",
"unusual",
"CSV",
"in",
"that",
"column",
"0",
"is",
"always",
"a",
"date",
"and",
"then",
"there",
"are",
"a",
"*",
"variable",
"number",
"of",
"subsequent",
"columns",
"that",
"you",
"can",
"think",
"of",
"as",
"word0",
"word1",
"...",
"wordn",
".",
"This",
"compact",
"/",
"rolled",
"-",
"up",
"structure",
"simply",
"allows",
"me",
"to",
"type",
"the",
"CSV",
"faster",
"."
] | def filename2frame(fn):
rows = open(fn, 'r').read().splitlines()
date_list = []
word_list = []
for r in rows:
columns = r.split(',')
for i in range(1,len(columns)):
if columns[i] not in word_list:
date_list.append(columns[0])
word_list.append(columns[i])
return date_list, word_list | [
"def",
"filename2frame",
"(",
"fn",
")",
":",
"rows",
"=",
"open",
"(",
"fn",
",",
"'r'",
")",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"date_list",
"=",
"[",
"]",
"word_list",
"=",
"[",
"]",
"for",
"r",
"in",
"rows",
":",
"columns",
"=",
"r",
".",
"split",
"(",
"','",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"columns",
")",
")",
":",
"if",
"columns",
"[",
"i",
"]",
"not",
"in",
"word_list",
":",
"date_list",
".",
"append",
"(",
"columns",
"[",
"0",
"]",
")",
"word_list",
".",
"append",
"(",
"columns",
"[",
"i",
"]",
")",
"return",
"date_list",
",",
"word_list"
] | Make two lists from a shorthand CSV. | [
"Make",
"two",
"lists",
"a",
"shorthand",
"CSV",
"."
] | [
"\"\"\"Make two lists a shorthand CSV.\n\n In other words, unroll the CSV. It's a slightly unusual CSV in\n that column 0 is always a date, and then there are a *variable*\n number of subsequent columns that you can think of as word0,\n word1, ... wordn. This compact / rolled-up structure simply allows\n me to type the CSV faster.\n \"\"\"",
"# Assume columns[0] is always date and columns[1..end] are words.",
"# dedup assumes CSV chronological."
] | [
{
"param": "fn",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "fn",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def filename2frame(fn):
rows = open(fn, 'r').read().splitlines()
date_list = []
word_list = []
for r in rows:
columns = r.split(',')
for i in range(1,len(columns)):
if columns[i] not in word_list:
date_list.append(columns[0])
word_list.append(columns[i])
return date_list, word_list | 610,506 | 50 |
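
Usage sketch for the record above — writes a tiny shorthand CSV first; the repeated word shows the first-sighting dedup:

    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as f:
        f.write("2024-01-01,mama,dada\n2024-01-05,dada,ball\n")

    dates, words = filename2frame(f.name)
    print(dates)  # -> ['2024-01-01', '2024-01-01', '2024-01-05']
    print(words)  # -> ['mama', 'dada', 'ball']  ('dada' kept only at its first date)
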
c77a055f4eee13fd1bf781d4e20ce83c4fb407f2 | kwinstonix/PerfKitBenchmarker | perfkitbenchmarker/linux_benchmarks/vbench_transcoding_benchmark.py | [
"Apache-2.0"
] | Python | Prepare | None | def Prepare(spec: benchmark_spec.BenchmarkSpec) -> None:
"""Install FFmpeg and download sample videos on the VM.
Args:
spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vm = spec.vms[0]
home_dir = vm.RemoteCommand('echo $HOME')[0].strip()
# vm.InstallPreprovisionedBenchmarkData('vbench', ['vbench.zip'], home_dir)
vm.DownloadPreprovisionedData(home_dir, 'vbench', 'vbench.zip')
vm.InstallPackages('unzip')
vm.RemoteCommand('unzip -o vbench.zip')
vm.RemoteCommand('cp -r ~/vbench /scratch')
vm.Install('ffmpeg')
vm.InstallPackages('parallel') | Install FFmpeg and download sample videos on the VM.
Args:
spec: The benchmark specification. Contains all data that is
required to run the benchmark.
| Install FFmpeg and download sample videos on the VM. | [
"Install",
"FFmpeg",
"and",
"download",
"sample",
"videos",
"on",
"the",
"VM",
"."
] | def Prepare(spec: benchmark_spec.BenchmarkSpec) -> None:
vm = spec.vms[0]
home_dir = vm.RemoteCommand('echo $HOME')[0].strip()
vm.DownloadPreprovisionedData(home_dir, 'vbench', 'vbench.zip')
vm.InstallPackages('unzip')
vm.RemoteCommand('unzip -o vbench.zip')
vm.RemoteCommand('cp -r ~/vbench /scratch')
vm.Install('ffmpeg')
vm.InstallPackages('parallel') | [
"def",
"Prepare",
"(",
"spec",
":",
"benchmark_spec",
".",
"BenchmarkSpec",
")",
"->",
"None",
":",
"vm",
"=",
"spec",
".",
"vms",
"[",
"0",
"]",
"home_dir",
"=",
"vm",
".",
"RemoteCommand",
"(",
"'echo $HOME'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"vm",
".",
"DownloadPreprovisionedData",
"(",
"home_dir",
",",
"'vbench'",
",",
"'vbench.zip'",
")",
"vm",
".",
"InstallPackages",
"(",
"'unzip'",
")",
"vm",
".",
"RemoteCommand",
"(",
"'unzip -o vbench.zip'",
")",
"vm",
".",
"RemoteCommand",
"(",
"'cp -r ~/vbench /scratch'",
")",
"vm",
".",
"Install",
"(",
"'ffmpeg'",
")",
"vm",
".",
"InstallPackages",
"(",
"'parallel'",
")"
] | Install FFmpeg and download sample videos on the VM. | [
"Install",
"FFmpeg",
"and",
"download",
"sample",
"videos",
"on",
"the",
"VM",
"."
] | [
"\"\"\"Install FFmpeg and download sample videos on the VM.\n\n Args:\n spec: The benchmark specification. Contains all data that is\n required to run the benchmark.\n \"\"\"",
"# vm.InstallPreprovisionedBenchmarkData('vbench', ['vbench.zip'], home_dir)"
] | [
{
"param": "spec",
"type": "benchmark_spec.BenchmarkSpec"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "spec",
"type": "benchmark_spec.BenchmarkSpec",
"docstring": "The benchmark specification. Contains all data that is\nrequired to run the benchmark.",
"docstring_tokens": [
"The",
"benchmark",
"specification",
".",
"Contains",
"all",
"data",
"that",
"is",
"required",
"to",
"run",
"the",
"benchmark",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | from perfkitbenchmarker import benchmark_spec
def Prepare(spec: benchmark_spec.BenchmarkSpec) -> None:
vm = spec.vms[0]
home_dir = vm.RemoteCommand('echo $HOME')[0].strip()
vm.DownloadPreprovisionedData(home_dir, 'vbench', 'vbench.zip')
vm.InstallPackages('unzip')
vm.RemoteCommand('unzip -o vbench.zip')
vm.RemoteCommand('cp -r ~/vbench /scratch')
vm.Install('ffmpeg')
vm.InstallPackages('parallel') | 610,507 | 502 |
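
The function above needs a real PerfKitBenchmarker VM, so a self-contained check has to stub the spec; this sketch only verifies the call sequence, not an actual provisioning run:

    from unittest import mock

    vm = mock.Mock()
    vm.RemoteCommand.return_value = ("/home/perfkit\n", "")  # (stdout, stderr) tuple
    Prepare(mock.Mock(vms=[vm]))

    vm.Install.assert_called_once_with("ffmpeg")
    vm.InstallPackages.assert_any_call("parallel")
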
d4e360cd9295d6ad3587aecbe008ca69933c63bf | saneravi/ML_Stuff | language-word-detection/bigram.py | [
"MIT"
] | Python | deserialize | <not_specific> | def deserialize(filename='data.json'):
"""Return JSON data from file."""
import json
with open(filename) as f:
data = json.load(f)
return data | Return JSON data from file. | Return JSON data from file. | [
"Return",
"JSON",
"data",
"from",
"file",
"."
] | def deserialize(filename='data.json'):
import json
with open(filename) as f:
data = json.load(f)
return data | [
"def",
"deserialize",
"(",
"filename",
"=",
"'data.json'",
")",
":",
"import",
"json",
"with",
"open",
"(",
"filename",
")",
"as",
"f",
":",
"data",
"=",
"json",
".",
"load",
"(",
"f",
")",
"return",
"data"
] | Return JSON data from file. | [
"Return",
"JSON",
"data",
"from",
"file",
"."
] | [
"\"\"\"Return JSON data from file.\"\"\""
] | [
{
"param": "filename",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import json
def deserialize(filename='data.json'):
import json
with open(filename) as f:
data = json.load(f)
return data | 610,508 | 643 |
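
Usage sketch for the record above — the file name matches the function's default argument:

    import json

    with open("data.json", "w") as f:
        json.dump({"langs": ["en", "de"]}, f)

    print(deserialize())  # -> {'langs': ['en', 'de']}
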
86e66fbd7dbecb7d4011e936565af1d6ea27681c | ktbyers/python_course | class2/ex2_telnetlib.py | [
"Apache-2.0"
] | Python | write_bytes | <not_specific> | def write_bytes(out_data):
"""Write Python2 and Python3 compatible byte stream.
This is a bit complicated :-(
It basically ensures that the unicode in your program is always encoded into an UTF-8 byte
stream in the proper way (when bytes are written out to the network).
Or worded another way, Unicode in the program is in the idealized unicode code points, and
when you write it out to the network it needs to be represented a certain way (encoded).
"""
if sys.version_info[0] >= 3:
if isinstance(out_data, type(u'')):
return out_data.encode('utf-8')
elif isinstance(out_data, type(b'')):
return out_data
else:
if isinstance(out_data, type(u'')):
return out_data.encode('utf-8')
elif isinstance(out_data, type(str(''))):
return out_data
msg = "Invalid value for out_data neither unicode nor byte string: {}".format(out_data)
raise ValueError(msg) | Write Python2 and Python3 compatible byte stream.
This is a bit complicated :-(
It basically ensures that the unicode in your program is always encoded into an UTF-8 byte
stream in the proper way (when bytes are written out to the network).
Or worded another way, Unicode in the program is in the idealized unicode code points, and
when you write it out to the network it needs to be represented a certain way (encoded).
| Write Python2 and Python3 compatible byte stream.
This is a bit complicated :-(
It basically ensures that the unicode in your program is always encoded into an UTF-8 byte
stream in the proper way (when bytes are written out to the network).
Or worded another way, Unicode in the program is in the idealized unicode code points, and
when you write it out to the network it needs to be represented a certain way (encoded). | [
"Write",
"Python2",
"and",
"Python3",
"compatible",
"byte",
"stream",
".",
"This",
"is",
"a",
"bit",
"compliated",
":",
"-",
"(",
"It",
"basically",
"ensures",
"that",
"the",
"unicode",
"in",
"your",
"program",
"is",
"always",
"encoded",
"into",
"an",
"UTF",
"-",
"8",
"byte",
"stream",
"in",
"the",
"proper",
"way",
"(",
"when",
"bytes",
"are",
"written",
"out",
"to",
"the",
"network",
")",
".",
"Or",
"worded",
"another",
"way",
"Unicode",
"in",
"the",
"program",
"is",
"in",
"the",
"idealized",
"unicode",
"code",
"points",
"and",
"when",
"you",
"write",
"it",
"out",
"to",
"the",
"network",
"it",
"needs",
"represented",
"a",
"certain",
"way",
"(",
"encoded",
")",
"."
] | def write_bytes(out_data):
if sys.version_info[0] >= 3:
if isinstance(out_data, type(u'')):
return out_data.encode('utf-8')
elif isinstance(out_data, type(b'')):
return out_data
else:
if isinstance(out_data, type(u'')):
return out_data.encode('utf-8')
elif isinstance(out_data, type(str(''))):
return out_data
msg = "Invalid value for out_data neither unicode nor byte string: {}".format(out_data)
raise ValueError(msg) | [
"def",
"write_bytes",
"(",
"out_data",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"if",
"isinstance",
"(",
"out_data",
",",
"type",
"(",
"u''",
")",
")",
":",
"return",
"out_data",
".",
"encode",
"(",
"'utf-8'",
")",
"elif",
"isinstance",
"(",
"out_data",
",",
"type",
"(",
"b''",
")",
")",
":",
"return",
"out_data",
"else",
":",
"if",
"isinstance",
"(",
"out_data",
",",
"type",
"(",
"u''",
")",
")",
":",
"return",
"out_data",
".",
"encode",
"(",
"'utf-8'",
")",
"elif",
"isinstance",
"(",
"out_data",
",",
"type",
"(",
"str",
"(",
"''",
")",
")",
")",
":",
"return",
"out_data",
"msg",
"=",
"\"Invalid value for out_data neither unicode nor byte string: {}\"",
".",
"format",
"(",
"out_data",
")",
"raise",
"ValueError",
"(",
"msg",
")"
] | Write Python2 and Python3 compatible byte stream. | [
"Write",
"Python2",
"and",
"Python3",
"compatible",
"byte",
"stream",
"."
] | [
"\"\"\"Write Python2 and Python3 compatible byte stream.\n\n This is a bit compliated :-(\n\n It basically ensures that the unicode in your program is always encoded into an UTF-8 byte\n stream in the proper way (when bytes are written out to the network).\n\n Or worded another way, Unicode in the program is in the idealized unicode code points, and\n when you write it out to the network it needs represented a certain way (encoded).\n \"\"\""
] | [
{
"param": "out_data",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "out_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import sys
def write_bytes(out_data):
if sys.version_info[0] >= 3:
if isinstance(out_data, type(u'')):
return out_data.encode('utf-8')
elif isinstance(out_data, type(b'')):
return out_data
else:
if isinstance(out_data, type(u'')):
return out_data.encode('utf-8')
elif isinstance(out_data, type(str(''))):
return out_data
msg = "Invalid value for out_data neither unicode nor byte string: {}".format(out_data)
raise ValueError(msg) | 610,509 | 843 |
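
Usage sketch for the record above (Python 3 behaviour shown) — text is UTF-8 encoded, bytes pass through, anything else raises:

    assert write_bytes(u"héllo") == b"h\xc3\xa9llo"
    assert write_bytes(b"raw") == b"raw"
    try:
        write_bytes(42)
    except ValueError as err:
        print(err)  # Invalid value for out_data ...
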
c6a7212929883535088dc8a3731c12ef0c0992e9 | vijayakumarr345/pattern | Alphabets/Capital Alphabets/E.py | [
"MIT"
] | Python | while_E | null | def while_E():
""" *'s printed in the Shape of Capital E """
row =0
while row <9:
col =0
while col <6:
if col ==0 or row%4 ==0:
print('*',end=' ')
else:
print(' ',end=' ')
col += 1
print()
row +=1 | *'s printed in the Shape of Capital E | 's printed in the Shape of Capital E | [
"'",
"s",
"printed",
"in",
"the",
"Shape",
"of",
"Capital",
"E"
] | def while_E():
row =0
while row <9:
col =0
while col <6:
if col ==0 or row%4 ==0:
print('*',end=' ')
else:
print(' ',end=' ')
col += 1
print()
row +=1 | [
"def",
"while_E",
"(",
")",
":",
"row",
"=",
"0",
"while",
"row",
"<",
"9",
":",
"col",
"=",
"0",
"while",
"col",
"<",
"6",
":",
"if",
"col",
"==",
"0",
"or",
"row",
"%",
"4",
"==",
"0",
":",
"print",
"(",
"'*'",
",",
"end",
"=",
"' '",
")",
"else",
":",
"print",
"(",
"' '",
",",
"end",
"=",
"' '",
")",
"col",
"+=",
"1",
"print",
"(",
")",
"row",
"+=",
"1"
] | 's printed in the Shape of Capital E | [
"'",
"s",
"printed",
"in",
"the",
"Shape",
"of",
"Capital",
"E"
] | [
"\"\"\" *'s printed in the Shape of Capital E \"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def while_E():
row =0
while row <9:
col =0
while col <6:
if col ==0 or row%4 ==0:
print('*',end=' ')
else:
print(' ',end=' ')
col += 1
print()
row +=1 | 610,510 | 207 |
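
Calling the function above simply prints the letter; the shape comes from the col == 0 spine plus the bars at rows 0, 4 and 8 (row % 4 == 0):

    while_E()  # prints a 9-row, 6-column asterisk 'E'
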
ff7e94209fdc591c247e3361f8703d6bad9c7a8c | eyalev/gcloud | lib/googlecloudsdk/api_lib/compute/image_utils.py | [
"Apache-2.0"
] | Python | AddImageProjectFlag | null | def AddImageProjectFlag(parser):
"""Adds the --image flag to the given parser."""
image_project = parser.add_argument(
'--image-project',
help='The project against which all image references will be resolved.')
image_project.detailed_help = """\
The project against which all image and image family references will be
resolved. See ``--image'' for more details.
""" | Adds the --image flag to the given parser. | Adds the --image flag to the given parser. | [
"Adds",
"the",
"--",
"image",
"flag",
"to",
"the",
"given",
"parser",
"."
] | def AddImageProjectFlag(parser):
image_project = parser.add_argument(
'--image-project',
help='The project against which all image references will be resolved.')
image_project.detailed_help = """\
The project against which all image and image family references will be
resolved. See ``--image'' for more details.
""" | [
"def",
"AddImageProjectFlag",
"(",
"parser",
")",
":",
"image_project",
"=",
"parser",
".",
"add_argument",
"(",
"'--image-project'",
",",
"help",
"=",
"'The project against which all image references will be resolved.'",
")",
"image_project",
".",
"detailed_help",
"=",
"\"\"\"\\\n The project against which all image and image family references will be\n resolved. See ``--image'' for more details.\n \"\"\""
] | Adds the --image flag to the given parser. | [
"Adds",
"the",
"--",
"image",
"flag",
"to",
"the",
"given",
"parser",
"."
] | [
"\"\"\"Adds the --image flag to the given parser.\"\"\""
] | [
{
"param": "parser",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "parser",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def AddImageProjectFlag(parser):
image_project = parser.add_argument(
'--image-project',
help='The project against which all image references will be resolved.')
image_project.detailed_help = """\
The project against which all image and image family references will be
resolved. See ``--image'' for more details.
""" | 610,511 | 61 |
d3d634a9979395ddf11e30b4343f5e1bd7f60637 | a-gram/udsp | udsp/core/utils.py | [
"MIT"
] | Python | is_pow2 | <not_specific> | def is_pow2(n):
"""
Check whether a number is power of 2
Parameters
----------
n: int
A positive integer number
Returns
-------
bool
"""
return n > 0 and ((n & (n - 1)) == 0)
# Solution 2
# return math.log2(n) % 1 == 0 |
Check whether a number is power of 2
Parameters
----------
n: int
A positive integer number
Returns
-------
bool
| Check whether a number is power of 2
Parameters
int
A positive integer number
Returns
bool | [
"Check",
"whether",
"a",
"number",
"is",
"power",
"of",
"2",
"Parameters",
"int",
"A",
"positive",
"integer",
"number",
"Returns",
"bool"
] | def is_pow2(n):
return n > 0 and ((n & (n - 1)) == 0) | [
"def",
"is_pow2",
"(",
"n",
")",
":",
"return",
"n",
">",
"0",
"and",
"(",
"(",
"n",
"&",
"(",
"n",
"-",
"1",
")",
")",
"==",
"0",
")"
] | Check whether a number is power of 2
Parameters | [
"Check",
"whether",
"a",
"number",
"is",
"power",
"of",
"2",
"Parameters"
] | [
"\"\"\"\n Check whether a number is power of 2\n\n Parameters\n ----------\n n: int\n A positive integer number\n\n Returns\n -------\n bool\n\n \"\"\"",
"# Solution 2",
"# return math.log2(n) % 1 == 0"
] | [
{
"param": "n",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "n",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def is_pow2(n):
return n > 0 and ((n & (n - 1)) == 0) | 610,512 | 58 |
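
Usage sketch for the record above — the n > 0 guard matters for zero and negatives:

    assert is_pow2(1) and is_pow2(64) and is_pow2(2 ** 20)
    assert not is_pow2(0) and not is_pow2(-8) and not is_pow2(96)
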
bb4381ce307654abf90f574562eec70d93d26ed0 | GO-Eratosthenes/dhdt | dhdt/generic/unit_conversion.py | [
"Apache-2.0"
] | Python | datetime2calender | <not_specific> | def datetime2calender(dt):
""" Convert array of datetime64 to a calendar year, month, day.
Parameters
----------
dt : np.datetime64
times of interest
Returns
-------
year, month, day : numpy arrays
calendar components of the input timestamps
"""
# decompose calendar floors
Y, M, D = [dt.astype(f"M8[{x}]") for x in "YMD"]
year = (Y + 1970).astype('timedelta64[Y]').astype(int)
month = ((M - Y) + 1).astype('timedelta64[M]').astype(int)
day = ((D - M) + 1).astype('timedelta64[D]').astype(int)
return year, month, day | Convert array of datetime64 to a calendar year, month, day.
Parameters
----------
dt : np.datetime64
times of interest
Returns
-------
year, month, day : numpy arrays
calendar components of the input timestamps
| Convert array of datetime64 to a calendar year, month, day.
Parameters
dt : np.datetime64
times of interest
Returns
year, month, day : numpy arrays
calendar components of the input timestamps | [
"Convert",
"array",
"of",
"datetime64",
"to",
"a",
"calendar",
"year",
"month",
"day",
".",
"Parameters",
"dt",
":",
"np",
".",
"datetime64",
"times",
"of",
"interest",
"Returns",
"cal",
":",
"numpy",
"array",
"calendar",
"array",
"with",
"last",
"axis",
"representing",
"year",
"month",
"day"
] | def datetime2calender(dt):
Y, M, D = [dt.astype(f"M8[{x}]") for x in "YMD"]
year = (Y + 1970).astype('timedelta64[Y]').astype(int)
month = ((M - Y) + 1).astype('timedelta64[M]').astype(int)
day = ((D - M) + 1).astype('timedelta64[D]').astype(int)
return year, month, day | [
"def",
"datetime2calender",
"(",
"dt",
")",
":",
"Y",
",",
"M",
",",
"D",
"=",
"[",
"dt",
".",
"astype",
"(",
"f\"M8[{x}]\"",
")",
"for",
"x",
"in",
"\"YMD\"",
"]",
"year",
"=",
"(",
"Y",
"+",
"1970",
")",
".",
"astype",
"(",
"'timedelta64[Y]'",
")",
".",
"astype",
"(",
"int",
")",
"month",
"=",
"(",
"(",
"M",
"-",
"Y",
")",
"+",
"1",
")",
".",
"astype",
"(",
"'timedelta64[M]'",
")",
".",
"astype",
"(",
"int",
")",
"day",
"=",
"(",
"(",
"D",
"-",
"M",
")",
"+",
"1",
")",
".",
"astype",
"(",
"'timedelta64[D]'",
")",
".",
"astype",
"(",
"int",
")",
"return",
"year",
",",
"month",
",",
"day"
] | Convert array of datetime64 to a calendar year, month, day. | [
"Convert",
"array",
"of",
"datetime64",
"to",
"a",
"calendar",
"year",
"month",
"day",
"."
] | [
"\"\"\" Convert array of datetime64 to a calendar year, month, day.\n\n Parameters\n ----------\n dt : np.datetime64\n times of interest\n\n Returns\n -------\n cal : numpy array\n calendar array with last axis representing year, month, day\n \"\"\"",
"# decompose calendar floors"
] | [
{
"param": "dt",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "dt",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def datetime2calender(dt):
Y, M, D = [dt.astype(f"M8[{x}]") for x in "YMD"]
year = (Y + 1970).astype('timedelta64[Y]').astype(int)
month = ((M - Y) + 1).astype('timedelta64[M]').astype(int)
day = ((D - M) + 1).astype('timedelta64[D]').astype(int)
return year, month, day | 610,513 | 327 |
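
Usage sketch for the record above — works on numpy datetime64 scalars and arrays alike:

    import numpy as np

    dt = np.array(["2021-03-15", "1999-12-31"], dtype="datetime64[D]")
    print(datetime2calender(dt))
    # -> (array([2021, 1999]), array([ 3, 12]), array([15, 31]))
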
8fc463cb6e7e86a3826ab7f1741dfc9a666cd8b8 | kvswim/kv_jhucs_coursesubmit | prototype/courses_old/CoursesMain.py | [
"MIT"
] | Python | saveCatalog | null | def saveCatalog(filename, catalog):
"""
Save catalog to plain text file
"""
dbfile = open(filename, 'w')
for key in sorted(catalog.keys()): # do we want sort???
dbfile.write(str(catalog[key]) + '\n')
dbfile.write(catalog[key].getDescription() + '\n' + '\n')
dbfile.close() |
Save catalog to plain text file
| Save catalog to plain text file | [
"Save",
"catalog",
"to",
"plain",
"text",
"file"
] | def saveCatalog(filename, catalog):
dbfile = open(filename, 'w')
for key in sorted(catalog.keys()):
dbfile.write(str(catalog[key]) + '\n')
dbfile.write(catalog[key].getDescription() + '\n' + '\n')
dbfile.close() | [
"def",
"saveCatalog",
"(",
"filename",
",",
"catalog",
")",
":",
"dbfile",
"=",
"open",
"(",
"filename",
",",
"'w'",
")",
"for",
"key",
"in",
"sorted",
"(",
"catalog",
".",
"keys",
"(",
")",
")",
":",
"dbfile",
".",
"write",
"(",
"str",
"(",
"catalog",
"[",
"key",
"]",
")",
"+",
"'\\n'",
")",
"dbfile",
".",
"write",
"(",
"catalog",
"[",
"key",
"]",
".",
"getDescription",
"(",
")",
"+",
"'\\n'",
"+",
"'\\n'",
")",
"dbfile",
".",
"close",
"(",
")"
] | Save catalog to plain text file | [
"Save",
"catalog",
"to",
"plain",
"text",
"file"
] | [
"\"\"\"\n Save catalog to plain text file\n \"\"\"",
"# do we want sort???"
] | [
{
"param": "filename",
"type": null
},
{
"param": "catalog",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "catalog",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def saveCatalog(filename, catalog):
dbfile = open(filename, 'w')
for key in sorted(catalog.keys()):
dbfile.write(str(catalog[key]) + '\n')
dbfile.write(catalog[key].getDescription() + '\n' + '\n')
dbfile.close() | 610,514 | 908 |
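
Usage sketch for the record above — the catalog entry here is a hypothetical stand-in: any object with __str__ and a getDescription() method fits the function's expectations:

    class Course:  # minimal stand-in, not part of the record
        def __init__(self, code, desc):
            self.code, self.desc = code, desc
        def __str__(self):
            return self.code
        def getDescription(self):
            return self.desc

    catalog = {"EN.601.220": Course("EN.601.220", "Intermediate Programming")}
    saveCatalog("catalog.txt", catalog)  # writes one entry per course, sorted by key
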