hexsha (string, length 40) | repo (string, length 5-121) | path (string, length 4-227) | license (sequence) | language (string, 1 class) | identifier (string, length 1-107) | return_type (string, length 2-237, nullable) | original_string (string, length 75-13.4k) | original_docstring (string, length 13-12.9k) | docstring (string, length 13-2.57k) | docstring_tokens (sequence) | code (string, length 23-1.88k) | code_tokens (sequence) | short_docstring (string, length 1-1.32k) | short_docstring_tokens (sequence) | comment (sequence) | parameters (list) | docstring_params (dict) | code_with_imports (string, length 23-1.88k) | idxs (int64, 0-611k) | cluster (int64, 0-1.02k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a8cbbff03ded41c224ee9c7a1e835f1ca35c38b5 | mat3049/mwo-skilltree-planner | util/skill_node_extractor.py | [
"MIT"
] | Python | process_discriminators | <not_specific> | def process_discriminators(effect):
"""
Determine which discriminators apply to each effect
"""
discriminators = {
'faction': False,
'class': False,
'tonnage': False
}
# Effect has a discriminator; Faction discriminator is default
if effect.attrib['value'] == '0.0':
discriminators['faction'] = True
# Iterate over faction classifiers
for faction in effect.iter('Faction'):
# Faction has no base value; Class discriminator in place
if faction.attrib['value'] == '0.0':
discriminators['class'] = True
for weight_class in faction.iter('WeightClass'):
# Weight has no base value; Tonnage discriminator in place
if weight_class.attrib['value'] == "0.0":
discriminators['tonnage'] = True
return discriminators |
Determine which discriminators apply to each effect
| Determine which discriminators apply to each effect | [
"Determine",
"which",
"discriminators",
"apply",
"to",
"each",
"effect"
] | def process_discriminators(effect):
discriminators = {
'faction': False,
'class': False,
'tonnage': False
}
if effect.attrib['value'] == '0.0':
discriminators['faction'] = True
for faction in effect.iter('Faction'):
if faction.attrib['value'] == '0.0':
discriminators['class'] = True
for weight_class in faction.iter('WeightClass'):
if weight_class.attrib['value'] == "0.0":
discriminators['tonnage'] = True
return discriminators | [
"def",
"process_discriminators",
"(",
"effect",
")",
":",
"discriminators",
"=",
"{",
"'faction'",
":",
"False",
",",
"'class'",
":",
"False",
",",
"'tonnage'",
":",
"False",
"}",
"if",
"effect",
".",
"attrib",
"[",
"'value'",
"]",
"==",
"'0.0'",
":",
"discriminators",
"[",
"'faction'",
"]",
"=",
"True",
"for",
"faction",
"in",
"effect",
".",
"iter",
"(",
"'Faction'",
")",
":",
"if",
"faction",
".",
"attrib",
"[",
"'value'",
"]",
"==",
"'0.0'",
":",
"discriminators",
"[",
"'class'",
"]",
"=",
"True",
"for",
"weight_class",
"in",
"faction",
".",
"iter",
"(",
"'WeightClass'",
")",
":",
"if",
"weight_class",
".",
"attrib",
"[",
"'value'",
"]",
"==",
"\"0.0\"",
":",
"discriminators",
"[",
"'tonnage'",
"]",
"=",
"True",
"return",
"discriminators"
] | Determine which discriminators apply to each effect | [
"Determine",
"which",
"discriminators",
"apply",
"to",
"each",
"effect"
] | [
"\"\"\"\n Determine which discriminators apply to each effect\n \"\"\"",
"# Effect has a discriminator; Faction discriminator is default",
"# Iterate over faction classifiers",
"# Faction has no base value; Class discriminator in place",
"# Weight has no base value; Tonnage discriminator in place"
] | [
{
"param": "effect",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "effect",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def process_discriminators(effect):
discriminators = {
'faction': False,
'class': False,
'tonnage': False
}
if effect.attrib['value'] == '0.0':
discriminators['faction'] = True
for faction in effect.iter('Faction'):
if faction.attrib['value'] == '0.0':
discriminators['class'] = True
for weight_class in faction.iter('WeightClass'):
if weight_class.attrib['value'] == "0.0":
discriminators['tonnage'] = True
return discriminators | 609,964 | 309 |
0861fb6de1988f399cc4e82647007a5611a1a734 | suyashkumar/nucleus | nucleus/util/sequence_utils.py | [
"Apache-2.0"
] | Python | _add_lowercase | <not_specific> | def _add_lowercase(d):
"""Returns a dictionary with the lowercase keys and values entered."""
retval = d.copy()
retval.update({k.lower(): v.lower() for k, v in d.items()})
return retval | Returns a dictionary with the lowercase keys and values entered. | Returns a dictionary with the lowercase keys and values entered. | [
"Returns",
"a",
"dictionary",
"with",
"the",
"lowercase",
"keys",
"and",
"values",
"entered",
"."
] | def _add_lowercase(d):
retval = d.copy()
retval.update({k.lower(): v.lower() for k, v in d.items()})
return retval | [
"def",
"_add_lowercase",
"(",
"d",
")",
":",
"retval",
"=",
"d",
".",
"copy",
"(",
")",
"retval",
".",
"update",
"(",
"{",
"k",
".",
"lower",
"(",
")",
":",
"v",
".",
"lower",
"(",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"}",
")",
"return",
"retval"
] | Returns a dictionary with the lowercase keys and values entered. | [
"Returns",
"a",
"dictionary",
"with",
"the",
"lowercase",
"keys",
"and",
"values",
"entered",
"."
] | [
"\"\"\"Returns a dictionary with the lowercase keys and values entered.\"\"\""
] | [
{
"param": "d",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "d",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _add_lowercase(d):
retval = d.copy()
retval.update({k.lower(): v.lower() for k, v in d.items()})
return retval | 609,965 | 397 |
c156b6eff9f1757ae304822996f4ef0c02733c11 | Ceofy/NetColoc | netcoloc/network_colocalization.py | [
"MIT"
] | Python | calculate_expected_overlap | null | def calculate_expected_overlap(z_scores_1, z_scores_2, gene_set_name_1='Gene Set 1', gene_set_name_2='Gene Set 2',
z_score_threshold=3, z1_threshold=1.5,z2_threshold=1.5,
num_reps=1000, save_random_network_overlap=False, plot=False):
'''Function to determine size of expected network overlap by randomly
shuffling gene names.
Args:
z_scores_1 (pandas.Series): Pandas Series resulting from the
netprop_zscore.netprop_zscore or netprop_zscore.calc_zscore_heat
methods, containing the z-scores of each gene following network
propagation. The index consists of gene names.
z_scores_2 (pandas.Series): Similar to z_scores_1. The two pandas Series
must contain the same genes (ie. come from the same interactome
network).
z_score_threshold (float): The threshold to determine whether a gene is
a part of the network overlap or not. Genes with combined z-scores
below this threshold will be discarded. (Default: 3)
z1_threshold (float): The individual z1-score threshold to determine whether a gene is
a part of the network overlap or not. Genes with z1-scores
below this threshold will be discarded. (Default: 1.5)
z2_threshold (float): The individual z2-score threshold to determine whether a gene is
a part of the network overlap or not. Genes with z2-scores
below this threshold will be discarded. (Default: 1.5)
num_reps (int): The number of times that gene names will be shuffled.
plot (bool): If True, the distribution will be plotted. If False, it
will not be plotted. (Default: False)
Returns:
float:
'''
# Build a distribution of expected network overlap sizes by shuffling node names
#random_network_overlap_sizes = []
#z_scores_1_copy = z_scores_1.copy()
#z_scores_2_copy = z_scores_2.copy()
#gene_set_1 = z_scores_1.index.tolist()
#gene_set_2 = z_scores_2.index.tolist()
#for _ in range(num_reps):
# Shuffle gene name labels
# np.random.shuffle(gene_set_1)
# z_scores_1_copy.index = gene_set_1
# np.random.shuffle(gene_set_2)
#z_scores_2_copy.index = gene_set_2
#random_size = len(calculate_network_overlap(z_scores_1_copy, z_scores_2_copy, z_score_threshold=z_score_threshold,
#z1_threshold=z1_threshold,z2_threshold=z2_threshold))
#random_network_overlap_sizes.append(random_size)
#network_overlap_size = len(calculate_network_overlap(z_scores_1, z_scores_2, z_score_threshold=z_score_threshold,
# z1_threshold=z1_threshold,z2_threshold=z2_threshold))
#if plot:
# plt.figure(figsize=(5, 4))
# dfig = sns.histplot(random_network_overlap_sizes, label='Expected network intersection size')
# plt.vlines(network_overlap_size, ymin=0, ymax=dfig.dataLim.bounds[3], color='r', label='Observed network intersection size')
#plt.xlabel('Size of proximal subgraph, z > ' + str(z_score_threshold), fontsize=16)
#plt.legend(fontsize=12)
#return network_overlap_size, random_network_overlap_sizes
pass | Function to determine size of expected network overlap by randomly
shuffling gene names.
Args:
z_scores_1 (pandas.Series): Pandas Series resulting from the
netprop_zscore.netprop_zscore or netprop_zscore.calc_zscore_heat
methods, containing the z-scores of each gene following network
propagation. The index consists of gene names.
z_scores_2 (pandas.Series): Similar to z_scores_1. The two pandas Series
must contain the same genes (ie. come from the same interactome
network).
z_score_threshold (float): The threshold to determine whether a gene is
a part of the network overlap or not. Genes with combined z-scores
below this threshold will be discarded. (Default: 3)
z1_threshold (float): The individual z1-score threshold to determine whether a gene is
a part of the network overlap or not. Genes with z1-scores
below this threshold will be discarded. (Default: 1.5)
z2_threshold (float): The individual z2-score threshold to determine whether a gene is
a part of the network overlap or not. Genes with z2-scores
below this threshold will be discarded. (Default: 1.5)
num_reps (int): The number of times that gene names will be shuffled.
plot (bool): If True, the distribution will be plotted. If False, it
will not be plotted. (Default: False)
Returns:
float:
| Function to determine size of expected network overlap by randomly
shuffling gene names. | [
"Function",
"to",
"determine",
"size",
"of",
"expected",
"network",
"overlap",
"by",
"randomly",
"shuffling",
"gene",
"names",
"."
] | def calculate_expected_overlap(z_scores_1, z_scores_2, gene_set_name_1='Gene Set 1', gene_set_name_2='Gene Set 2',
z_score_threshold=3, z1_threshold=1.5,z2_threshold=1.5,
num_reps=1000, save_random_network_overlap=False, plot=False):
pass | [
"def",
"calculate_expected_overlap",
"(",
"z_scores_1",
",",
"z_scores_2",
",",
"gene_set_name_1",
"=",
"'Gene Set 1'",
",",
"gene_set_name_2",
"=",
"'Gene Set 2'",
",",
"z_score_threshold",
"=",
"3",
",",
"z1_threshold",
"=",
"1.5",
",",
"z2_threshold",
"=",
"1.5",
",",
"num_reps",
"=",
"1000",
",",
"save_random_network_overlap",
"=",
"False",
",",
"plot",
"=",
"False",
")",
":",
"pass"
] | Function to determine size of expected network overlap by randomly
shuffling gene names. | [
"Function",
"to",
"determine",
"size",
"of",
"expected",
"network",
"overlap",
"by",
"randomly",
"shuffling",
"gene",
"names",
"."
] | [
"'''Function to determine size of expected network overlap by randomly\n shuffling gene names.\n\n Args:\n z_scores_1 (pandas.Series): Pandas Series resulting from the \n netprop_zscore.netprop_zscore or netprop_zscore.calc_zscore_heat\n methods, containing the z-scores of each gene following network\n propagation. The index consists of gene names.\n z_scores_2 (pandas.Series): Similar to z_scores_1. The two pandas Series\n must contain the same genes (ie. come from the same interactome\n network).\n z_score_threshold (float): The threshold to determine whether a gene is \n a part of the network overlap or not. Genes with combined z-scores\n below this threshold will be discarded. (Default: 3)\n z1_threshold (float): The individual z1-score threshold to determine whether a gene is \n a part of the network overlap or not. Genes with z1-scores\n below this threshold will be discarded. (Default: 1.5)\n z2_threshold (float): The individual z2-score threshold to determine whether a gene is \n a part of the network overlap or not. Genes with z2-scores\n below this threshold will be discarded. (Default: 1.5)\n num_reps (int): The number of times that gene names will be shuffled.\n plot (bool): If True, the distribution will be plotted. If False, it\n will not be plotted. (Default: False)\n\n Returns:\n float: \n\n '''",
"# Build a distribution of expected network overlap sizes by shuffling node names",
"#random_network_overlap_sizes = []",
"#z_scores_1_copy = z_scores_1.copy()",
"#z_scores_2_copy = z_scores_2.copy()",
"#gene_set_1 = z_scores_1.index.tolist()",
"#gene_set_2 = z_scores_2.index.tolist()",
"#for _ in range(num_reps):",
"# Shuffle gene name labels",
"# np.random.shuffle(gene_set_1)",
"# z_scores_1_copy.index = gene_set_1",
"# np.random.shuffle(gene_set_2)",
"#z_scores_2_copy.index = gene_set_2",
"#random_size = len(calculate_network_overlap(z_scores_1_copy, z_scores_2_copy, z_score_threshold=z_score_threshold,",
"#z1_threshold=z1_threshold,z2_threshold=z2_threshold))",
"#random_network_overlap_sizes.append(random_size)",
"#network_overlap_size = len(calculate_network_overlap(z_scores_1, z_scores_2, z_score_threshold=z_score_threshold,",
"# z1_threshold=z1_threshold,z2_threshold=z2_threshold))",
"#if plot:",
"# plt.figure(figsize=(5, 4))",
"# dfig = sns.histplot(random_network_overlap_sizes, label='Expected network intersection size')",
"# plt.vlines(network_overlap_size, ymin=0, ymax=dfig.dataLim.bounds[3], color='r', label='Observed network intersection size')",
"#plt.xlabel('Size of proximal subgraph, z > ' + str(z_score_threshold), fontsize=16)",
"#plt.legend(fontsize=12)",
"#return network_overlap_size, random_network_overlap_sizes"
] | [
{
"param": "z_scores_1",
"type": null
},
{
"param": "z_scores_2",
"type": null
},
{
"param": "gene_set_name_1",
"type": null
},
{
"param": "gene_set_name_2",
"type": null
},
{
"param": "z_score_threshold",
"type": null
},
{
"param": "z1_threshold",
"type": null
},
{
"param": "z2_threshold",
"type": null
},
{
"param": "num_reps",
"type": null
},
{
"param": "save_random_network_overlap",
"type": null
},
{
"param": "plot",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "float"
}
],
"raises": [],
"params": [
{
"identifier": "z_scores_1",
"type": null,
"docstring": "Pandas Series resulting from the\nnetprop_zscore.netprop_zscore or netprop_zscore.calc_zscore_heat\nmethods, containing the z-scores of each gene following network\npropagation. The index consists of gene names.",
"docstring_tokens": [
"Pandas",
"Series",
"resulting",
"from",
"the",
"netprop_zscore",
".",
"netprop_zscore",
"or",
"netprop_zscore",
".",
"calc_zscore_heat",
"methods",
"containing",
"the",
"z",
"-",
"scores",
"of",
"each",
"gene",
"following",
"network",
"propagation",
".",
"The",
"index",
"consists",
"of",
"gene",
"names",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "z_scores_2",
"type": null,
"docstring": "Similar to z_scores_1. The two pandas Series\nmust contain the same genes .",
"docstring_tokens": [
"Similar",
"to",
"z_scores_1",
".",
"The",
"two",
"pandas",
"Series",
"must",
"contain",
"the",
"same",
"genes",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "gene_set_name_1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "gene_set_name_2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "z_score_threshold",
"type": null,
"docstring": "The threshold to determine whether a gene is\na part of the network overlap or not. Genes with combined z-scores\nbelow this threshold will be discarded.",
"docstring_tokens": [
"The",
"threshold",
"to",
"determine",
"whether",
"a",
"gene",
"is",
"a",
"part",
"of",
"the",
"network",
"overlap",
"or",
"not",
".",
"Genes",
"with",
"combined",
"z",
"-",
"scores",
"below",
"this",
"threshold",
"will",
"be",
"discarded",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "z1_threshold",
"type": null,
"docstring": "The individual z1-score threshold to determine whether a gene is\na part of the network overlap or not. Genes with z1-scores\nbelow this threshold will be discarded.",
"docstring_tokens": [
"The",
"individual",
"z1",
"-",
"score",
"threshold",
"to",
"determine",
"whether",
"a",
"gene",
"is",
"a",
"part",
"of",
"the",
"network",
"overlap",
"or",
"not",
".",
"Genes",
"with",
"z1",
"-",
"scores",
"below",
"this",
"threshold",
"will",
"be",
"discarded",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "z2_threshold",
"type": null,
"docstring": "The individual z2-score threshold to determine whether a gene is\na part of the network overlap or not. Genes with z2-scores\nbelow this threshold will be discarded. (Default: 1.5)\nnum_reps (int): The number of times that gene names will be shuffled.",
"docstring_tokens": [
"The",
"individual",
"z2",
"-",
"score",
"threshold",
"to",
"determine",
"whether",
"a",
"gene",
"is",
"a",
"part",
"of",
"the",
"network",
"overlap",
"or",
"not",
".",
"Genes",
"with",
"z2",
"-",
"scores",
"below",
"this",
"threshold",
"will",
"be",
"discarded",
".",
"(",
"Default",
":",
"1",
".",
"5",
")",
"num_reps",
"(",
"int",
")",
":",
"The",
"number",
"of",
"times",
"that",
"gene",
"names",
"will",
"be",
"shuffled",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "num_reps",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "save_random_network_overlap",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "plot",
"type": null,
"docstring": "If True, the distribution will be plotted. If False, it\nwill not be plotted. (Default: False)",
"docstring_tokens": [
"If",
"True",
"the",
"distribution",
"will",
"be",
"plotted",
".",
"If",
"False",
"it",
"will",
"not",
"be",
"plotted",
".",
"(",
"Default",
":",
"False",
")"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def calculate_expected_overlap(z_scores_1, z_scores_2, gene_set_name_1='Gene Set 1', gene_set_name_2='Gene Set 2',
z_score_threshold=3, z1_threshold=1.5,z2_threshold=1.5,
num_reps=1000, save_random_network_overlap=False, plot=False):
pass | 609,966 | 653 |
b546ac7e1c25e7abd322681c59c60a1c1988cd70 | ArneBinder/nlp-formats | nlp_formats/brat.py | [
"Apache-2.0"
] | Python | _get_normalization_annotation | <not_specific> | def _get_normalization_annotation(annotation_line):
"""
example input:
N1 Reference T1 Wikipedia:534366 Barack Obama
"""
_id, remaining, text = annotation_line.split('\t', maxsplit=2)
_type, target, ref = remaining.split(' ')
res_id, ent_id = ref.split(':')
return {
'id': _id,
'type': _type,
'target': target,
'resource_id': res_id,
'entity_id': ent_id,
} |
example input:
N1 Reference T1 Wikipedia:534366 Barack Obama
| example input:
N1 Reference T1 Wikipedia:534366 Barack Obama | [
"example",
"input",
":",
"N1",
"Reference",
"T1",
"Wikipedia",
":",
"534366",
"Barack",
"Obama"
] | def _get_normalization_annotation(annotation_line):
_id, remaining, text = annotation_line.split('\t', maxsplit=2)
_type, target, ref = remaining.split(' ')
res_id, ent_id = ref.split(':')
return {
'id': _id,
'type': _type,
'target': target,
'resource_id': res_id,
'entity_id': ent_id,
} | [
"def",
"_get_normalization_annotation",
"(",
"annotation_line",
")",
":",
"_id",
",",
"remaining",
",",
"text",
"=",
"annotation_line",
".",
"split",
"(",
"'\\t'",
",",
"maxsplit",
"=",
"2",
")",
"_type",
",",
"target",
",",
"ref",
"=",
"remaining",
".",
"split",
"(",
"' '",
")",
"res_id",
",",
"ent_id",
"=",
"ref",
".",
"split",
"(",
"':'",
")",
"return",
"{",
"'id'",
":",
"_id",
",",
"'type'",
":",
"_type",
",",
"'target'",
":",
"target",
",",
"'resource_id'",
":",
"res_id",
",",
"'entity_id'",
":",
"ent_id",
",",
"}"
] | example input:
N1 Reference T1 Wikipedia:534366 Barack Obama | [
"example",
"input",
":",
"N1",
"Reference",
"T1",
"Wikipedia",
":",
"534366",
"Barack",
"Obama"
] | [
"\"\"\"\n example input:\n N1\tReference T1 Wikipedia:534366\tBarack Obama\n \"\"\""
] | [
{
"param": "annotation_line",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "annotation_line",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _get_normalization_annotation(annotation_line):
_id, remaining, text = annotation_line.split('\t', maxsplit=2)
_type, target, ref = remaining.split(' ')
res_id, ent_id = ref.split(':')
return {
'id': _id,
'type': _type,
'target': target,
'resource_id': res_id,
'entity_id': ent_id,
} | 609,967 | 333 |
2501bb093efdc1993ce1b776f9c465b03245564a | VictorErmakov/tacitus-notes | src/tacitus/tacitus.py | [
"BSD-2-Clause"
] | Python | exec_no_fail | <not_specific> | def exec_no_fail(command):
"""runs shell command and capture the output"""
result = subprocess.run(command, shell=True, capture_output=True)
if result.returncode:
print(f"Error executing: {command}")
sys.exit(result.stderr.decode("utf-8").strip())
return result.stdout.decode("utf-8").strip() | runs shell command and capture the output | runs shell command and capture the output | [
"runs",
"shell",
"command",
"and",
"capture",
"the",
"output"
] | def exec_no_fail(command):
result = subprocess.run(command, shell=True, capture_output=True)
if result.returncode:
print(f"Error executing: {command}")
sys.exit(result.stderr.decode("utf-8").strip())
return result.stdout.decode("utf-8").strip() | [
"def",
"exec_no_fail",
"(",
"command",
")",
":",
"result",
"=",
"subprocess",
".",
"run",
"(",
"command",
",",
"shell",
"=",
"True",
",",
"capture_output",
"=",
"True",
")",
"if",
"result",
".",
"returncode",
":",
"print",
"(",
"f\"Error executing: {command}\"",
")",
"sys",
".",
"exit",
"(",
"result",
".",
"stderr",
".",
"decode",
"(",
"\"utf-8\"",
")",
".",
"strip",
"(",
")",
")",
"return",
"result",
".",
"stdout",
".",
"decode",
"(",
"\"utf-8\"",
")",
".",
"strip",
"(",
")"
] | runs shell command and capture the output | [
"runs",
"shell",
"command",
"and",
"capture",
"the",
"output"
] | [
"\"\"\"runs shell command and capture the output\"\"\""
] | [
{
"param": "command",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "command",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import subprocess
import sys
def exec_no_fail(command):
result = subprocess.run(command, shell=True, capture_output=True)
if result.returncode:
print(f"Error executing: {command}")
sys.exit(result.stderr.decode("utf-8").strip())
return result.stdout.decode("utf-8").strip() | 609,968 | 345 |
a3e782ea84e79316daa3f59a91f66da46ca30d83 | adamrehn/gen-invoice | gen_invoice/InvoiceGenerator.py | [
"MIT"
] | Python | _format_currency | <not_specific> | def _format_currency(value):
'''
Formats a numeric value as a currency string using the system's locale settings
'''
# Set our locale to the system default
locale.setlocale(locale.LC_ALL, '')
# Format the value using the currency settings for our locale
return locale.currency(value, symbol=True, grouping=True) |
Formats a numeric value as a currency string using the system's locale settings
| Formats a numeric value as a currency string using the system's locale settings | [
"Formats",
"a",
"numeric",
"value",
"as",
"a",
"currency",
"string",
"using",
"the",
"system",
"'",
"s",
"locale",
"settings"
] | def _format_currency(value):
locale.setlocale(locale.LC_ALL, '')
return locale.currency(value, symbol=True, grouping=True) | [
"def",
"_format_currency",
"(",
"value",
")",
":",
"locale",
".",
"setlocale",
"(",
"locale",
".",
"LC_ALL",
",",
"''",
")",
"return",
"locale",
".",
"currency",
"(",
"value",
",",
"symbol",
"=",
"True",
",",
"grouping",
"=",
"True",
")"
] | Formats a numeric value as a currency string using the system's locale settings | [
"Formats",
"a",
"numeric",
"value",
"as",
"a",
"currency",
"string",
"using",
"the",
"system",
"'",
"s",
"locale",
"settings"
] | [
"'''\n\t\tFormats a numeric value as a currency string using the system's locale settings\n\t\t'''",
"# Set our locale to the system default",
"# Format the value using the currency settings for our locale"
] | [
{
"param": "value",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import locale
def _format_currency(value):
locale.setlocale(locale.LC_ALL, '')
return locale.currency(value, symbol=True, grouping=True) | 609,969 | 651 |
af8de8aa9949afa42fb3753478c9e2ec92d6a33b | Ahmed-Araby/CarND-LaneLines-P1 | Lines.py | [
"MIT"
] | Python | average_lines | <not_specific> | def average_lines(lines , Avg_ux , Avg_uy , Avg_lx , Avg_ly):
"""
- I will work on averaging the end points
- ***************** we need to find better way to average the lines ******************
:param left_lanes:
:param right_lanes:
:return:
"""
# left lane averaging end points
avg_lx = 0
avg_ly = 0
avg_ux = 0
avg_uy = 0
for i in range(0 , len(lines), 1):
# (col , row)
fx , fy , sx , sy = lines[i]
point1 = 0
point2 = 0
# point1 will have the lower point
if fy>sy:
point1 = (fx , fy)
point2 = (sx , sy)
else:
point2 = (sx , sy)
point1 = (fx , fy)
# average the points
avg_lx += point1[0]
avg_ly += point1[1]
avg_ux +=point2[0]
avg_uy +=point2[1]
# calculate moving average , more smooth detection
# the problem here is the bias that we need to correct as
# we did initialize the averages = 0 in the beginning.
"""
Avg_lx = int(.9*Avg_lx + .1*avg_lx)
Avg_ly = int(.9*Avg_ly + .1*avg_ly)
Avg_ux = int(.9*Avg_ux + .1*avg_ux)
Avg_uy = int(.9*Avg_uy + .1*avg_uy)
"""
l= len(lines)
return [avg_lx //l , avg_ly //l , avg_ux //l , avg_uy //l] |
- I will work on averaging the end points
- ***************** we need to find better way to average the lines ******************
:param left_lanes:
:param right_lanes:
:return:
| I will work on averaging the end points
we need to find better way to average the lines | [
"I",
"will",
"work",
"on",
"averaging",
"the",
"end",
"points",
"we",
"need",
"to",
"find",
"better",
"way",
"to",
"average",
"the",
"lines"
] | def average_lines(lines , Avg_ux , Avg_uy , Avg_lx , Avg_ly):
avg_lx = 0
avg_ly = 0
avg_ux = 0
avg_uy = 0
for i in range(0 , len(lines), 1):
fx , fy , sx , sy = lines[i]
point1 = 0
point2 = 0
if fy>sy:
point1 = (fx , fy)
point2 = (sx , sy)
else:
point2 = (sx , sy)
point1 = (fx , fy)
avg_lx += point1[0]
avg_ly += point1[1]
avg_ux +=point2[0]
avg_uy +=point2[1]
l= len(lines)
return [avg_lx //l , avg_ly //l , avg_ux //l , avg_uy //l] | [
"def",
"average_lines",
"(",
"lines",
",",
"Avg_ux",
",",
"Avg_uy",
",",
"Avg_lx",
",",
"Avg_ly",
")",
":",
"avg_lx",
"=",
"0",
"avg_ly",
"=",
"0",
"avg_ux",
"=",
"0",
"avg_uy",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"lines",
")",
",",
"1",
")",
":",
"fx",
",",
"fy",
",",
"sx",
",",
"sy",
"=",
"lines",
"[",
"i",
"]",
"point1",
"=",
"0",
"point2",
"=",
"0",
"if",
"fy",
">",
"sy",
":",
"point1",
"=",
"(",
"fx",
",",
"fy",
")",
"point2",
"=",
"(",
"sx",
",",
"sy",
")",
"else",
":",
"point2",
"=",
"(",
"sx",
",",
"sy",
")",
"point1",
"=",
"(",
"fx",
",",
"fy",
")",
"avg_lx",
"+=",
"point1",
"[",
"0",
"]",
"avg_ly",
"+=",
"point1",
"[",
"1",
"]",
"avg_ux",
"+=",
"point2",
"[",
"0",
"]",
"avg_uy",
"+=",
"point2",
"[",
"1",
"]",
"\"\"\"\n Avg_lx = int(.9*Avg_lx + .1*avg_lx)\n Avg_ly = int(.9*Avg_ly + .1*avg_ly)\n Avg_ux = int(.9*Avg_ux + .1*avg_ux)\n Avg_uy = int(.9*Avg_uy + .1*avg_uy)\n \"\"\"",
"l",
"=",
"len",
"(",
"lines",
")",
"return",
"[",
"avg_lx",
"//",
"l",
",",
"avg_ly",
"//",
"l",
",",
"avg_ux",
"//",
"l",
",",
"avg_uy",
"//",
"l",
"]"
] | I will work on averaging the end points
***************** we need to find better way to average the lines | [
"I",
"will",
"work",
"on",
"averaging",
"the",
"end",
"points",
"*****************",
"we",
"need",
"to",
"find",
"better",
"way",
"to",
"average",
"the",
"lines"
] | [
"\"\"\"\n - I will work on averaging the end points\n - ***************** we need to find better way to average the lines ******************\n :param left_lanes:\n :param right_lanes:\n :return:\n \"\"\"",
"# left lane averaging end points",
"# (col , row)",
"# point1 will have the lower point",
"# average the points",
"# calculate moving average , more smooth detection",
"# the problem here is the bias that we need to correct as",
"# we did initialize the averages = 0 in the beginning.",
"\"\"\"\n Avg_lx = int(.9*Avg_lx + .1*avg_lx)\n Avg_ly = int(.9*Avg_ly + .1*avg_ly)\n Avg_ux = int(.9*Avg_ux + .1*avg_ux)\n Avg_uy = int(.9*Avg_uy + .1*avg_uy)\n \"\"\""
] | [
{
"param": "lines",
"type": null
},
{
"param": "Avg_ux",
"type": null
},
{
"param": "Avg_uy",
"type": null
},
{
"param": "Avg_lx",
"type": null
},
{
"param": "Avg_ly",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "lines",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "Avg_ux",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "Avg_uy",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "Avg_lx",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "Avg_ly",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "left_lanes",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "right_lanes",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"others": []
} | def average_lines(lines , Avg_ux , Avg_uy , Avg_lx , Avg_ly):
avg_lx = 0
avg_ly = 0
avg_ux = 0
avg_uy = 0
for i in range(0 , len(lines), 1):
fx , fy , sx , sy = lines[i]
point1 = 0
point2 = 0
if fy>sy:
point1 = (fx , fy)
point2 = (sx , sy)
else:
point2 = (sx , sy)
point1 = (fx , fy)
avg_lx += point1[0]
avg_ly += point1[1]
avg_ux +=point2[0]
avg_uy +=point2[1]
l= len(lines)
return [avg_lx //l , avg_ly //l , avg_ux //l , avg_uy //l] | 609,970 | 159 |
5bd10af5e1c601005c7f63a5888772f0842bf135 | vishalbelsare/neuropod | source/python/neuropod/loader.py | [
"Apache-2.0"
] | Python | _convert_native_shape_to_list | <not_specific> | def _convert_native_shape_to_list(dims):
"""
Takes a list of `neuropod_native.Dimension` objects and converts to a list of python types
"""
out = []
for dim in dims:
if dim.value == -2:
# It's a symbol
out.append(dim.symbol)
elif dim.value == -1:
# Any shape is okay
out.append(None)
else:
out.append(dim.value)
return out |
Takes a list of `neuropod_native.Dimension` objects and converts to a list of python types
| Takes a list of `neuropod_native.Dimension` objects and converts to a list of python types | [
"Takes",
"a",
"list",
"of",
"`",
"neuropod_native",
".",
"Dimension",
"`",
"objects",
"and",
"converts",
"to",
"a",
"list",
"of",
"python",
"types"
] | def _convert_native_shape_to_list(dims):
out = []
for dim in dims:
if dim.value == -2:
out.append(dim.symbol)
elif dim.value == -1:
out.append(None)
else:
out.append(dim.value)
return out | [
"def",
"_convert_native_shape_to_list",
"(",
"dims",
")",
":",
"out",
"=",
"[",
"]",
"for",
"dim",
"in",
"dims",
":",
"if",
"dim",
".",
"value",
"==",
"-",
"2",
":",
"out",
".",
"append",
"(",
"dim",
".",
"symbol",
")",
"elif",
"dim",
".",
"value",
"==",
"-",
"1",
":",
"out",
".",
"append",
"(",
"None",
")",
"else",
":",
"out",
".",
"append",
"(",
"dim",
".",
"value",
")",
"return",
"out"
] | Takes a list of `neuropod_native.Dimension` objects and converts to a list of python types | [
"Takes",
"a",
"list",
"of",
"`",
"neuropod_native",
".",
"Dimension",
"`",
"objects",
"and",
"converts",
"to",
"a",
"list",
"of",
"python",
"types"
] | [
"\"\"\"\n Takes a list of `neuropod_native.Dimension` objects and converts to a list of python types\n \"\"\"",
"# It's a symbol",
"# Any shape is okay"
] | [
{
"param": "dims",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "dims",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _convert_native_shape_to_list(dims):
out = []
for dim in dims:
if dim.value == -2:
out.append(dim.symbol)
elif dim.value == -1:
out.append(None)
else:
out.append(dim.value)
return out | 609,971 | 877 |
52c92c16479f37578061c9f5ceff436e97c7183b | suiboli314/RoombaSim | src/GreedyAlgo.py | [
"MIT"
] | Python | _get_distance | <not_specific> | def _get_distance(cell1, cell2):
""" Calculate manhattan distance between given two cells
Args:
cell1 (tuple): a cell
cell2 (tuple): a cell
Returns:
int: manhattan distance
"""
return abs(cell1[0] - cell2[0]) + abs(cell1[1]-cell2[1]) | Calculate manhattan distance between given two cells
Args:
cell1 (tuple): a cell
cell2 (tuple): a cell
Returns:
int: manhattan distance
| Calculate manhattan distance between given two cells | [
"Calculate",
"manhattan",
"distance",
"between",
"given",
"two",
"cells"
] | def _get_distance(cell1, cell2):
return abs(cell1[0] - cell2[0]) + abs(cell1[1]-cell2[1]) | [
"def",
"_get_distance",
"(",
"cell1",
",",
"cell2",
")",
":",
"return",
"abs",
"(",
"cell1",
"[",
"0",
"]",
"-",
"cell2",
"[",
"0",
"]",
")",
"+",
"abs",
"(",
"cell1",
"[",
"1",
"]",
"-",
"cell2",
"[",
"1",
"]",
")"
] | Calculate manhattan distance between given two cells | [
"Calculate",
"manhattan",
"distance",
"between",
"given",
"two",
"cells"
] | [
"\"\"\" Calculate manhattan distance between given two cells\n\n Args:\n cell1 (tuple): a cell\n cell2 (tuple): a cell\n Returns:\n int: manhattan distance\n \"\"\""
] | [
{
"param": "cell1",
"type": null
},
{
"param": "cell2",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "int"
}
],
"raises": [],
"params": [
{
"identifier": "cell1",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "cell2",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def _get_distance(cell1, cell2):
return abs(cell1[0] - cell2[0]) + abs(cell1[1]-cell2[1]) | 609,972 | 429 |
84d81fb6f5d8bd9f30a34835954e585900de460e | SimonBoothroyd/nonbonded | nonbonded/library/plotting/plotly/utilities.py | [
"MIT"
] | Python | unique_colors | <not_specific> | def unique_colors(n_colors):
"""Returns a unique list of distinguishable colors. These are taken from the
default seaborn `colorblind` color palette.
Parameters
----------
n_colors
The number of colors to return (max=10).
"""
colors = [
(0.004, 0.451, 0.698),
(0.871, 0.561, 0.020),
(0.008, 0.620, 0.451),
(0.835, 0.369, 0.000),
(0.800, 0.471, 0.737),
(0.792, 0.569, 0.380),
(0.984, 0.686, 0.894),
(0.580, 0.580, 0.580),
(0.925, 0.882, 0.200),
(0.337, 0.706, 0.914),
]
assert n_colors <= len(colors)
return colors[:n_colors] | Returns a unique list of distinguishable colors. These are taken from the
default seaborn `colorblind` color palette.
Parameters
----------
n_colors
The number of colors to return (max=10).
| Returns a unique list of distinguishable colors. These are taken from the
default seaborn `colorblind` color palette.
Parameters
n_colors
The number of colors to return (max=10). | [
"Returns",
"a",
"unique",
"list",
"of",
"distinguishable",
"colors",
".",
"These",
"are",
"taken",
"from",
"the",
"default",
"seaborn",
"`",
"colorblind",
"`",
"color",
"palette",
".",
"Parameters",
"n_colors",
"The",
"number",
"of",
"colors",
"to",
"return",
"(",
"max",
"=",
"10",
")",
"."
] | def unique_colors(n_colors):
colors = [
(0.004, 0.451, 0.698),
(0.871, 0.561, 0.020),
(0.008, 0.620, 0.451),
(0.835, 0.369, 0.000),
(0.800, 0.471, 0.737),
(0.792, 0.569, 0.380),
(0.984, 0.686, 0.894),
(0.580, 0.580, 0.580),
(0.925, 0.882, 0.200),
(0.337, 0.706, 0.914),
]
assert n_colors <= len(colors)
return colors[:n_colors] | [
"def",
"unique_colors",
"(",
"n_colors",
")",
":",
"colors",
"=",
"[",
"(",
"0.004",
",",
"0.451",
",",
"0.698",
")",
",",
"(",
"0.871",
",",
"0.561",
",",
"0.020",
")",
",",
"(",
"0.008",
",",
"0.620",
",",
"0.451",
")",
",",
"(",
"0.835",
",",
"0.369",
",",
"0.000",
")",
",",
"(",
"0.800",
",",
"0.471",
",",
"0.737",
")",
",",
"(",
"0.792",
",",
"0.569",
",",
"0.380",
")",
",",
"(",
"0.984",
",",
"0.686",
",",
"0.894",
")",
",",
"(",
"0.580",
",",
"0.580",
",",
"0.580",
")",
",",
"(",
"0.925",
",",
"0.882",
",",
"0.200",
")",
",",
"(",
"0.337",
",",
"0.706",
",",
"0.914",
")",
",",
"]",
"assert",
"n_colors",
"<=",
"len",
"(",
"colors",
")",
"return",
"colors",
"[",
":",
"n_colors",
"]"
] | Returns a unique list of distinguishable colors. | [
"Returns",
"a",
"unique",
"list",
"of",
"distinguishable",
"colors",
"."
] | [
"\"\"\"Returns a unique list of distinguishable colors. These are taken from the\n default seaborn `colorblind` color palette.\n\n Parameters\n ----------\n n_colors\n The number of colors to return (max=10).\n \"\"\""
] | [
{
"param": "n_colors",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "n_colors",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def unique_colors(n_colors):
colors = [
(0.004, 0.451, 0.698),
(0.871, 0.561, 0.020),
(0.008, 0.620, 0.451),
(0.835, 0.369, 0.000),
(0.800, 0.471, 0.737),
(0.792, 0.569, 0.380),
(0.984, 0.686, 0.894),
(0.580, 0.580, 0.580),
(0.925, 0.882, 0.200),
(0.337, 0.706, 0.914),
]
assert n_colors <= len(colors)
return colors[:n_colors] | 609,973 | 258 |
169a56b092188966232ea21bbfe06f4de886fd89 | PythonCharmers/nbfastconvert | nbfastconvert/nbfastconvert.py | [
"MIT"
] | Python | hash_cell_repr | <not_specific> | def hash_cell_repr(cell):
"""
Create a string representation of the given cell
(a dictionary), ensuring the keys are sorted first.
"""
sorted_keys = sorted(cell.keys())
sorted_cell = {key: cell[key] for key in sorted_keys}
cell_hash = repr(sorted_cell)
return cell_hash |
Create a string representation of the given cell
(a dictionary), ensuring the keys are sorted first.
| Create a string representation of the given cell
(a dictionary), ensuring the keys are sorted first. | [
"Create",
"a",
"string",
"representation",
"of",
"the",
"given",
"cell",
"(",
"a",
"dictionary",
")",
"ensuring",
"the",
"keys",
"are",
"sorted",
"first",
"."
] | def hash_cell_repr(cell):
sorted_keys = sorted(cell.keys())
sorted_cell = {key: cell[key] for key in sorted_keys}
cell_hash = repr(sorted_cell)
return cell_hash | [
"def",
"hash_cell_repr",
"(",
"cell",
")",
":",
"sorted_keys",
"=",
"sorted",
"(",
"cell",
".",
"keys",
"(",
")",
")",
"sorted_cell",
"=",
"{",
"key",
":",
"cell",
"[",
"key",
"]",
"for",
"key",
"in",
"sorted_keys",
"}",
"cell_hash",
"=",
"repr",
"(",
"sorted_cell",
")",
"return",
"cell_hash"
] | Create a string representation of the given cell
(a dictionary), ensuring the keys are sorted first. | [
"Create",
"a",
"string",
"representation",
"of",
"the",
"given",
"cell",
"(",
"a",
"dictionary",
")",
"ensuring",
"the",
"keys",
"are",
"sorted",
"first",
"."
] | [
"\"\"\"\n Create a string representation of the given cell\n (a dictionary), ensuring the keys are sorted first.\n \"\"\""
] | [
{
"param": "cell",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cell",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def hash_cell_repr(cell):
sorted_keys = sorted(cell.keys())
sorted_cell = {key: cell[key] for key in sorted_keys}
cell_hash = repr(sorted_cell)
return cell_hash | 609,975 | 7 |
5a99dfc5c0625e606d57921e0162f944ea1208ee | alishakodibagkar/brainlit | brainlit/cloudreg/scripts/parastitcher.py | [
"Apache-2.0"
] | Python | collect_instructions | <not_specific> | def collect_instructions(inst):
"""
Collect the remanent part of a list of strings in a unique string
Input:
inst = Input list of strings
Output:
results = String containing all the elements of inst
"""
len_inst = len(inst)
if len_inst > 0:
for i in range(0, len_inst):
if i == 0:
results = str(inst[i])
else:
results = results + " " + str(inst[i])
else:
results = ""
return results |
Collect the remanent part of a list of strings in a unique string
Input:
inst = Input list of strings
Output:
results = String containing all the elements of inst
| Collect the remanent part of a list of strings in a unique string
Input:
inst = Input list of strings
Output:
results = String containing all the elements of inst | [
"Collect",
"the",
"remanent",
"part",
"of",
"a",
"list",
"of",
"strings",
"in",
"a",
"unique",
"string",
"Input",
":",
"inst",
"=",
"Input",
"list",
"of",
"strings",
"Output",
":",
"results",
"=",
"String",
"containing",
"all",
"the",
"elements",
"of",
"inst"
] | def collect_instructions(inst):
len_inst = len(inst)
if len_inst > 0:
for i in range(0, len_inst):
if i == 0:
results = str(inst[i])
else:
results = results + " " + str(inst[i])
else:
results = ""
return results | [
"def",
"collect_instructions",
"(",
"inst",
")",
":",
"len_inst",
"=",
"len",
"(",
"inst",
")",
"if",
"len_inst",
">",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len_inst",
")",
":",
"if",
"i",
"==",
"0",
":",
"results",
"=",
"str",
"(",
"inst",
"[",
"i",
"]",
")",
"else",
":",
"results",
"=",
"results",
"+",
"\" \"",
"+",
"str",
"(",
"inst",
"[",
"i",
"]",
")",
"else",
":",
"results",
"=",
"\"\"",
"return",
"results"
] | Collect the remanent part of a list of strings in a unique string
Input:
inst = Input list of strings
Output:
results = String containing all the elements of inst | [
"Collect",
"the",
"remanent",
"part",
"of",
"a",
"list",
"of",
"strings",
"in",
"a",
"unique",
"string",
"Input",
":",
"inst",
"=",
"Input",
"list",
"of",
"strings",
"Output",
":",
"results",
"=",
"String",
"containing",
"all",
"the",
"elements",
"of",
"inst"
] | [
"\"\"\"\n Collect the remanent part of a list of strings in a unique string\n Input:\n inst = Input list of strings\n Output:\n results = String containing all the elements of inst\n \"\"\""
] | [
{
"param": "inst",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "inst",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def collect_instructions(inst):
len_inst = len(inst)
if len_inst > 0:
for i in range(0, len_inst):
if i == 0:
results = str(inst[i])
else:
results = results + " " + str(inst[i])
else:
results = ""
return results | 609,976 | 356 |
fdfb8a2fba3ff01316a02f1f3387258114f495b5 | k-kapp/CycleDating | cycle_dating/Algos/eiisr.py | [
"Apache-2.0"
] | Python | combo_loop_help | null | def combo_loop_help(lst, curr_idx, curr_num, total_num, all_els, all_perms):
"""
Helper function for the combinations function defined below
"""
if all_els is None:
all_els = []
for i in range(curr_idx, len(lst)):
all_els.append(lst[i])
if curr_num == total_num:
all_perms.append(copy.deepcopy(all_els))
del all_els[-1]
else:
combo_loop_help(lst, i + 1, curr_num + 1, total_num, all_els, all_perms)
del all_els[-1] |
Helper function for the combinations function defined below
| Helper function for the combinations function defined below | [
"Helper",
"function",
"for",
"the",
"combinations",
"function",
"defined",
"below"
] | def combo_loop_help(lst, curr_idx, curr_num, total_num, all_els, all_perms):
if all_els is None:
all_els = []
for i in range(curr_idx, len(lst)):
all_els.append(lst[i])
if curr_num == total_num:
all_perms.append(copy.deepcopy(all_els))
del all_els[-1]
else:
combo_loop_help(lst, i + 1, curr_num + 1, total_num, all_els, all_perms)
del all_els[-1] | [
"def",
"combo_loop_help",
"(",
"lst",
",",
"curr_idx",
",",
"curr_num",
",",
"total_num",
",",
"all_els",
",",
"all_perms",
")",
":",
"if",
"all_els",
"is",
"None",
":",
"all_els",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"curr_idx",
",",
"len",
"(",
"lst",
")",
")",
":",
"all_els",
".",
"append",
"(",
"lst",
"[",
"i",
"]",
")",
"if",
"curr_num",
"==",
"total_num",
":",
"all_perms",
".",
"append",
"(",
"copy",
".",
"deepcopy",
"(",
"all_els",
")",
")",
"del",
"all_els",
"[",
"-",
"1",
"]",
"else",
":",
"combo_loop_help",
"(",
"lst",
",",
"i",
"+",
"1",
",",
"curr_num",
"+",
"1",
",",
"total_num",
",",
"all_els",
",",
"all_perms",
")",
"del",
"all_els",
"[",
"-",
"1",
"]"
] | Helper function for the combinations function defined below | [
"Helper",
"function",
"for",
"the",
"combinations",
"function",
"defined",
"below"
] | [
"\"\"\"\n Helper function for the combinations function defined below\n \"\"\""
] | [
{
"param": "lst",
"type": null
},
{
"param": "curr_idx",
"type": null
},
{
"param": "curr_num",
"type": null
},
{
"param": "total_num",
"type": null
},
{
"param": "all_els",
"type": null
},
{
"param": "all_perms",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "lst",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "curr_idx",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "curr_num",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "total_num",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "all_els",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "all_perms",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import copy
def combo_loop_help(lst, curr_idx, curr_num, total_num, all_els, all_perms):
if all_els is None:
all_els = []
for i in range(curr_idx, len(lst)):
all_els.append(lst[i])
if curr_num == total_num:
all_perms.append(copy.deepcopy(all_els))
del all_els[-1]
else:
combo_loop_help(lst, i + 1, curr_num + 1, total_num, all_els, all_perms)
del all_els[-1] | 609,977 | 601 |
51221623318f1138966ef288407d57aa78936a5a | aricsanders/pyMez3 | Code/Analysis/NISTUncertainty.py | [
"Unlicense"
] | Python | coax_power_S_NIST | <not_specific> | def coax_power_S_NIST(connector_type='N',frequency=1.):
"""Calculates SNIST for coax power measurements"""
if re.search('7',connector_type,re.IGNORECASE):
uncertainty_eff=.09+.01*frequency
elif re.search('N',connector_type,re.IGNORECASE):
uncertainty_eff=10**(-1.4+.04*frequency)
elif re.search('3.5',connector_type,re.IGNORECASE):
if frequency<.05:
uncertainty_eff=10**(-1.4+.04*frequency)
elif frequency>=.05 and frequency<=18.:
uncertainty_eff=.25
else:
uncertainty_eff=.25
return [uncertainty_eff] | Calculates SNIST for coax power measurements | Calculates SNIST for coax power measurements | [
"Calculates",
"SNIST",
"for",
"coax",
"power",
"measurements"
] | def coax_power_S_NIST(connector_type='N',frequency=1.):
if re.search('7',connector_type,re.IGNORECASE):
uncertainty_eff=.09+.01*frequency
elif re.search('N',connector_type,re.IGNORECASE):
uncertainty_eff=10**(-1.4+.04*frequency)
elif re.search('3.5',connector_type,re.IGNORECASE):
if frequency<.05:
uncertainty_eff=10**(-1.4+.04*frequency)
elif frequency>=.05 and frequency<=18.:
uncertainty_eff=.25
else:
uncertainty_eff=.25
return [uncertainty_eff] | [
"def",
"coax_power_S_NIST",
"(",
"connector_type",
"=",
"'N'",
",",
"frequency",
"=",
"1.",
")",
":",
"if",
"re",
".",
"search",
"(",
"'7'",
",",
"connector_type",
",",
"re",
".",
"IGNORECASE",
")",
":",
"uncertainty_eff",
"=",
".09",
"+",
".01",
"*",
"frequency",
"elif",
"re",
".",
"search",
"(",
"'N'",
",",
"connector_type",
",",
"re",
".",
"IGNORECASE",
")",
":",
"uncertainty_eff",
"=",
"10",
"**",
"(",
"-",
"1.4",
"+",
".04",
"*",
"frequency",
")",
"elif",
"re",
".",
"search",
"(",
"'3.5'",
",",
"connector_type",
",",
"re",
".",
"IGNORECASE",
")",
":",
"if",
"frequency",
"<",
".05",
":",
"uncertainty_eff",
"=",
"10",
"**",
"(",
"-",
"1.4",
"+",
".04",
"*",
"frequency",
")",
"elif",
"frequency",
">=",
".05",
"and",
"frequency",
"<=",
"18.",
":",
"uncertainty_eff",
"=",
".25",
"else",
":",
"uncertainty_eff",
"=",
".25",
"return",
"[",
"uncertainty_eff",
"]"
] | Calculates SNIST for coax power measurements | [
"Calculates",
"SNIST",
"for",
"coax",
"power",
"measurements"
] | [
"\"\"\"Calculates SNIST for coax power measurements\"\"\""
] | [
{
"param": "connector_type",
"type": null
},
{
"param": "frequency",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "connector_type",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "frequency",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def coax_power_S_NIST(connector_type='N',frequency=1.):
if re.search('7',connector_type,re.IGNORECASE):
uncertainty_eff=.09+.01*frequency
elif re.search('N',connector_type,re.IGNORECASE):
uncertainty_eff=10**(-1.4+.04*frequency)
elif re.search('3.5',connector_type,re.IGNORECASE):
if frequency<.05:
uncertainty_eff=10**(-1.4+.04*frequency)
elif frequency>=.05 and frequency<=18.:
uncertainty_eff=.25
else:
uncertainty_eff=.25
return [uncertainty_eff] | 609,978 | 478 |
3f8f8b7752620f5d84d7126c4ed2b8f8b9582df5 | pupamanyu/cicd | pipeline2/source-code/dependencies/helper_function.py | [
"Apache-2.0"
] | Python | delete_table | None | def delete_table(bq_client: bigquery.Client,
dataset_id: str,
table_name: str) -> None:
"""Deletes a specified table in BigQuery.
Args:
bq_client: bigquery.Client object.
dataset_id: String holding ID of dataset
table_name: String of table name to delete
Returns:
None; Deletes a table in BigQuery
"""
dataset_ref = bq_client.dataset(dataset_id)
table_ref = dataset_ref.table(table_name)
bq_client.delete_table(table=table_ref) | Deletes a specified table in BigQuery.
Args:
bq_client: bigquery.Client object.
dataset_id: String holding ID of dataset
table_name: String of table name to delete
Returns:
None; Deletes a table in BigQuery
| Deletes a specified table in BigQuery. | [
"Deletes",
"a",
"specified",
"table",
"in",
"BigQuery",
"."
] | def delete_table(bq_client: bigquery.Client,
dataset_id: str,
table_name: str) -> None:
dataset_ref = bq_client.dataset(dataset_id)
table_ref = dataset_ref.table(table_name)
bq_client.delete_table(table=table_ref) | [
"def",
"delete_table",
"(",
"bq_client",
":",
"bigquery",
".",
"Client",
",",
"dataset_id",
":",
"str",
",",
"table_name",
":",
"str",
")",
"->",
"None",
":",
"dataset_ref",
"=",
"bq_client",
".",
"dataset",
"(",
"dataset_id",
")",
"table_ref",
"=",
"dataset_ref",
".",
"table",
"(",
"table_name",
")",
"bq_client",
".",
"delete_table",
"(",
"table",
"=",
"table_ref",
")"
] | Deletes a specified table in BigQuery. | [
"Deletes",
"a",
"specified",
"table",
"in",
"BigQuery",
"."
] | [
"\"\"\"Deletes a specified table in BigQuery.\n\n Args:\n bq_client: bigquery.Client object.\n dataset_id: String holding ID of dataset\n table_name: String of table name to delete\n\n Returns:\n None; Deletes a table in BigQuery\n \"\"\""
] | [
{
"param": "bq_client",
"type": "bigquery.Client"
},
{
"param": "dataset_id",
"type": "str"
},
{
"param": "table_name",
"type": "str"
}
] | {
"returns": [
{
"docstring": "None; Deletes a table in BigQuery",
"docstring_tokens": [
"None",
";",
"Deletes",
"a",
"table",
"in",
"BigQuery"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "bq_client",
"type": "bigquery.Client",
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "dataset_id",
"type": "str",
"docstring": "String holding ID of dataset",
"docstring_tokens": [
"String",
"holding",
"ID",
"of",
"dataset"
],
"default": null,
"is_optional": null
},
{
"identifier": "table_name",
"type": "str",
"docstring": "String of table name to delete",
"docstring_tokens": [
"String",
"of",
"table",
"name",
"to",
"delete"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def delete_table(bq_client: bigquery.Client,
dataset_id: str,
table_name: str) -> None:
dataset_ref = bq_client.dataset(dataset_id)
table_ref = dataset_ref.table(table_name)
bq_client.delete_table(table=table_ref) | 609,979 | 774 |
91a10ba9337d4a5565f10c59d1a9bc593e3edb4a | googleads/html5-to-dfp | x5_converters.py | [
"Apache-2.0"
] | Python | _edge_js_match_function | <not_specific> | def _edge_js_match_function(snippet, assets, match):
"""Returns a match function to replace asset names with x5 variables."""
name = match.group(1)
if '%' in name:
name = urllib.unquote(name)
try:
asset = assets[name[2:-2]]
except KeyError:
return match.group(1)
snippet.assets.append(asset.name)
if name.startswith(r'\"') or name.startswith(r"\'"):
# From this in edge js: '<a href=\"asset.name\">'
# To this: '<a href=[\"' + ]__x5__.macro_ID[ + '\"]>'
return "' + __x5__.macro_%s + '" % asset.id
elif name[1] in ('"', "'"):
# From this edge js: var g23=['"]970x90.jpg['"],
# To this: var g23=__x5__.macro_ID,
return '%s__x5__.macro_%s%s' % (name[0], asset.id, name[-1])
return '%s__x5__.macro_%s%s' % (name[:2], asset.id, name[-2:]) | Returns a match function to replace asset names with x5 variables. | Returns a match function to replace asset names with x5 variables. | [
"Returns",
"a",
"match",
"function",
"to",
"replace",
"asset",
"names",
"with",
"x5",
"variables",
"."
] | def _edge_js_match_function(snippet, assets, match):
name = match.group(1)
if '%' in name:
name = urllib.unquote(name)
try:
asset = assets[name[2:-2]]
except KeyError:
return match.group(1)
snippet.assets.append(asset.name)
if name.startswith(r'\"') or name.startswith(r"\'"):
return "' + __x5__.macro_%s + '" % asset.id
elif name[1] in ('"', "'"):
return '%s__x5__.macro_%s%s' % (name[0], asset.id, name[-1])
return '%s__x5__.macro_%s%s' % (name[:2], asset.id, name[-2:]) | [
"def",
"_edge_js_match_function",
"(",
"snippet",
",",
"assets",
",",
"match",
")",
":",
"name",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"'%'",
"in",
"name",
":",
"name",
"=",
"urllib",
".",
"unquote",
"(",
"name",
")",
"try",
":",
"asset",
"=",
"assets",
"[",
"name",
"[",
"2",
":",
"-",
"2",
"]",
"]",
"except",
"KeyError",
":",
"return",
"match",
".",
"group",
"(",
"1",
")",
"snippet",
".",
"assets",
".",
"append",
"(",
"asset",
".",
"name",
")",
"if",
"name",
".",
"startswith",
"(",
"r'\\\"'",
")",
"or",
"name",
".",
"startswith",
"(",
"r\"\\'\"",
")",
":",
"return",
"\"' + __x5__.macro_%s + '\"",
"%",
"asset",
".",
"id",
"elif",
"name",
"[",
"1",
"]",
"in",
"(",
"'\"'",
",",
"\"'\"",
")",
":",
"return",
"'%s__x5__.macro_%s%s'",
"%",
"(",
"name",
"[",
"0",
"]",
",",
"asset",
".",
"id",
",",
"name",
"[",
"-",
"1",
"]",
")",
"return",
"'%s__x5__.macro_%s%s'",
"%",
"(",
"name",
"[",
":",
"2",
"]",
",",
"asset",
".",
"id",
",",
"name",
"[",
"-",
"2",
":",
"]",
")"
] | Returns a match function to replace asset names with x5 variables. | [
"Returns",
"a",
"match",
"function",
"to",
"replace",
"asset",
"names",
"with",
"x5",
"variables",
"."
] | [
"\"\"\"Returns a match function to replace asset names with x5 variables.\"\"\"",
"# From this in edge js: '<a href=\\\"asset.name\\\">'",
"# To this: '<a href=[\\\"' + ]__x5__.macro_ID[ + '\\\"]>'",
"# From this edge js: var g23=['\"]970x90.jpg['\"],",
"# To this: var g23=__x5__.macro_ID,"
] | [
{
"param": "snippet",
"type": null
},
{
"param": "assets",
"type": null
},
{
"param": "match",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "snippet",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "assets",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "match",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import urllib
def _edge_js_match_function(snippet, assets, match):
name = match.group(1)
if '%' in name:
name = urllib.unquote(name)
try:
asset = assets[name[2:-2]]
except KeyError:
return match.group(1)
snippet.assets.append(asset.name)
if name.startswith(r'\"') or name.startswith(r"\'"):
return "' + __x5__.macro_%s + '" % asset.id
elif name[1] in ('"', "'"):
return '%s__x5__.macro_%s%s' % (name[0], asset.id, name[-1])
return '%s__x5__.macro_%s%s' % (name[:2], asset.id, name[-2:]) | 609,980 | 962 |
71f1405f9ffdb2095af0d2119e488a001c7e1ef0 | bopopescu/phyG | eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/mercurial/phases.py | [
"CC-BY-3.0"
] | Python | retractboundary | null | def retractboundary(repo, targetphase, nodes):
"""Set nodes back to a phase changing other nodes phases if necessary.
This function move boundary *backward* this means that all nodes are set
in the target phase or kept in a *higher* phase.
Simplify boundary to contains phase roots only."""
currentroots = repo._phaseroots[targetphase]
newroots = [n for n in nodes if repo[n].phase() < targetphase]
if newroots:
currentroots.update(newroots)
ctxs = repo.set('roots(%ln::)', currentroots)
currentroots.intersection_update(ctx.node() for ctx in ctxs)
if '_phaserev' in vars(repo):
del repo._phaserev
repo._dirtyphases = True | Set nodes back to a phase changing other nodes phases if necessary.
This function move boundary *backward* this means that all nodes are set
in the target phase or kept in a *higher* phase.
Simplify boundary to contains phase roots only. | Set nodes back to a phase changing other nodes phases if necessary.
This function move boundary *backward* this means that all nodes are set
in the target phase or kept in a *higher* phase.
Simplify boundary to contains phase roots only. | [
"Set",
"nodes",
"back",
"to",
"a",
"phase",
"changing",
"other",
"nodes",
"phases",
"if",
"necessary",
".",
"This",
"function",
"move",
"boundary",
"*",
"backward",
"*",
"this",
"means",
"that",
"all",
"nodes",
"are",
"set",
"in",
"the",
"target",
"phase",
"or",
"kept",
"in",
"a",
"*",
"higher",
"*",
"phase",
".",
"Simplify",
"boundary",
"to",
"contains",
"phase",
"roots",
"only",
"."
] | def retractboundary(repo, targetphase, nodes):
currentroots = repo._phaseroots[targetphase]
newroots = [n for n in nodes if repo[n].phase() < targetphase]
if newroots:
currentroots.update(newroots)
ctxs = repo.set('roots(%ln::)', currentroots)
currentroots.intersection_update(ctx.node() for ctx in ctxs)
if '_phaserev' in vars(repo):
del repo._phaserev
repo._dirtyphases = True | [
"def",
"retractboundary",
"(",
"repo",
",",
"targetphase",
",",
"nodes",
")",
":",
"currentroots",
"=",
"repo",
".",
"_phaseroots",
"[",
"targetphase",
"]",
"newroots",
"=",
"[",
"n",
"for",
"n",
"in",
"nodes",
"if",
"repo",
"[",
"n",
"]",
".",
"phase",
"(",
")",
"<",
"targetphase",
"]",
"if",
"newroots",
":",
"currentroots",
".",
"update",
"(",
"newroots",
")",
"ctxs",
"=",
"repo",
".",
"set",
"(",
"'roots(%ln::)'",
",",
"currentroots",
")",
"currentroots",
".",
"intersection_update",
"(",
"ctx",
".",
"node",
"(",
")",
"for",
"ctx",
"in",
"ctxs",
")",
"if",
"'_phaserev'",
"in",
"vars",
"(",
"repo",
")",
":",
"del",
"repo",
".",
"_phaserev",
"repo",
".",
"_dirtyphases",
"=",
"True"
] | Set nodes back to a phase changing other nodes phases if necessary. | [
"Set",
"nodes",
"back",
"to",
"a",
"phase",
"changing",
"other",
"nodes",
"phases",
"if",
"necessary",
"."
] | [
"\"\"\"Set nodes back to a phase changing other nodes phases if necessary.\n\n This function move boundary *backward* this means that all nodes are set\n in the target phase or kept in a *higher* phase.\n\n Simplify boundary to contains phase roots only.\"\"\""
] | [
{
"param": "repo",
"type": null
},
{
"param": "targetphase",
"type": null
},
{
"param": "nodes",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "repo",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "targetphase",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "nodes",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def retractboundary(repo, targetphase, nodes):
currentroots = repo._phaseroots[targetphase]
newroots = [n for n in nodes if repo[n].phase() < targetphase]
if newroots:
currentroots.update(newroots)
ctxs = repo.set('roots(%ln::)', currentroots)
currentroots.intersection_update(ctx.node() for ctx in ctxs)
if '_phaserev' in vars(repo):
del repo._phaserev
repo._dirtyphases = True | 609,981 | 879 |
0a8c6235dc313325543b454f0977dc14770d0e7a | jcsambangi/bme590final_backend | conftest.py | [
"MIT"
] | Python | mktestEfile | <not_specific> | def mktestEfile(tmpdir_factory):
"""Creates test file for unit testing in tempEdir.
:param tmpdir_factory:
:returns: path of file for pin 9307 test
"""
tempEdir = tmpdir_factory.mktemp('tempE')
testFile = tempEdir.join('L9307L0.BIN')
test9307Path = testFile.strpath
with open('./testing/9307L0.BIN', 'rb') as binaryFile:
hold = binaryFile.read()
with open(test9307Path, 'wb') as binaryFile:
binaryFile.write(hold)
return test9307Path | Creates test file for unit testing in tempEdir.
:param tmpdir_factory:
:returns: path of file for pin 9307 test
| Creates test file for unit testing in tempEdir. | [
"Creates",
"test",
"file",
"for",
"unit",
"testing",
"in",
"tempEdir",
"."
] | def mktestEfile(tmpdir_factory):
tempEdir = tmpdir_factory.mktemp('tempE')
testFile = tempEdir.join('L9307L0.BIN')
test9307Path = testFile.strpath
with open('./testing/9307L0.BIN', 'rb') as binaryFile:
hold = binaryFile.read()
with open(test9307Path, 'wb') as binaryFile:
binaryFile.write(hold)
return test9307Path | [
"def",
"mktestEfile",
"(",
"tmpdir_factory",
")",
":",
"tempEdir",
"=",
"tmpdir_factory",
".",
"mktemp",
"(",
"'tempE'",
")",
"testFile",
"=",
"tempEdir",
".",
"join",
"(",
"'L9307L0.BIN'",
")",
"test9307Path",
"=",
"testFile",
".",
"strpath",
"with",
"open",
"(",
"'./testing/9307L0.BIN'",
",",
"'rb'",
")",
"as",
"binaryFile",
":",
"hold",
"=",
"binaryFile",
".",
"read",
"(",
")",
"with",
"open",
"(",
"test9307Path",
",",
"'wb'",
")",
"as",
"binaryFile",
":",
"binaryFile",
".",
"write",
"(",
"hold",
")",
"return",
"test9307Path"
] | Creates test file for unit testing in tempEdir. | [
"Creates",
"test",
"file",
"for",
"unit",
"testing",
"in",
"tempEdir",
"."
] | [
"\"\"\"Creates test file for unit testing in tempEdir.\n\n :param tmpdir_factory:\n :returns: path of file for pin 9307 test\n \"\"\""
] | [
{
"param": "tmpdir_factory",
"type": null
}
] | {
"returns": [
{
"docstring": "path of file for pin 9307 test",
"docstring_tokens": [
"path",
"of",
"file",
"for",
"pin",
"9307",
"test"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "tmpdir_factory",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def mktestEfile(tmpdir_factory):
tempEdir = tmpdir_factory.mktemp('tempE')
testFile = tempEdir.join('L9307L0.BIN')
test9307Path = testFile.strpath
with open('./testing/9307L0.BIN', 'rb') as binaryFile:
hold = binaryFile.read()
with open(test9307Path, 'wb') as binaryFile:
binaryFile.write(hold)
return test9307Path | 609,982 | 624 |
15ee96550194c3702f2bc0ad7c5d1a6fae143157 | benradford/clef2019-protestnews-radford | final_task_3/evaluate_task3.py | [
"MIT"
] | Python | calc_metrics | <not_specific> | def calc_metrics(tp, p, t, percent=True):
"""
compute overall precision, recall and FB1 (default values are 0.0)
if percent is True, return 100 * original decimal value
"""
if p != 0:
precision = float(tp) / p
else:
precision = 0
if t !=0:
recall = float(tp) / t
else:
recall = 0
if precision + recall >0 :
fb1 = 2 * precision * recall / (precision + recall)
else:
fb1 = 0
if percent:
return 100 * precision, 100 * recall, 100 * fb1
else:
return precision, recall, fb1 |
compute overall precision, recall and FB1 (default values are 0.0)
if percent is True, return 100 * original decimal value
| compute overall precision, recall and FB1 (default values are 0.0)
if percent is True, return 100 * original decimal value | [
"compute",
"overall",
"precision",
"recall",
"and",
"FB1",
"(",
"default",
"values",
"are",
"0",
".",
"0",
")",
"if",
"percent",
"is",
"True",
"return",
"100",
"*",
"original",
"decimal",
"value"
] | def calc_metrics(tp, p, t, percent=True):
if p != 0:
precision = float(tp) / p
else:
precision = 0
if t !=0:
recall = float(tp) / t
else:
recall = 0
if precision + recall >0 :
fb1 = 2 * precision * recall / (precision + recall)
else:
fb1 = 0
if percent:
return 100 * precision, 100 * recall, 100 * fb1
else:
return precision, recall, fb1 | [
"def",
"calc_metrics",
"(",
"tp",
",",
"p",
",",
"t",
",",
"percent",
"=",
"True",
")",
":",
"if",
"p",
"!=",
"0",
":",
"precision",
"=",
"float",
"(",
"tp",
")",
"/",
"p",
"else",
":",
"precision",
"=",
"0",
"if",
"t",
"!=",
"0",
":",
"recall",
"=",
"float",
"(",
"tp",
")",
"/",
"t",
"else",
":",
"recall",
"=",
"0",
"if",
"precision",
"+",
"recall",
">",
"0",
":",
"fb1",
"=",
"2",
"*",
"precision",
"*",
"recall",
"/",
"(",
"precision",
"+",
"recall",
")",
"else",
":",
"fb1",
"=",
"0",
"if",
"percent",
":",
"return",
"100",
"*",
"precision",
",",
"100",
"*",
"recall",
",",
"100",
"*",
"fb1",
"else",
":",
"return",
"precision",
",",
"recall",
",",
"fb1"
] | compute overall precision, recall and FB1 (default values are 0.0)
if percent is True, return 100 * original decimal value | [
"compute",
"overall",
"precision",
"recall",
"and",
"FB1",
"(",
"default",
"values",
"are",
"0",
".",
"0",
")",
"if",
"percent",
"is",
"True",
"return",
"100",
"*",
"original",
"decimal",
"value"
] | [
"\"\"\"\n compute overall precision, recall and FB1 (default values are 0.0)\n if percent is True, return 100 * original decimal value\n \"\"\""
] | [
{
"param": "tp",
"type": null
},
{
"param": "p",
"type": null
},
{
"param": "t",
"type": null
},
{
"param": "percent",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "tp",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "p",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "t",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "percent",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def calc_metrics(tp, p, t, percent=True):
if p != 0:
precision = float(tp) / p
else:
precision = 0
if t !=0:
recall = float(tp) / t
else:
recall = 0
if precision + recall >0 :
fb1 = 2 * precision * recall / (precision + recall)
else:
fb1 = 0
if percent:
return 100 * precision, 100 * recall, 100 * fb1
else:
return precision, recall, fb1 | 609,983 | 583 |
659f18decd940f2bf4a4a26f094d1dd2f6f23414 | hochoy18/ray | python/ray/function_manager.py | [
"Apache-2.0"
] | Python | from_function | <not_specific> | def from_function(cls, function):
"""Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a python function. If a function is a class function, it should
not be used by this function.
Args:
cls: Current class which is required argument for classmethod.
function: the python function used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the function.
"""
module_name = function.__module__
function_name = function.__name__
class_name = ""
function_source_hasher = hashlib.sha1()
try:
# If we are running a script or are in IPython, include the source
# code in the hash.
source = inspect.getsource(function).encode("ascii")
function_source_hasher.update(source)
function_source_hash = function_source_hasher.digest()
except (IOError, OSError, TypeError):
# Source code may not be available:
# e.g. Cython or Python interpreter.
function_source_hash = b""
return cls(module_name, function_name, class_name,
function_source_hash) | Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a python function. If a function is a class function, it should
not be used by this function.
Args:
cls: Current class which is required argument for classmethod.
function: the python function used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the function.
| Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a python function. If a function is a class function, it should
not be used by this function. | [
"Create",
"a",
"FunctionDescriptor",
"from",
"a",
"function",
"instance",
".",
"This",
"function",
"is",
"used",
"to",
"create",
"the",
"function",
"descriptor",
"from",
"a",
"python",
"function",
".",
"If",
"a",
"function",
"is",
"a",
"class",
"function",
"it",
"should",
"not",
"be",
"used",
"by",
"this",
"function",
"."
] | def from_function(cls, function):
module_name = function.__module__
function_name = function.__name__
class_name = ""
function_source_hasher = hashlib.sha1()
try:
source = inspect.getsource(function).encode("ascii")
function_source_hasher.update(source)
function_source_hash = function_source_hasher.digest()
except (IOError, OSError, TypeError):
function_source_hash = b""
return cls(module_name, function_name, class_name,
function_source_hash) | [
"def",
"from_function",
"(",
"cls",
",",
"function",
")",
":",
"module_name",
"=",
"function",
".",
"__module__",
"function_name",
"=",
"function",
".",
"__name__",
"class_name",
"=",
"\"\"",
"function_source_hasher",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"try",
":",
"source",
"=",
"inspect",
".",
"getsource",
"(",
"function",
")",
".",
"encode",
"(",
"\"ascii\"",
")",
"function_source_hasher",
".",
"update",
"(",
"source",
")",
"function_source_hash",
"=",
"function_source_hasher",
".",
"digest",
"(",
")",
"except",
"(",
"IOError",
",",
"OSError",
",",
"TypeError",
")",
":",
"function_source_hash",
"=",
"b\"\"",
"return",
"cls",
"(",
"module_name",
",",
"function_name",
",",
"class_name",
",",
"function_source_hash",
")"
] | Create a FunctionDescriptor from a function instance. | [
"Create",
"a",
"FunctionDescriptor",
"from",
"a",
"function",
"instance",
"."
] | [
"\"\"\"Create a FunctionDescriptor from a function instance.\n\n This function is used to create the function descriptor from\n a python function. If a function is a class function, it should\n not be used by this function.\n\n Args:\n cls: Current class which is required argument for classmethod.\n function: the python function used to create the function\n descriptor.\n\n Returns:\n The FunctionDescriptor instance created according to the function.\n \"\"\"",
"# If we are running a script or are in IPython, include the source",
"# code in the hash.",
"# Source code may not be available:",
"# e.g. Cython or Python interpreter."
] | [
{
"param": "cls",
"type": null
},
{
"param": "function",
"type": null
}
] | {
"returns": [
{
"docstring": "The FunctionDescriptor instance created according to the function.",
"docstring_tokens": [
"The",
"FunctionDescriptor",
"instance",
"created",
"according",
"to",
"the",
"function",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": "Current class which is required argument for classmethod.",
"docstring_tokens": [
"Current",
"class",
"which",
"is",
"required",
"argument",
"for",
"classmethod",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "function",
"type": null,
"docstring": "the python function used to create the function\ndescriptor.",
"docstring_tokens": [
"the",
"python",
"function",
"used",
"to",
"create",
"the",
"function",
"descriptor",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import inspect
import hashlib
def from_function(cls, function):
module_name = function.__module__
function_name = function.__name__
class_name = ""
function_source_hasher = hashlib.sha1()
try:
source = inspect.getsource(function).encode("ascii")
function_source_hasher.update(source)
function_source_hash = function_source_hasher.digest()
except (IOError, OSError, TypeError):
function_source_hash = b""
return cls(module_name, function_name, class_name,
function_source_hash) | 609,984 | 557 |
026ae5e68628359463223573a01b114e27a7cdfa | reib2/Lab-3-Flood-Warning | floodsystem/geo.py | [
"MIT"
] | Python | stations_by_river | <not_specific> | def stations_by_river(stations):
"This module returns a dictionary that maps river names to a list of station objects on a given river"
names_and_stations = {}
for station in stations:
river = station.river
if river not in names_and_stations.keys():
list_stations = [station.name]
names_and_stations.update({river : list_stations})
else:
list_stations = names_and_stations[river]
list_stations.append(station.name)
names_and_stations.update({river : list_stations})
return names_and_stations | This module returns a dictionary that maps river names to a list of station objects on a given river | This module returns a dictionary that maps river names to a list of station objects on a given river | [
"This",
"module",
"returns",
"a",
"dictionary",
"that",
"maps",
"river",
"names",
"to",
"a",
"list",
"of",
"station",
"objects",
"on",
"a",
"given",
"river"
] | def stations_by_river(stations):
names_and_stations = {}
for station in stations:
river = station.river
if river not in names_and_stations.keys():
list_stations = [station.name]
names_and_stations.update({river : list_stations})
else:
list_stations = names_and_stations[river]
list_stations.append(station.name)
names_and_stations.update({river : list_stations})
return names_and_stations | [
"def",
"stations_by_river",
"(",
"stations",
")",
":",
"names_and_stations",
"=",
"{",
"}",
"for",
"station",
"in",
"stations",
":",
"river",
"=",
"station",
".",
"river",
"if",
"river",
"not",
"in",
"names_and_stations",
".",
"keys",
"(",
")",
":",
"list_stations",
"=",
"[",
"station",
".",
"name",
"]",
"names_and_stations",
".",
"update",
"(",
"{",
"river",
":",
"list_stations",
"}",
")",
"else",
":",
"list_stations",
"=",
"names_and_stations",
"[",
"river",
"]",
"list_stations",
".",
"append",
"(",
"station",
".",
"name",
")",
"names_and_stations",
".",
"update",
"(",
"{",
"river",
":",
"list_stations",
"}",
")",
"return",
"names_and_stations"
] | This module returns a dictionary that maps river names to a list of station objects on a given river | [
"This",
"module",
"returns",
"a",
"dictionary",
"that",
"maps",
"river",
"names",
"to",
"a",
"list",
"of",
"station",
"objects",
"on",
"a",
"given",
"river"
] | [
"\"This module returns a dictionary that maps river names to a list of station objects on a given river\""
] | [
{
"param": "stations",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "stations",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def stations_by_river(stations):
names_and_stations = {}
for station in stations:
river = station.river
if river not in names_and_stations.keys():
list_stations = [station.name]
names_and_stations.update({river : list_stations})
else:
list_stations = names_and_stations[river]
list_stations.append(station.name)
names_and_stations.update({river : list_stations})
return names_and_stations | 609,985 | 550 |
23af979f80929defbda6791684ee58f8f0dc81a4 | hallee/espresso-arm | remi/remi/server.py | [
"MIT"
] | Python | gui_update_children_version | <not_specific> | def gui_update_children_version(client, leaf):
""" This function is called when a leaf is updated by gui_updater
and so, children does not need graphical update, it is only
required to update the last version of the dictionaries
"""
if not hasattr(leaf, 'attributes'):
return False
leaf.attributes.__lastversion__ = leaf.attributes.__version__
leaf.style.__lastversion__ = leaf.style.__version__
leaf.children.__lastversion__ = leaf.children.__version__
for subleaf in leaf.children.values():
gui_update_children_version(client, subleaf) | This function is called when a leaf is updated by gui_updater
and so, children does not need graphical update, it is only
required to update the last version of the dictionaries
| This function is called when a leaf is updated by gui_updater
and so, children does not need graphical update, it is only
required to update the last version of the dictionaries | [
"This",
"function",
"is",
"called",
"when",
"a",
"leaf",
"is",
"updated",
"by",
"gui_updater",
"and",
"so",
"children",
"does",
"not",
"need",
"graphical",
"update",
"it",
"is",
"only",
"required",
"to",
"update",
"the",
"last",
"version",
"of",
"the",
"dictionaries"
] | def gui_update_children_version(client, leaf):
if not hasattr(leaf, 'attributes'):
return False
leaf.attributes.__lastversion__ = leaf.attributes.__version__
leaf.style.__lastversion__ = leaf.style.__version__
leaf.children.__lastversion__ = leaf.children.__version__
for subleaf in leaf.children.values():
gui_update_children_version(client, subleaf) | [
"def",
"gui_update_children_version",
"(",
"client",
",",
"leaf",
")",
":",
"if",
"not",
"hasattr",
"(",
"leaf",
",",
"'attributes'",
")",
":",
"return",
"False",
"leaf",
".",
"attributes",
".",
"__lastversion__",
"=",
"leaf",
".",
"attributes",
".",
"__version__",
"leaf",
".",
"style",
".",
"__lastversion__",
"=",
"leaf",
".",
"style",
".",
"__version__",
"leaf",
".",
"children",
".",
"__lastversion__",
"=",
"leaf",
".",
"children",
".",
"__version__",
"for",
"subleaf",
"in",
"leaf",
".",
"children",
".",
"values",
"(",
")",
":",
"gui_update_children_version",
"(",
"client",
",",
"subleaf",
")"
] | This function is called when a leaf is updated by gui_updater
and so, children does not need graphical update, it is only
required to update the last version of the dictionaries | [
"This",
"function",
"is",
"called",
"when",
"a",
"leaf",
"is",
"updated",
"by",
"gui_updater",
"and",
"so",
"children",
"does",
"not",
"need",
"graphical",
"update",
"it",
"is",
"only",
"required",
"to",
"update",
"the",
"last",
"version",
"of",
"the",
"dictionaries"
] | [
"\"\"\" This function is called when a leaf is updated by gui_updater\n and so, children does not need graphical update, it is only\n required to update the last version of the dictionaries\n \"\"\""
] | [
{
"param": "client",
"type": null
},
{
"param": "leaf",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "client",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "leaf",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def gui_update_children_version(client, leaf):
if not hasattr(leaf, 'attributes'):
return False
leaf.attributes.__lastversion__ = leaf.attributes.__version__
leaf.style.__lastversion__ = leaf.style.__version__
leaf.children.__lastversion__ = leaf.children.__version__
for subleaf in leaf.children.values():
gui_update_children_version(client, subleaf) | 609,986 | 926 |
efc613838a527141a647763f61061c28d566093d | fronzbot/scripts | cadence/getDeviceUsage.py | [
"MIT"
] | Python | run | <not_specific> | def run(treeFile, libName):
"""Extract dictionary of devices from tree."""
with open(treeFile, 'r') as fh:
lines = fh.readlines()
deviceUsage = {};
print("...Processing List.")
for line in lines:
line_clean = line.lstrip()
line_split = line_clean.split(" ")
if line_split[0] == libName:
device = line_split[1]
count_string = line_split[-1]
count = re.sub(r'[()]', '', count_string)
try:
deviceUsage[device] += int(count)
except KeyError:
print("...Found device {}".format(device))
deviceUsage[device] = int(count)
log_file = os.path.join(os.getcwd(), "log_getDeviceUsage.txt")
print("...Writing to {}".format(log_file))
with open(log_file, 'w') as fh:
fh.write("Output for {}\n".format(treeFile))
fh.write("---\n")
for device, count in sorted(deviceUsage.items()):
fh.write("{}: {}\n".format(device, count))
return | Extract dictionary of devices from tree. | Extract dictionary of devices from tree. | [
"Extract",
"dictionary",
"of",
"devices",
"from",
"tree",
"."
] | def run(treeFile, libName):
with open(treeFile, 'r') as fh:
lines = fh.readlines()
deviceUsage = {};
print("...Processing List.")
for line in lines:
line_clean = line.lstrip()
line_split = line_clean.split(" ")
if line_split[0] == libName:
device = line_split[1]
count_string = line_split[-1]
count = re.sub(r'[()]', '', count_string)
try:
deviceUsage[device] += int(count)
except KeyError:
print("...Found device {}".format(device))
deviceUsage[device] = int(count)
log_file = os.path.join(os.getcwd(), "log_getDeviceUsage.txt")
print("...Writing to {}".format(log_file))
with open(log_file, 'w') as fh:
fh.write("Output for {}\n".format(treeFile))
fh.write("---\n")
for device, count in sorted(deviceUsage.items()):
fh.write("{}: {}\n".format(device, count))
return | [
"def",
"run",
"(",
"treeFile",
",",
"libName",
")",
":",
"with",
"open",
"(",
"treeFile",
",",
"'r'",
")",
"as",
"fh",
":",
"lines",
"=",
"fh",
".",
"readlines",
"(",
")",
"deviceUsage",
"=",
"{",
"}",
";",
"print",
"(",
"\"...Processing List.\"",
")",
"for",
"line",
"in",
"lines",
":",
"line_clean",
"=",
"line",
".",
"lstrip",
"(",
")",
"line_split",
"=",
"line_clean",
".",
"split",
"(",
"\" \"",
")",
"if",
"line_split",
"[",
"0",
"]",
"==",
"libName",
":",
"device",
"=",
"line_split",
"[",
"1",
"]",
"count_string",
"=",
"line_split",
"[",
"-",
"1",
"]",
"count",
"=",
"re",
".",
"sub",
"(",
"r'[()]'",
",",
"''",
",",
"count_string",
")",
"try",
":",
"deviceUsage",
"[",
"device",
"]",
"+=",
"int",
"(",
"count",
")",
"except",
"KeyError",
":",
"print",
"(",
"\"...Found device {}\"",
".",
"format",
"(",
"device",
")",
")",
"deviceUsage",
"[",
"device",
"]",
"=",
"int",
"(",
"count",
")",
"log_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"\"log_getDeviceUsage.txt\"",
")",
"print",
"(",
"\"...Writing to {}\"",
".",
"format",
"(",
"log_file",
")",
")",
"with",
"open",
"(",
"log_file",
",",
"'w'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"\"Output for {}\\n\"",
".",
"format",
"(",
"treeFile",
")",
")",
"fh",
".",
"write",
"(",
"\"---\\n\"",
")",
"for",
"device",
",",
"count",
"in",
"sorted",
"(",
"deviceUsage",
".",
"items",
"(",
")",
")",
":",
"fh",
".",
"write",
"(",
"\"{}: {}\\n\"",
".",
"format",
"(",
"device",
",",
"count",
")",
")",
"return"
] | Extract dictionary of devices from tree. | [
"Extract",
"dictionary",
"of",
"devices",
"from",
"tree",
"."
] | [
"\"\"\"Extract dictionary of devices from tree.\"\"\""
] | [
{
"param": "treeFile",
"type": null
},
{
"param": "libName",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "treeFile",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "libName",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
import os
def run(treeFile, libName):
with open(treeFile, 'r') as fh:
lines = fh.readlines()
deviceUsage = {};
print("...Processing List.")
for line in lines:
line_clean = line.lstrip()
line_split = line_clean.split(" ")
if line_split[0] == libName:
device = line_split[1]
count_string = line_split[-1]
count = re.sub(r'[()]', '', count_string)
try:
deviceUsage[device] += int(count)
except KeyError:
print("...Found device {}".format(device))
deviceUsage[device] = int(count)
log_file = os.path.join(os.getcwd(), "log_getDeviceUsage.txt")
print("...Writing to {}".format(log_file))
with open(log_file, 'w') as fh:
fh.write("Output for {}\n".format(treeFile))
fh.write("---\n")
for device, count in sorted(deviceUsage.items()):
fh.write("{}: {}\n".format(device, count))
return | 609,987 | 17 |
0533c3af9d975160fa313fd00bf1134f8453512d | addamit/youngatlas | tests/conftest.py | [
"MIT"
] | Python | ip | null | def ip(session_ip):
"""Verify the extension is automatically loaded."""
status = session_ip.find_cell_magic("yamagics")
print ("magics status: {}".format(status))
session_ip.run_line_magic(magic_name="load_ext", line="yamagics")
print ("Yielding IPython")
yield session_ip
# session_ip.run_line_magic(magic_name="unload_ext", line="yamagics")
#session_ip.run_line_magic(magic_name="reset", line="-f")
#ip.extension_manager.reload_extension('storemagic') | Verify the extension is automatically loaded. | Verify the extension is automatically loaded. | [
"Verify",
"the",
"extension",
"is",
"automatically",
"loaded",
"."
] | def ip(session_ip):
status = session_ip.find_cell_magic("yamagics")
print ("magics status: {}".format(status))
session_ip.run_line_magic(magic_name="load_ext", line="yamagics")
print ("Yielding IPython")
yield session_ip | [
"def",
"ip",
"(",
"session_ip",
")",
":",
"status",
"=",
"session_ip",
".",
"find_cell_magic",
"(",
"\"yamagics\"",
")",
"print",
"(",
"\"magics status: {}\"",
".",
"format",
"(",
"status",
")",
")",
"session_ip",
".",
"run_line_magic",
"(",
"magic_name",
"=",
"\"load_ext\"",
",",
"line",
"=",
"\"yamagics\"",
")",
"print",
"(",
"\"Yielding IPython\"",
")",
"yield",
"session_ip"
] | Verify the extension is automatically loaded. | [
"Verify",
"the",
"extension",
"is",
"automatically",
"loaded",
"."
] | [
"\"\"\"Verify the extension is automatically loaded.\"\"\"",
"# session_ip.run_line_magic(magic_name=\"unload_ext\", line=\"yamagics\")",
"#session_ip.run_line_magic(magic_name=\"reset\", line=\"-f\")",
"#ip.extension_manager.reload_extension('storemagic')"
] | [
{
"param": "session_ip",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "session_ip",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def ip(session_ip):
status = session_ip.find_cell_magic("yamagics")
print ("magics status: {}".format(status))
session_ip.run_line_magic(magic_name="load_ext", line="yamagics")
print ("Yielding IPython")
yield session_ip | 609,988 | 551 |
3fa9b611e6e4f83a68a8ecd1b26f508fb06c29e8 | hodgestar/fishnet | fishnet/cmds/ls.py | [
"MIT"
] | Python | ls_local_op | null | def ls_local_op(channel, shell_glob):
""" Perform an ls on the local machine. """
import glob
import subprocess
filenames = list(glob.glob(shell_glob))
p = subprocess.Popen(
["/bin/ls"] + filenames,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
for line in stdout.splitlines():
channel.send("%s" % (line,))
for line in stderr.splitlines():
channel.send("[ERR] %s" % (line,)) | Perform an ls on the local machine. | Perform an ls on the local machine. | [
"Perform",
"an",
"ls",
"on",
"the",
"local",
"machine",
"."
] | def ls_local_op(channel, shell_glob):
import glob
import subprocess
filenames = list(glob.glob(shell_glob))
p = subprocess.Popen(
["/bin/ls"] + filenames,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
for line in stdout.splitlines():
channel.send("%s" % (line,))
for line in stderr.splitlines():
channel.send("[ERR] %s" % (line,)) | [
"def",
"ls_local_op",
"(",
"channel",
",",
"shell_glob",
")",
":",
"import",
"glob",
"import",
"subprocess",
"filenames",
"=",
"list",
"(",
"glob",
".",
"glob",
"(",
"shell_glob",
")",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"/bin/ls\"",
"]",
"+",
"filenames",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"p",
".",
"communicate",
"(",
")",
"for",
"line",
"in",
"stdout",
".",
"splitlines",
"(",
")",
":",
"channel",
".",
"send",
"(",
"\"%s\"",
"%",
"(",
"line",
",",
")",
")",
"for",
"line",
"in",
"stderr",
".",
"splitlines",
"(",
")",
":",
"channel",
".",
"send",
"(",
"\"[ERR] %s\"",
"%",
"(",
"line",
",",
")",
")"
] | Perform an ls on the local machine. | [
"Perform",
"an",
"ls",
"on",
"the",
"local",
"machine",
"."
] | [
"\"\"\" Perform an ls on the local machine. \"\"\""
] | [
{
"param": "channel",
"type": null
},
{
"param": "shell_glob",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "channel",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "shell_glob",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import subprocess
import glob
def ls_local_op(channel, shell_glob):
import glob
import subprocess
filenames = list(glob.glob(shell_glob))
p = subprocess.Popen(
["/bin/ls"] + filenames,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
for line in stdout.splitlines():
channel.send("%s" % (line,))
for line in stderr.splitlines():
channel.send("[ERR] %s" % (line,)) | 609,990 | 879 |
f883284c6cc3f5593aca1908d8fa25ae3bddaee4 | zipated/src | tools/metrics/common/pretty_print_xml.py | [
"BSD-3-Clause"
] | Python | SplitParagraphs | <not_specific> | def SplitParagraphs(text):
"""Split a block of text into paragraphs.
Args:
text: The text to split.
Returns:
A list of paragraphs as strings.
"""
text = textwrap.dedent(text.strip('\n'))
lines = text.split('\n')
# Split the text into paragraphs at blank line boundaries.
paragraphs = [[]]
for l in lines:
if paragraphs[-1] and not l.strip():
paragraphs.append([])
else:
paragraphs[-1].append(l)
# Remove trailing empty paragraph if present.
if paragraphs and not paragraphs[-1]:
paragraphs = paragraphs[:-1]
return ['\n'.join(p) for p in paragraphs] | Split a block of text into paragraphs.
Args:
text: The text to split.
Returns:
A list of paragraphs as strings.
| Split a block of text into paragraphs. | [
"Split",
"a",
"block",
"of",
"text",
"into",
"paragraphs",
"."
] | def SplitParagraphs(text):
text = textwrap.dedent(text.strip('\n'))
lines = text.split('\n')
paragraphs = [[]]
for l in lines:
if paragraphs[-1] and not l.strip():
paragraphs.append([])
else:
paragraphs[-1].append(l)
if paragraphs and not paragraphs[-1]:
paragraphs = paragraphs[:-1]
return ['\n'.join(p) for p in paragraphs] | [
"def",
"SplitParagraphs",
"(",
"text",
")",
":",
"text",
"=",
"textwrap",
".",
"dedent",
"(",
"text",
".",
"strip",
"(",
"'\\n'",
")",
")",
"lines",
"=",
"text",
".",
"split",
"(",
"'\\n'",
")",
"paragraphs",
"=",
"[",
"[",
"]",
"]",
"for",
"l",
"in",
"lines",
":",
"if",
"paragraphs",
"[",
"-",
"1",
"]",
"and",
"not",
"l",
".",
"strip",
"(",
")",
":",
"paragraphs",
".",
"append",
"(",
"[",
"]",
")",
"else",
":",
"paragraphs",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"l",
")",
"if",
"paragraphs",
"and",
"not",
"paragraphs",
"[",
"-",
"1",
"]",
":",
"paragraphs",
"=",
"paragraphs",
"[",
":",
"-",
"1",
"]",
"return",
"[",
"'\\n'",
".",
"join",
"(",
"p",
")",
"for",
"p",
"in",
"paragraphs",
"]"
] | Split a block of text into paragraphs. | [
"Split",
"a",
"block",
"of",
"text",
"into",
"paragraphs",
"."
] | [
"\"\"\"Split a block of text into paragraphs.\n\n Args:\n text: The text to split.\n Returns:\n A list of paragraphs as strings.\n \"\"\"",
"# Split the text into paragraphs at blank line boundaries.",
"# Remove trailing empty paragraph if present."
] | [
{
"param": "text",
"type": null
}
] | {
"returns": [
{
"docstring": "A list of paragraphs as strings.",
"docstring_tokens": [
"A",
"list",
"of",
"paragraphs",
"as",
"strings",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "text",
"type": null,
"docstring": "The text to split.",
"docstring_tokens": [
"The",
"text",
"to",
"split",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import textwrap
def SplitParagraphs(text):
text = textwrap.dedent(text.strip('\n'))
lines = text.split('\n')
paragraphs = [[]]
for l in lines:
if paragraphs[-1] and not l.strip():
paragraphs.append([])
else:
paragraphs[-1].append(l)
if paragraphs and not paragraphs[-1]:
paragraphs = paragraphs[:-1]
return ['\n'.join(p) for p in paragraphs] | 609,991 | 98 |
bfc13cc4c5bbc737270de7539fba35dbb3ec6f38 | huaminglin/docker-demo | mitmproxy-demo/wsgi/script/wsgi.py | [
"MIT"
] | Python | date_time_string | <not_specific> | def date_time_string():
"""Return the current date and time formatted for a message header."""
WEEKS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
MONTHS = [
None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'
]
now = time.time()
year, month, day, hh, mm, ss, wd, y_, z_ = time.gmtime(now)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
WEEKS[wd],
day, MONTHS[month], year,
hh, mm, ss
)
return s | Return the current date and time formatted for a message header. | Return the current date and time formatted for a message header. | [
"Return",
"the",
"current",
"date",
"and",
"time",
"formatted",
"for",
"a",
"message",
"header",
"."
] | def date_time_string():
WEEKS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
MONTHS = [
None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'
]
now = time.time()
year, month, day, hh, mm, ss, wd, y_, z_ = time.gmtime(now)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
WEEKS[wd],
day, MONTHS[month], year,
hh, mm, ss
)
return s | [
"def",
"date_time_string",
"(",
")",
":",
"WEEKS",
"=",
"[",
"'Mon'",
",",
"'Tue'",
",",
"'Wed'",
",",
"'Thu'",
",",
"'Fri'",
",",
"'Sat'",
",",
"'Sun'",
"]",
"MONTHS",
"=",
"[",
"None",
",",
"'Jan'",
",",
"'Feb'",
",",
"'Mar'",
",",
"'Apr'",
",",
"'May'",
",",
"'Jun'",
",",
"'Jul'",
",",
"'Aug'",
",",
"'Sep'",
",",
"'Oct'",
",",
"'Nov'",
",",
"'Dec'",
"]",
"now",
"=",
"time",
".",
"time",
"(",
")",
"year",
",",
"month",
",",
"day",
",",
"hh",
",",
"mm",
",",
"ss",
",",
"wd",
",",
"y_",
",",
"z_",
"=",
"time",
".",
"gmtime",
"(",
"now",
")",
"s",
"=",
"\"%s, %02d %3s %4d %02d:%02d:%02d GMT\"",
"%",
"(",
"WEEKS",
"[",
"wd",
"]",
",",
"day",
",",
"MONTHS",
"[",
"month",
"]",
",",
"year",
",",
"hh",
",",
"mm",
",",
"ss",
")",
"return",
"s"
] | Return the current date and time formatted for a message header. | [
"Return",
"the",
"current",
"date",
"and",
"time",
"formatted",
"for",
"a",
"message",
"header",
"."
] | [
"\"\"\"Return the current date and time formatted for a message header.\"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import time
def date_time_string():
WEEKS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
MONTHS = [
None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'
]
now = time.time()
year, month, day, hh, mm, ss, wd, y_, z_ = time.gmtime(now)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
WEEKS[wd],
day, MONTHS[month], year,
hh, mm, ss
)
return s | 609,992 | 662 |
da14967c7dc4dcf34b33a72078dc950b7b2b8cc9 | vishwakftw/CS6270-TDBMS | SignedNetZoo/datasets/utils.py | [
"MIT"
] | Python | gen_bar_updater | <not_specific> | def gen_bar_updater(pbar):
"""
TQDM hook for progress bar during download
"""
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update |
TQDM hook for progress bar during download
| TQDM hook for progress bar during download | [
"TQDM",
"hook",
"for",
"progress",
"bar",
"during",
"download"
] | def gen_bar_updater(pbar):
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update | [
"def",
"gen_bar_updater",
"(",
"pbar",
")",
":",
"def",
"bar_update",
"(",
"count",
",",
"block_size",
",",
"total_size",
")",
":",
"if",
"pbar",
".",
"total",
"is",
"None",
"and",
"total_size",
":",
"pbar",
".",
"total",
"=",
"total_size",
"progress_bytes",
"=",
"count",
"*",
"block_size",
"pbar",
".",
"update",
"(",
"progress_bytes",
"-",
"pbar",
".",
"n",
")",
"return",
"bar_update"
] | TQDM hook for progress bar during download | [
"TQDM",
"hook",
"for",
"progress",
"bar",
"during",
"download"
] | [
"\"\"\"\n TQDM hook for progress bar during download\n \"\"\""
] | [
{
"param": "pbar",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "pbar",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def gen_bar_updater(pbar):
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update | 609,994 | 73 |
26142958ee1cc6a6e3ab83760c5cbf71d7d0d4e9 | iriberri/aiida_core | aiida/orm/data/structure.py | [
"BSD-2-Clause"
] | Python | calc_cell_volume | <not_specific> | def calc_cell_volume(cell):
"""
Calculates the volume of a cell given the three lattice vectors.
It is calculated as cell[0] . (cell[1] x cell[2]), where . represents
a dot product and x a cross product.
:param cell: the cell vectors; the must be a 3x3 list of lists of floats,
no other checks are done.
:returns: the cell volume.
"""
# returns the volume of the primitive cell: |a1.(a2xa3)|
a1 = cell[0]
a2 = cell[1]
a3 = cell[2]
a_mid_0 = a2[1] * a3[2] - a2[2] * a3[1]
a_mid_1 = a2[2] * a3[0] - a2[0] * a3[2]
a_mid_2 = a2[0] * a3[1] - a2[1] * a3[0]
return abs(a1[0] * a_mid_0 + a1[1] * a_mid_1 + a1[2] * a_mid_2) |
Calculates the volume of a cell given the three lattice vectors.
It is calculated as cell[0] . (cell[1] x cell[2]), where . represents
a dot product and x a cross product.
:param cell: the cell vectors; the must be a 3x3 list of lists of floats,
no other checks are done.
:returns: the cell volume.
| Calculates the volume of a cell given the three lattice vectors.
It is calculated as cell[0] . | [
"Calculates",
"the",
"volume",
"of",
"a",
"cell",
"given",
"the",
"three",
"lattice",
"vectors",
".",
"It",
"is",
"calculated",
"as",
"cell",
"[",
"0",
"]",
"."
] | def calc_cell_volume(cell):
a1 = cell[0]
a2 = cell[1]
a3 = cell[2]
a_mid_0 = a2[1] * a3[2] - a2[2] * a3[1]
a_mid_1 = a2[2] * a3[0] - a2[0] * a3[2]
a_mid_2 = a2[0] * a3[1] - a2[1] * a3[0]
return abs(a1[0] * a_mid_0 + a1[1] * a_mid_1 + a1[2] * a_mid_2) | [
"def",
"calc_cell_volume",
"(",
"cell",
")",
":",
"a1",
"=",
"cell",
"[",
"0",
"]",
"a2",
"=",
"cell",
"[",
"1",
"]",
"a3",
"=",
"cell",
"[",
"2",
"]",
"a_mid_0",
"=",
"a2",
"[",
"1",
"]",
"*",
"a3",
"[",
"2",
"]",
"-",
"a2",
"[",
"2",
"]",
"*",
"a3",
"[",
"1",
"]",
"a_mid_1",
"=",
"a2",
"[",
"2",
"]",
"*",
"a3",
"[",
"0",
"]",
"-",
"a2",
"[",
"0",
"]",
"*",
"a3",
"[",
"2",
"]",
"a_mid_2",
"=",
"a2",
"[",
"0",
"]",
"*",
"a3",
"[",
"1",
"]",
"-",
"a2",
"[",
"1",
"]",
"*",
"a3",
"[",
"0",
"]",
"return",
"abs",
"(",
"a1",
"[",
"0",
"]",
"*",
"a_mid_0",
"+",
"a1",
"[",
"1",
"]",
"*",
"a_mid_1",
"+",
"a1",
"[",
"2",
"]",
"*",
"a_mid_2",
")"
] | Calculates the volume of a cell given the three lattice vectors. | [
"Calculates",
"the",
"volume",
"of",
"a",
"cell",
"given",
"the",
"three",
"lattice",
"vectors",
"."
] | [
"\"\"\"\n Calculates the volume of a cell given the three lattice vectors.\n\n It is calculated as cell[0] . (cell[1] x cell[2]), where . represents\n a dot product and x a cross product.\n\n :param cell: the cell vectors; the must be a 3x3 list of lists of floats,\n no other checks are done.\n\n :returns: the cell volume.\n \"\"\"",
"# returns the volume of the primitive cell: |a1.(a2xa3)|"
] | [
{
"param": "cell",
"type": null
}
] | {
"returns": [
{
"docstring": "the cell volume.",
"docstring_tokens": [
"the",
"cell",
"volume",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "cell",
"type": null,
"docstring": "the cell vectors; the must be a 3x3 list of lists of floats,\nno other checks are done.",
"docstring_tokens": [
"the",
"cell",
"vectors",
";",
"the",
"must",
"be",
"a",
"3x3",
"list",
"of",
"lists",
"of",
"floats",
"no",
"other",
"checks",
"are",
"done",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def calc_cell_volume(cell):
a1 = cell[0]
a2 = cell[1]
a3 = cell[2]
a_mid_0 = a2[1] * a3[2] - a2[2] * a3[1]
a_mid_1 = a2[2] * a3[0] - a2[0] * a3[2]
a_mid_2 = a2[0] * a3[1] - a2[1] * a3[0]
return abs(a1[0] * a_mid_0 + a1[1] * a_mid_1 + a1[2] * a_mid_2) | 609,995 | 500 |
4fdf5675af84bcc830436908e6fdb1163a7b95ac | linsalrob/EdwardsLab | refs and citations/orcid_vs_google.py | [
"MIT"
] | Python | message | null | def message(msg, c):
"""
Print a message to stderr using color
:param msg: the message to print
:param color: the color to use
:return: nothing
"""
color = {
'HEADER': '\033[95m',
'OKBLUE': '\033[94m',
'OKGREEN': '\033[92m',
'WARNING': '\033[93m',
'FAIL': '\033[91m',
'ENDC': '\033[0m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m',
'PINK': '\033[95m',
'BLUE': '\033[94m',
'GREEN': '\033[92m',
'YELLOW': '\033[93m',
'RED': '\033[91m',
'WHITE': '\033[0m',
}
c = c.upper()
if c not in color:
c = "WHITE"
if os.fstat(0) == os.fstat(1):
# stderr is not redirected
sys.stderr.write(f"{color[c]}{msg}{color['ENDC']}\n")
else:
sys.stderr.write(f"{msg}\n") |
Print a message to stderr using color
:param msg: the message to print
:param color: the color to use
:return: nothing
| Print a message to stderr using color | [
"Print",
"a",
"message",
"to",
"stderr",
"using",
"color"
] | def message(msg, c):
color = {
'HEADER': '\033[95m',
'OKBLUE': '\033[94m',
'OKGREEN': '\033[92m',
'WARNING': '\033[93m',
'FAIL': '\033[91m',
'ENDC': '\033[0m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m',
'PINK': '\033[95m',
'BLUE': '\033[94m',
'GREEN': '\033[92m',
'YELLOW': '\033[93m',
'RED': '\033[91m',
'WHITE': '\033[0m',
}
c = c.upper()
if c not in color:
c = "WHITE"
if os.fstat(0) == os.fstat(1):
sys.stderr.write(f"{color[c]}{msg}{color['ENDC']}\n")
else:
sys.stderr.write(f"{msg}\n") | [
"def",
"message",
"(",
"msg",
",",
"c",
")",
":",
"color",
"=",
"{",
"'HEADER'",
":",
"'\\033[95m'",
",",
"'OKBLUE'",
":",
"'\\033[94m'",
",",
"'OKGREEN'",
":",
"'\\033[92m'",
",",
"'WARNING'",
":",
"'\\033[93m'",
",",
"'FAIL'",
":",
"'\\033[91m'",
",",
"'ENDC'",
":",
"'\\033[0m'",
",",
"'BOLD'",
":",
"'\\033[1m'",
",",
"'UNDERLINE'",
":",
"'\\033[4m'",
",",
"'PINK'",
":",
"'\\033[95m'",
",",
"'BLUE'",
":",
"'\\033[94m'",
",",
"'GREEN'",
":",
"'\\033[92m'",
",",
"'YELLOW'",
":",
"'\\033[93m'",
",",
"'RED'",
":",
"'\\033[91m'",
",",
"'WHITE'",
":",
"'\\033[0m'",
",",
"}",
"c",
"=",
"c",
".",
"upper",
"(",
")",
"if",
"c",
"not",
"in",
"color",
":",
"c",
"=",
"\"WHITE\"",
"if",
"os",
".",
"fstat",
"(",
"0",
")",
"==",
"os",
".",
"fstat",
"(",
"1",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"f\"{color[c]}{msg}{color['ENDC']}\\n\"",
")",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"f\"{msg}\\n\"",
")"
] | Print a message to stderr using color | [
"Print",
"a",
"message",
"to",
"stderr",
"using",
"color"
] | [
"\"\"\"\n Print a message to stderr using color\n :param msg: the message to print\n :param color: the color to use\n :return: nothing\n \"\"\"",
"# stderr is not redirected"
] | [
{
"param": "msg",
"type": null
},
{
"param": "c",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "msg",
"type": null,
"docstring": "the message to print",
"docstring_tokens": [
"the",
"message",
"to",
"print"
],
"default": null,
"is_optional": null
},
{
"identifier": "c",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "color",
"type": null,
"docstring": "the color to use",
"docstring_tokens": [
"the",
"color",
"to",
"use"
],
"default": null,
"is_optional": null
}
],
"others": []
} | import sys
import os
def message(msg, c):
color = {
'HEADER': '\033[95m',
'OKBLUE': '\033[94m',
'OKGREEN': '\033[92m',
'WARNING': '\033[93m',
'FAIL': '\033[91m',
'ENDC': '\033[0m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m',
'PINK': '\033[95m',
'BLUE': '\033[94m',
'GREEN': '\033[92m',
'YELLOW': '\033[93m',
'RED': '\033[91m',
'WHITE': '\033[0m',
}
c = c.upper()
if c not in color:
c = "WHITE"
if os.fstat(0) == os.fstat(1):
sys.stderr.write(f"{color[c]}{msg}{color['ENDC']}\n")
else:
sys.stderr.write(f"{msg}\n") | 609,996 | 109 |
07077c98b5541119393ddfc4307895a201e49a56 | abdalazzezzalsalahat/data-structures-and-algorithms | python/quick_sort/quick_sort.py | [
"MIT"
] | Python | swap | null | def swap(arr, current, low):
"""[summary]
Helper function used to shove values lower than pivot value over to the left
Args:
arr ([array]): [array to be sorted]
current ([int]): [index of the currrent element]
low ([int]): [index of the smallest element]
"""
temp = arr[current]
arr[current] = arr[low]
arr[low] = temp | [summary]
Helper function used to shove values lower than pivot value over to the left
Args:
arr ([array]): [array to be sorted]
current ([int]): [index of the currrent element]
low ([int]): [index of the smallest element]
| [summary]
Helper function used to shove values lower than pivot value over to the left | [
"[",
"summary",
"]",
"Helper",
"function",
"used",
"to",
"shove",
"values",
"lower",
"than",
"pivot",
"value",
"over",
"to",
"the",
"left"
] | def swap(arr, current, low):
temp = arr[current]
arr[current] = arr[low]
arr[low] = temp | [
"def",
"swap",
"(",
"arr",
",",
"current",
",",
"low",
")",
":",
"temp",
"=",
"arr",
"[",
"current",
"]",
"arr",
"[",
"current",
"]",
"=",
"arr",
"[",
"low",
"]",
"arr",
"[",
"low",
"]",
"=",
"temp"
] | [summary]
Helper function used to shove values lower than pivot value over to the left | [
"[",
"summary",
"]",
"Helper",
"function",
"used",
"to",
"shove",
"values",
"lower",
"than",
"pivot",
"value",
"over",
"to",
"the",
"left"
] | [
"\"\"\"[summary]\n Helper function used to shove values lower than pivot value over to the left\n\n Args:\n arr ([array]): [array to be sorted]\n current ([int]): [index of the currrent element]\n low ([int]): [index of the smallest element]\n \"\"\""
] | [
{
"param": "arr",
"type": null
},
{
"param": "current",
"type": null
},
{
"param": "low",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "arr",
"type": null,
"docstring": "[array to be sorted]",
"docstring_tokens": [
"[",
"array",
"to",
"be",
"sorted",
"]"
],
"default": null,
"is_optional": false
},
{
"identifier": "current",
"type": null,
"docstring": "[index of the currrent element]",
"docstring_tokens": [
"[",
"index",
"of",
"the",
"currrent",
"element",
"]"
],
"default": null,
"is_optional": false
},
{
"identifier": "low",
"type": null,
"docstring": "[index of the smallest element]",
"docstring_tokens": [
"[",
"index",
"of",
"the",
"smallest",
"element",
"]"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def swap(arr, current, low):
temp = arr[current]
arr[current] = arr[low]
arr[low] = temp | 609,998 | 891 |
e986d38b4f211fd56d4b563c5c363bcb5e53ff9d | perceptance/deepimage | src/tinyimage/tools.py | [
"MIT"
] | Python | list_files | <not_specific> | def list_files(target_dir, fn_exts):
"""List all files match given extensions.
Match both upper and lower cases.
Args:
fn_exts: a list of file extension in the form of "*.jpg".
Returns:
a list of found files.
"""
all_exts = []
for ext in fn_exts:
all_exts.append(ext.lower())
all_exts.append(ext.upper())
all_exts = set(all_exts)
all_fns = []
for cur_ext in all_exts:
cur_fns = glob.glob(os.path.join(target_dir, cur_ext))
all_fns.extend(cur_fns)
return all_fns | List all files match given extensions.
Match both upper and lower cases.
Args:
fn_exts: a list of file extension in the form of "*.jpg".
Returns:
a list of found files.
| List all files match given extensions.
Match both upper and lower cases. | [
"List",
"all",
"files",
"match",
"given",
"extensions",
".",
"Match",
"both",
"upper",
"and",
"lower",
"cases",
"."
] | def list_files(target_dir, fn_exts):
all_exts = []
for ext in fn_exts:
all_exts.append(ext.lower())
all_exts.append(ext.upper())
all_exts = set(all_exts)
all_fns = []
for cur_ext in all_exts:
cur_fns = glob.glob(os.path.join(target_dir, cur_ext))
all_fns.extend(cur_fns)
return all_fns | [
"def",
"list_files",
"(",
"target_dir",
",",
"fn_exts",
")",
":",
"all_exts",
"=",
"[",
"]",
"for",
"ext",
"in",
"fn_exts",
":",
"all_exts",
".",
"append",
"(",
"ext",
".",
"lower",
"(",
")",
")",
"all_exts",
".",
"append",
"(",
"ext",
".",
"upper",
"(",
")",
")",
"all_exts",
"=",
"set",
"(",
"all_exts",
")",
"all_fns",
"=",
"[",
"]",
"for",
"cur_ext",
"in",
"all_exts",
":",
"cur_fns",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"target_dir",
",",
"cur_ext",
")",
")",
"all_fns",
".",
"extend",
"(",
"cur_fns",
")",
"return",
"all_fns"
] | List all files match given extensions. | [
"List",
"all",
"files",
"match",
"given",
"extensions",
"."
] | [
"\"\"\"List all files match given extensions.\n\n Match both upper and lower cases.\n\n Args:\n fn_exts: a list of file extension in the form of \"*.jpg\".\n \n Returns:\n a list of found files.\n \"\"\""
] | [
{
"param": "target_dir",
"type": null
},
{
"param": "fn_exts",
"type": null
}
] | {
"returns": [
{
"docstring": "a list of found files.",
"docstring_tokens": [
"a",
"list",
"of",
"found",
"files",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "target_dir",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "fn_exts",
"type": null,
"docstring": "a list of file extension in the form of \"*.jpg\".",
"docstring_tokens": [
"a",
"list",
"of",
"file",
"extension",
"in",
"the",
"form",
"of",
"\"",
"*",
".",
"jpg",
"\"",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
import glob
def list_files(target_dir, fn_exts):
all_exts = []
for ext in fn_exts:
all_exts.append(ext.lower())
all_exts.append(ext.upper())
all_exts = set(all_exts)
all_fns = []
for cur_ext in all_exts:
cur_fns = glob.glob(os.path.join(target_dir, cur_ext))
all_fns.extend(cur_fns)
return all_fns | 609,999 | 809 |
4751c1dacfdf0237ac5cbf65da54b3b7335b9a3e | ufbmi/onefl-deduper | onefl/utils/ui.py | [
"MIT"
] | Python | ask_yes_no | <not_specific> | def ask_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return the answer
as a boolean.
:param question: the question displayed to the user
:param default: the default answer if the user hits <Enter>
"""
valid = {"y": True, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()[0]
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n") | Ask a yes/no question via raw_input() and return the answer
as a boolean.
:param question: the question displayed to the user
:param default: the default answer if the user hits <Enter>
| Ask a yes/no question via raw_input() and return the answer
as a boolean. | [
"Ask",
"a",
"yes",
"/",
"no",
"question",
"via",
"raw_input",
"()",
"and",
"return",
"the",
"answer",
"as",
"a",
"boolean",
"."
] | def ask_yes_no(question, default="yes"):
valid = {"y": True, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()[0]
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n") | [
"def",
"ask_yes_no",
"(",
"question",
",",
"default",
"=",
"\"yes\"",
")",
":",
"valid",
"=",
"{",
"\"y\"",
":",
"True",
",",
"\"n\"",
":",
"False",
"}",
"if",
"default",
"is",
"None",
":",
"prompt",
"=",
"\" [y/n] \"",
"elif",
"default",
"==",
"\"yes\"",
":",
"prompt",
"=",
"\" [Y/n] \"",
"elif",
"default",
"==",
"\"no\"",
":",
"prompt",
"=",
"\" [y/N] \"",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid default answer: '%s'\"",
"%",
"default",
")",
"while",
"True",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"question",
"+",
"prompt",
")",
"choice",
"=",
"input",
"(",
")",
".",
"lower",
"(",
")",
"[",
"0",
"]",
"if",
"default",
"is",
"not",
"None",
"and",
"choice",
"==",
"''",
":",
"return",
"valid",
"[",
"default",
"]",
"elif",
"choice",
"in",
"valid",
":",
"return",
"valid",
"[",
"choice",
"]",
"else",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Please respond with 'yes' or 'no' \"",
"\"(or 'y' or 'n').\\n\"",
")"
] | Ask a yes/no question via raw_input() and return the answer
as a boolean. | [
"Ask",
"a",
"yes",
"/",
"no",
"question",
"via",
"raw_input",
"()",
"and",
"return",
"the",
"answer",
"as",
"a",
"boolean",
"."
] | [
"\"\"\"Ask a yes/no question via raw_input() and return the answer\n as a boolean.\n\n :param question: the question displayed to the user\n :param default: the default answer if the user hits <Enter>\n\n \"\"\""
] | [
{
"param": "question",
"type": null
},
{
"param": "default",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "question",
"type": null,
"docstring": "the question displayed to the user",
"docstring_tokens": [
"the",
"question",
"displayed",
"to",
"the",
"user"
],
"default": null,
"is_optional": null
},
{
"identifier": "default",
"type": null,
"docstring": "the default answer if the user hits ",
"docstring_tokens": [
"the",
"default",
"answer",
"if",
"the",
"user",
"hits"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import sys
def ask_yes_no(question, default="yes"):
valid = {"y": True, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()[:1]
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n") | 610,000 | 939 |
d26a1b14aa7913b4917f6a6ac72c957e36b11824 | terriyu/word-clustering | cluster_pmi.py | [
"MIT"
] | Python | geometric_pmi_score | <not_specific> | def geometric_pmi_score(pdict, wlist1, wlist2):
""" Calculate geometric mean of PMI over all word pairs
in two word lists, given pre-computed PMI dictionary
- If geometric PMI is undefined, return -inf
- The geometric mean is undefined if:
- Any of the PMIs are negative
- None of the word pairs have a defined PMI
"""
product_pmi = None
for word1 in wlist1:
for word2 in wlist2:
# Enforce alphabetical order in pair
pair = tuple(sorted([word1, word2]))
wi, wj = pair
if wi in pdict and wj in pdict[wi]:
if product_pmi is None:
product_pmi = 1
pmi = pdict[wi][wj]
# Check if PMI is negative
if pmi > 0:
product_pmi *= pmi
else:
product_pmi = float("-inf")
break
# If PMI is negative, break out of the loop completely
if product_pmi == float("-inf"):
break
if product_pmi is None:
# None of the word pairs had a defined PMI
return float("-inf")
elif product_pmi == float("-inf"):
# At least one word pair had a negative PMI
return float("-inf")
else:
return product_pmi ** (1/len(wlist1)/len(wlist2)) | Calculate geometric mean of PMI over all word pairs
in two word lists, given pre-computed PMI dictionary
- If geometric PMI is undefined, return -inf
- The geometric mean is undefined if:
- Any of the PMIs are negative
- None of the word pairs have a defined PMI
| Calculate geometric mean of PMI over all word pairs
in two word lists, given pre-computed PMI dictionary
If geometric PMI is undefined, return -inf
The geometric mean is undefined if:
Any of the PMIs are negative
None of the word pairs have a defined PMI | [
"Calculate",
"geometric",
"mean",
"of",
"PMI",
"over",
"all",
"word",
"pairs",
"in",
"two",
"word",
"lists",
"given",
"pre",
"-",
"computed",
"PMI",
"dictionary",
"If",
"geometric",
"PMI",
"is",
"undefined",
"return",
"-",
"inf",
"The",
"geometric",
"mean",
"is",
"undefined",
"if",
":",
"Any",
"of",
"the",
"PMIs",
"are",
"negative",
"None",
"of",
"the",
"word",
"pairs",
"have",
"a",
"defined",
"PMI"
] | def geometric_pmi_score(pdict, wlist1, wlist2):
product_pmi = None
for word1 in wlist1:
for word2 in wlist2:
pair = tuple(sorted([word1, word2]))
wi, wj = pair
if wi in pdict and wj in pdict[wi]:
if product_pmi is None:
product_pmi = 1
pmi = pdict[wi][wj]
if pmi > 0:
product_pmi *= pmi
else:
product_pmi = float("-inf")
break
if product_pmi == float("-inf"):
break
if product_pmi is None:
return float("-inf")
elif product_pmi == float("-inf"):
return float("-inf")
else:
return product_pmi ** (1/len(wlist1)/len(wlist2)) | [
"def",
"geometric_pmi_score",
"(",
"pdict",
",",
"wlist1",
",",
"wlist2",
")",
":",
"product_pmi",
"=",
"None",
"for",
"word1",
"in",
"wlist1",
":",
"for",
"word2",
"in",
"wlist2",
":",
"pair",
"=",
"tuple",
"(",
"sorted",
"(",
"[",
"word1",
",",
"word2",
"]",
")",
")",
"wi",
",",
"wj",
"=",
"pair",
"if",
"wi",
"in",
"pdict",
"and",
"wj",
"in",
"pdict",
"[",
"wi",
"]",
":",
"if",
"product_pmi",
"is",
"None",
":",
"product_pmi",
"=",
"1",
"pmi",
"=",
"pdict",
"[",
"wi",
"]",
"[",
"wj",
"]",
"if",
"pmi",
">",
"0",
":",
"product_pmi",
"*=",
"pmi",
"else",
":",
"product_pmi",
"=",
"float",
"(",
"\"-inf\"",
")",
"break",
"if",
"product_pmi",
"==",
"float",
"(",
"\"-inf\"",
")",
":",
"break",
"if",
"product_pmi",
"is",
"None",
":",
"return",
"float",
"(",
"\"-inf\"",
")",
"elif",
"product_pmi",
"==",
"float",
"(",
"\"-inf\"",
")",
":",
"return",
"float",
"(",
"\"-inf\"",
")",
"else",
":",
"return",
"product_pmi",
"**",
"(",
"1",
"/",
"len",
"(",
"wlist1",
")",
"/",
"len",
"(",
"wlist2",
")",
")"
] | Calculate geometric mean of PMI over all word pairs
in two word lists, given pre-computed PMI dictionary | [
"Calculate",
"geometric",
"mean",
"of",
"PMI",
"over",
"all",
"word",
"pairs",
"in",
"two",
"word",
"lists",
"given",
"pre",
"-",
"computed",
"PMI",
"dictionary"
] | [
"\"\"\" Calculate geometric mean of PMI over all word pairs\n in two word lists, given pre-computed PMI dictionary\n\n - If geometric PMI is undefined, return -inf\n - The geometric mean is undefined if:\n - Any of the PMIs are negative\n - None of the word pairs have a defined PMI\n \"\"\"",
"# Enforce alphabetical order in pair",
"# Check if PMI is negative",
"# If PMI is negative, break out of the loop completely",
"# None of the word pairs had a defined PMI",
"# At least one word pair had a negative PMI"
] | [
{
"param": "pdict",
"type": null
},
{
"param": "wlist1",
"type": null
},
{
"param": "wlist2",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "pdict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "wlist1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "wlist2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def geometric_pmi_score(pdict, wlist1, wlist2):
product_pmi = None
for word1 in wlist1:
for word2 in wlist2:
pair = tuple(sorted([word1, word2]))
wi, wj = pair
if wi in pdict and wj in pdict[wi]:
if product_pmi is None:
product_pmi = 1
pmi = pdict[wi][wj]
if pmi > 0:
product_pmi *= pmi
else:
product_pmi = float("-inf")
break
if product_pmi == float("-inf"):
break
if product_pmi is None:
return float("-inf")
elif product_pmi == float("-inf"):
return float("-inf")
else:
return product_pmi ** (1/len(wlist1)/len(wlist2)) | 610,001 | 1,005 |
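Worked example for geometric_pmi_score: two defined, positive pairwise PMIs give (4*1)**(1/2), while an unseen pair leaves the product undefined. The toy dictionary stores each pair under alphabetically ordered keys, as the function expects.

pdict = {"a": {"b": 4.0, "c": 1.0}}
print(geometric_pmi_score(pdict, ["a"], ["b", "c"]))   # 2.0
print(geometric_pmi_score(pdict, ["a"], ["d"]))        # -inf: no pair has a PMI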
ad3929f62a474df3c94890d8ea491fe56ccee37f | Omerdan03/Stock_Mining | data_scraping_main.py | [
"MIT"
] | Python | calc_periods | <not_specific> | def calc_periods(from_date=datetime.date.today() - datetime.timedelta(days=365)):
"""This function takes the date to scrap from for the user and return period1 and period2 so it's ready to insert
inside the url"""
min_date = datetime.date.fromisoformat('1970-01-01')
duration1 = from_date-min_date
period1_sec = int(duration1.total_seconds())
duration2 = datetime.date.today()-min_date
period2_sec = int(duration2.total_seconds())
return period1_sec, period2_sec | This function takes the date to scrap from for the user and return period1 and period2 so it's ready to insert
inside the url | This function takes the date to scrap from for the user and return period1 and period2 so it's ready to insert
inside the url | [
"This",
"function",
"takes",
"the",
"date",
"to",
"scrap",
"from",
"for",
"the",
"user",
"and",
"return",
"period1",
"and",
"period2",
"so",
"it",
"'",
"s",
"ready",
"to",
"insert",
"inside",
"the",
"url"
] | def calc_periods(from_date=datetime.date.today() - datetime.timedelta(days=365)):
min_date = datetime.date.fromisoformat('1970-01-01')
duration1 = from_date-min_date
period1_sec = int(duration1.total_seconds())
duration2 = datetime.date.today()-min_date
period2_sec = int(duration2.total_seconds())
return period1_sec, period2_sec | [
"def",
"calc_periods",
"(",
"from_date",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"365",
")",
")",
":",
"min_date",
"=",
"datetime",
".",
"date",
".",
"fromisoformat",
"(",
"'1970-01-01'",
")",
"duration1",
"=",
"from_date",
"-",
"min_date",
"period1_sec",
"=",
"int",
"(",
"duration1",
".",
"total_seconds",
"(",
")",
")",
"duration2",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"-",
"min_date",
"period2_sec",
"=",
"int",
"(",
"duration2",
".",
"total_seconds",
"(",
")",
")",
"return",
"period1_sec",
",",
"period2_sec"
] | This function takes the date to scrap from for the user and return period1 and period2 so it's ready to insert
inside the url | [
"This",
"function",
"takes",
"the",
"date",
"to",
"scrap",
"from",
"for",
"the",
"user",
"and",
"return",
"period1",
"and",
"period2",
"so",
"it",
"'",
"s",
"ready",
"to",
"insert",
"inside",
"the",
"url"
] | [
"\"\"\"This function takes the date to scrap from for the user and return period1 and period2 so it's ready to insert\n inside the url\"\"\""
] | [
{
"param": "from_date",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "from_date",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import datetime
def calc_periods(from_date=datetime.date.today() - datetime.timedelta(days=365)):
min_date = datetime.date.fromisoformat('1970-01-01')
duration1 = from_date-min_date
period1_sec = int(duration1.total_seconds())
duration2 = datetime.date.today()-min_date
period2_sec = int(duration2.total_seconds())
return period1_sec, period2_sec | 610,002 | 809 |
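Sanity check for calc_periods: both periods are whole days since the Unix epoch (1970-01-01), so they are midnight-aligned. Note that the from_date default is evaluated once at import time, a standard Python default-argument caveat.

p1, p2 = calc_periods(datetime.date(2020, 1, 1))
print(p1)                 # 1577836800, i.e. 2020-01-01T00:00:00Z in epoch seconds
print((p2 - p1) % 86400)  # 0 -- the difference is an exact number of days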
77fba3b4bc26a4a2118e58ab7adca455dd843e17 | EGAMAGZ/code-notes | codenotes/cli/__init__.py | [
"MIT"
] | Python | custom_print | None | def custom_print(cls, text: str, theme: Theme = None) -> None:
"""Class method used to print custom formatted text
Parameters
----------
text : HTML
Text that will be print with format similar to html
theme: Theme
Theme used for the text to be displayed
"""
print_formatted = cls(theme)
print_formatted.console.print(text) | Class method used to print custom formatted text
Parameters
----------
text : HTML
Text that will be print with format similar to html
theme: Theme
Theme used for the text to be displayed
| Class method used to print custom formatted text
Parameters
text : HTML
Text that will be print with format similar to html
Theme
Theme used for the text to be displayed | [
"Class",
"method",
"used",
"to",
"print",
"custom",
"formatted",
"text",
"Parameters",
"text",
":",
"HTML",
"Text",
"that",
"will",
"be",
"print",
"with",
"format",
"similar",
"to",
"html",
"Theme",
"Theme",
"used",
"for",
"the",
"text",
"to",
"be",
"displayed"
] | def custom_print(cls, text: str, theme: Theme = None) -> None:
print_formatted = cls(theme)
print_formatted.console.print(text) | [
"def",
"custom_print",
"(",
"cls",
",",
"text",
":",
"str",
",",
"theme",
":",
"Theme",
"=",
"None",
")",
"->",
"None",
":",
"print_formatted",
"=",
"cls",
"(",
"theme",
")",
"print_formatted",
".",
"console",
".",
"print",
"(",
"text",
")"
] | Class method used to print custom formatted text
Parameters | [
"Class",
"method",
"used",
"to",
"print",
"custom",
"formatted",
"text",
"Parameters"
] | [
"\"\"\"Class method used to print custom formatted text\n Parameters\n ----------\n text : HTML\n Text that will be print with format similar to html\n\n theme: Theme\n Theme used for the text to be displayed\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "text",
"type": "str"
},
{
"param": "theme",
"type": "Theme"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "text",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "theme",
"type": "Theme",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | from rich.theme import Theme  # assumed import; Theme is undefined in the record as extracted
def custom_print(cls, text: str, theme: Theme = None) -> None:
print_formatted = cls(theme)
print_formatted.console.print(text) | 610,003 | 426 |
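Hypothetical usage sketch: the enclosing class is not part of this record, so the class name below is invented, and Theme plus the markup syntax are assumed to come from the rich library (which matches the console.print call).

from rich.theme import Theme

PrintFormatted.custom_print(
    "[warning]file not found[/warning]",
    theme=Theme({"warning": "bold red"}),
)   # PrintFormatted stands in for the unseen class defining custom_print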
0a7e07b4d852c4e056db46442985001279c3611d | apacha/Mensural-Detector | position_classification/extract_sub_image_for_classification.py | [
"MIT"
] | Python | compute_pad_to_force_center | (int, int, int, int) | def compute_pad_to_force_center(upper_left, lower_right, fixed_width, fixed_height, image_width,
image_height) -> (int, int, int, int):
"""
Returns the necessary padding to force the symbol to be in the center of the image sample.
This is useful when the symbol is close to the image boundaries, and so the usual procedure
do not find the symbol exactly in the center of the cropped image.
"""
left, top = upper_left.split(",")
right, bottom = lower_right.split(",")
left, right, top, bottom = float(left), float(right), float(top), float(bottom)
center_x = left + (right - left) / 2
center_y = top + (bottom - top) / 2
pad_left = abs(min(0, center_x - fixed_width / 2))
pad_right = abs(min(0, image_width - (center_x + fixed_width / 2)))
pad_top = abs(min(0, center_y - fixed_height / 2))
pad_bottom = abs(min(0, image_height - (center_y + fixed_height / 2)))
return int(pad_left), int(pad_right), int(pad_top), int(pad_bottom) |
Returns the necessary padding to force the symbol to be in the center of the image sample.
This is useful when the symbol is close to the image boundaries, and so the usual procedure
do not find the symbol exactly in the center of the cropped image.
| Returns the necessary padding to force the symbol to be in the center of the image sample.
This is useful when the symbol is close to the image boundaries, and so the usual procedure
do not find the symbol exactly in the center of the cropped image. | [
"Returns",
"the",
"necessary",
"padding",
"to",
"force",
"the",
"symbol",
"to",
"be",
"in",
"the",
"center",
"of",
"the",
"image",
"sample",
".",
"This",
"is",
"useful",
"when",
"the",
"symbol",
"is",
"close",
"to",
"the",
"image",
"boundaries",
"and",
"so",
"the",
"usual",
"procedure",
"do",
"not",
"find",
"the",
"symbol",
"exactly",
"in",
"the",
"center",
"of",
"the",
"cropped",
"image",
"."
] | def compute_pad_to_force_center(upper_left, lower_right, fixed_width, fixed_height, image_width,
image_height) -> (int, int, int, int):
left, top = upper_left.split(",")
right, bottom = lower_right.split(",")
left, right, top, bottom = float(left), float(right), float(top), float(bottom)
center_x = left + (right - left) / 2
center_y = top + (bottom - top) / 2
pad_left = abs(min(0, center_x - fixed_width / 2))
pad_right = abs(min(0, image_width - (center_x + fixed_width / 2)))
pad_top = abs(min(0, center_y - fixed_height / 2))
pad_bottom = abs(min(0, image_height - (center_y + fixed_height / 2)))
return int(pad_left), int(pad_right), int(pad_top), int(pad_bottom) | [
"def",
"compute_pad_to_force_center",
"(",
"upper_left",
",",
"lower_right",
",",
"fixed_width",
",",
"fixed_height",
",",
"image_width",
",",
"image_height",
")",
"->",
"(",
"int",
",",
"int",
",",
"int",
",",
"int",
")",
":",
"left",
",",
"top",
"=",
"upper_left",
".",
"split",
"(",
"\",\"",
")",
"right",
",",
"bottom",
"=",
"lower_right",
".",
"split",
"(",
"\",\"",
")",
"left",
",",
"right",
",",
"top",
",",
"bottom",
"=",
"float",
"(",
"left",
")",
",",
"float",
"(",
"right",
")",
",",
"float",
"(",
"top",
")",
",",
"float",
"(",
"bottom",
")",
"center_x",
"=",
"left",
"+",
"(",
"right",
"-",
"left",
")",
"/",
"2",
"center_y",
"=",
"top",
"+",
"(",
"bottom",
"-",
"top",
")",
"/",
"2",
"pad_left",
"=",
"abs",
"(",
"min",
"(",
"0",
",",
"center_x",
"-",
"fixed_width",
"/",
"2",
")",
")",
"pad_right",
"=",
"abs",
"(",
"min",
"(",
"0",
",",
"image_width",
"-",
"(",
"center_x",
"+",
"fixed_width",
"/",
"2",
")",
")",
")",
"pad_top",
"=",
"abs",
"(",
"min",
"(",
"0",
",",
"center_y",
"-",
"fixed_height",
"/",
"2",
")",
")",
"pad_bottom",
"=",
"abs",
"(",
"min",
"(",
"0",
",",
"image_height",
"-",
"(",
"center_y",
"+",
"fixed_height",
"/",
"2",
")",
")",
")",
"return",
"int",
"(",
"pad_left",
")",
",",
"int",
"(",
"pad_right",
")",
",",
"int",
"(",
"pad_top",
")",
",",
"int",
"(",
"pad_bottom",
")"
] | Returns the necessary padding to force the symbol to be in the center of the image sample. | [
"Returns",
"the",
"necessary",
"padding",
"to",
"force",
"the",
"symbol",
"to",
"be",
"in",
"the",
"center",
"of",
"the",
"image",
"sample",
"."
] | [
"\"\"\"\n Returns the necessary padding to force the symbol to be in the center of the image sample.\n This is useful when the symbol is close to the image boundaries, and so the usual procedure\n do not find the symbol exactly in the center of the cropped image.\n \"\"\""
] | [
{
"param": "upper_left",
"type": null
},
{
"param": "lower_right",
"type": null
},
{
"param": "fixed_width",
"type": null
},
{
"param": "fixed_height",
"type": null
},
{
"param": "image_width",
"type": null
},
{
"param": "image_height",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "upper_left",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "lower_right",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "fixed_width",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "fixed_height",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "image_width",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "image_height",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def compute_pad_to_force_center(upper_left, lower_right, fixed_width, fixed_height, image_width,
image_height) -> (int, int, int, int):
left, top = upper_left.split(",")
right, bottom = lower_right.split(",")
left, right, top, bottom = float(left), float(right), float(top), float(bottom)
center_x = left + (right - left) / 2
center_y = top + (bottom - top) / 2
pad_left = abs(min(0, center_x - fixed_width / 2))
pad_right = abs(min(0, image_width - (center_x + fixed_width / 2)))
pad_top = abs(min(0, center_y - fixed_height / 2))
pad_bottom = abs(min(0, image_height - (center_y + fixed_height / 2)))
return int(pad_left), int(pad_right), int(pad_top), int(pad_bottom) | 610,004 | 584 |
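Worked example for compute_pad_to_force_center: a 20x20 symbol with corners "10,10" and "30,30" sits near the top-left of a 200x200 image, so centring a fixed 100x100 crop on it overshoots the image by 30px on the left and top.

print(compute_pad_to_force_center("10,10", "30,30", 100, 100, 200, 200))
# (30, 0, 30, 0) -- i.e. (pad_left, pad_right, pad_top, pad_bottom)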
84ce1eafed96bd992d71c47243ace519e830003f | winhamwr/django-mature-optimization | mature_optimization/parse.py | [
"BSD-3-Clause"
] | Python | convert_time | <not_specific> | def convert_time(cls, time_str):
"""
Convert date string to datetime object
"""
if cls.date_ignore_pattern:
time_str = re.sub(cls.date_ignore_pattern, '', time_str)
return datetime.strptime(time_str, cls.date_format) |
Convert date string to datetime object
| Convert date string to datetime object | [
"Convert",
"date",
"string",
"to",
"datetime",
"object"
] | def convert_time(cls, time_str):
if cls.date_ignore_pattern:
time_str = re.sub(cls.date_ignore_pattern, '', time_str)
return datetime.strptime(time_str, cls.date_format) | [
"def",
"convert_time",
"(",
"cls",
",",
"time_str",
")",
":",
"if",
"cls",
".",
"date_ignore_pattern",
":",
"time_str",
"=",
"re",
".",
"sub",
"(",
"cls",
".",
"date_ignore_pattern",
",",
"''",
",",
"time_str",
")",
"return",
"datetime",
".",
"strptime",
"(",
"time_str",
",",
"cls",
".",
"date_format",
")"
] | Convert date string to datetime object | [
"Convert",
"date",
"string",
"to",
"datetime",
"object"
] | [
"\"\"\"\n Convert date string to datetime object\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "time_str",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "time_str",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
from datetime import datetime
def convert_time(cls, time_str):
if cls.date_ignore_pattern:
time_str = re.sub(cls.date_ignore_pattern, '', time_str)
return datetime.strptime(time_str, cls.date_format) | 610,005 | 953 |
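Minimal sketch of the parser class this record's classmethod expects; the date_format, date_ignore_pattern and log line below are illustrative assumptions, not taken from the source repo.

class ApacheLogParser:
    date_format = "%d/%b/%Y:%H:%M:%S"
    date_ignore_pattern = r" [-+]\d{4}$"   # drop the trailing UTC offset
    convert_time = classmethod(convert_time)

print(ApacheLogParser.convert_time("20/Mar/2013:10:00:00 -0500"))
# datetime.datetime(2013, 3, 20, 10, 0)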
ee263d3465e3d5713326a24fccf3ba9c4ba30f03 | Minsoo2022/DeepHuman | TrainingDataPreparation/util.py | [
"Unlicense"
] | Python | transform_mesh_in_place | <not_specific> | def transform_mesh_in_place(mesh, trans, scale):
"""
Transforms mesh
Note that it will perform translation first, followed by scaling
Also note that the transformation happens in-place
"""
mesh['v'][:, 0] += trans[0]
mesh['v'][:, 1] += trans[1]
mesh['v'][:, 2] += trans[2]
mesh['v'] *= scale
return mesh |
Transforms mesh
Note that it will perform translation first, followed by scaling
Also note that the transformation happens in-place
| Transforms mesh
Note that it will perform translation first, followed by scaling
Also note that the transformation happens in-place | [
"Transforms",
"mesh",
"Note",
"that",
"it",
"will",
"perform",
"translation",
"first",
"followed",
"by",
"scaling",
"Also",
"note",
"that",
"the",
"transformation",
"happens",
"in",
"-",
"place"
] | def transform_mesh_in_place(mesh, trans, scale):
mesh['v'][:, 0] += trans[0]
mesh['v'][:, 1] += trans[1]
mesh['v'][:, 2] += trans[2]
mesh['v'] *= scale
return mesh | [
"def",
"transform_mesh_in_place",
"(",
"mesh",
",",
"trans",
",",
"scale",
")",
":",
"mesh",
"[",
"'v'",
"]",
"[",
":",
",",
"0",
"]",
"+=",
"trans",
"[",
"0",
"]",
"mesh",
"[",
"'v'",
"]",
"[",
":",
",",
"1",
"]",
"+=",
"trans",
"[",
"1",
"]",
"mesh",
"[",
"'v'",
"]",
"[",
":",
",",
"2",
"]",
"+=",
"trans",
"[",
"2",
"]",
"mesh",
"[",
"'v'",
"]",
"*=",
"scale",
"return",
"mesh"
] | Transforms mesh
Note that it will perform translation first, followed by scaling
Also note that the transformation happens in-place | [
"Transforms",
"mesh",
"Note",
"that",
"it",
"will",
"perform",
"translation",
"first",
"followed",
"by",
"scaling",
"Also",
"note",
"that",
"the",
"transformation",
"happens",
"in",
"-",
"place"
] | [
"\"\"\"\n Transforms mesh\n Note that it will perform translation first, followed by scaling\n Also note that the transformation happens in-place\n \"\"\""
] | [
{
"param": "mesh",
"type": null
},
{
"param": "trans",
"type": null
},
{
"param": "scale",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "mesh",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "trans",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "scale",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def transform_mesh_in_place(mesh, trans, scale):
mesh['v'][:, 0] += trans[0]
mesh['v'][:, 1] += trans[1]
mesh['v'][:, 2] += trans[2]
mesh['v'] *= scale
return mesh | 610,006 | 386 |
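Usage sketch assuming the mesh dict holds an (N, 3) NumPy vertex array under key 'v' (the in-place column arithmetic requires array semantics):

import numpy as np

mesh = {'v': np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])}
transform_mesh_in_place(mesh, trans=(1.0, 0.0, -1.0), scale=2.0)
print(mesh['v'])   # [[ 2.  0. -2.]
                   #  [ 4.  4.  4.]] -- translated first, then scaled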
bdecd2f5875c37e7181060ca2ed474201918dc17 | AustralianSynchrotron/lightflow | lightflow/config.py | [
"BSD-3-Clause"
] | Python | from_file | <not_specific> | def from_file(cls, filename, *, strict=True):
""" Create a new Config object from a configuration file.
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
Returns:
An instance of the Config class.
Raises:
ConfigLoadError: If the configuration cannot be found.
"""
config = cls()
config.load_from_file(filename, strict=strict)
return config | Create a new Config object from a configuration file.
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
Returns:
An instance of the Config class.
Raises:
ConfigLoadError: If the configuration cannot be found.
| Create a new Config object from a configuration file. | [
"Create",
"a",
"new",
"Config",
"object",
"from",
"a",
"configuration",
"file",
"."
] | def from_file(cls, filename, *, strict=True):
config = cls()
config.load_from_file(filename, strict=strict)
return config | [
"def",
"from_file",
"(",
"cls",
",",
"filename",
",",
"*",
",",
"strict",
"=",
"True",
")",
":",
"config",
"=",
"cls",
"(",
")",
"config",
".",
"load_from_file",
"(",
"filename",
",",
"strict",
"=",
"strict",
")",
"return",
"config"
] | Create a new Config object from a configuration file. | [
"Create",
"a",
"new",
"Config",
"object",
"from",
"a",
"configuration",
"file",
"."
] | [
"\"\"\" Create a new Config object from a configuration file.\n\n Args:\n filename (str): The location and name of the configuration file.\n strict (bool): If true raises a ConfigLoadError when the configuration\n cannot be found.\n\n Returns:\n An instance of the Config class.\n\n Raises:\n ConfigLoadError: If the configuration cannot be found.\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "filename",
"type": null
},
{
"param": "strict",
"type": null
}
] | {
"returns": [
{
"docstring": "An instance of the Config class.",
"docstring_tokens": [
"An",
"instance",
"of",
"the",
"Config",
"class",
"."
],
"type": null
}
],
"raises": [
{
"docstring": "If the configuration cannot be found.",
"docstring_tokens": [
"If",
"the",
"configuration",
"cannot",
"be",
"found",
"."
],
"type": "ConfigLoadError"
}
],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "filename",
"type": null,
"docstring": "The location and name of the configuration file.",
"docstring_tokens": [
"The",
"location",
"and",
"name",
"of",
"the",
"configuration",
"file",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "strict",
"type": null,
"docstring": "If true raises a ConfigLoadError when the configuration\ncannot be found.",
"docstring_tokens": [
"If",
"true",
"raises",
"a",
"ConfigLoadError",
"when",
"the",
"configuration",
"cannot",
"be",
"found",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def from_file(cls, filename, *, strict=True):
config = cls()
config.load_from_file(filename, strict=strict)
return config | 610,007 | 426 |
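Usage sketch; the file name is hypothetical. With strict=False a missing file presumably leaves the Config at its defaults instead of raising ConfigLoadError:

config = Config.from_file("lightflow.cfg", strict=False)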
07fd24e478ec9f223d2f3f7411dea43d19b3e25e | DNA-and-Natural-Algorithms-Group/crnverifier | crnverifier/utils.py | [
"MIT"
] | Python | crnsize | <not_specific> | def crnsize(crn):
""" The size of a CRN as defined in JDW 2019.
"""
sp = set().union(*[set().union(*rxn[:2]) for rxn in crn])
tot = 0
for r, p in crn:
for s in sp:
tot += math.ceil(math.log2(len([x for x in r if x == s]) + 1)) + \
math.ceil(math.log2(len([x for x in p if x == s]) + 1))
return tot + len(sp) | The size of a CRN as defined in JDW 2019.
| The size of a CRN as defined in JDW 2019. | [
"The",
"size",
"of",
"a",
"CRN",
"as",
"defined",
"in",
"JDW",
"2019",
"."
] | def crnsize(crn):
sp = set().union(*[set().union(*rxn[:2]) for rxn in crn])
tot = 0
for r, p in crn:
for s in sp:
tot += math.ceil(math.log2(len([x for x in r if x == s]) + 1)) + \
math.ceil(math.log2(len([x for x in p if x == s]) + 1))
return tot + len(sp) | [
"def",
"crnsize",
"(",
"crn",
")",
":",
"sp",
"=",
"set",
"(",
")",
".",
"union",
"(",
"*",
"[",
"set",
"(",
")",
".",
"union",
"(",
"*",
"rxn",
"[",
":",
"2",
"]",
")",
"for",
"rxn",
"in",
"crn",
"]",
")",
"tot",
"=",
"0",
"for",
"r",
",",
"p",
"in",
"crn",
":",
"for",
"s",
"in",
"sp",
":",
"tot",
"+=",
"math",
".",
"ceil",
"(",
"math",
".",
"log2",
"(",
"len",
"(",
"[",
"x",
"for",
"x",
"in",
"r",
"if",
"x",
"==",
"s",
"]",
")",
"+",
"1",
")",
")",
"+",
"math",
".",
"ceil",
"(",
"math",
".",
"log2",
"(",
"len",
"(",
"[",
"x",
"for",
"x",
"in",
"p",
"if",
"x",
"==",
"s",
"]",
")",
"+",
"1",
")",
")",
"return",
"tot",
"+",
"len",
"(",
"sp",
")"
] | The size of a CRN as defined in JDW 2019. | [
"The",
"size",
"of",
"a",
"CRN",
"as",
"defined",
"in",
"JDW",
"2019",
"."
] | [
"\"\"\" The size of a CRN as defined in JDW 2019.\n \"\"\""
] | [
{
"param": "crn",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "crn",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import math
def crnsize(crn):
sp = set().union(*[set().union(*rxn[:2]) for rxn in crn])
tot = 0
for r, p in crn:
for s in sp:
tot += math.ceil(math.log2(len([x for x in r if x == s]) + 1)) + \
math.ceil(math.log2(len([x for x in p if x == s]) + 1))
return tot + len(sp) | 610,009 | 1,004 |
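Worked example for crnsize on the single reaction A + A -> B: each stoichiometric coefficient c costs ceil(log2(c + 1)) per side per species, plus one unit per species.

crn = [(["A", "A"], ["B"])]
print(crnsize(crn))   # 2 (A as reactant) + 1 (B as product) + 2 species = 5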
1f45c58714497a9a342e027bce380af016204eba | GrzegorzMika/Agriculture | cleaner.py | [
"MIT"
] | Python | run_cleanup | None | def run_cleanup(files: List[str], path: str) -> None:
"""
Remove all files specified by files.
:param files: list of files to remove
:param path: path to the directory containing the files
"""
files = [os.path.join(path, file) for file in files]
for file in files:
os.remove(file) |
Remove all files specified by files.
:param files: list of files to remove
:param path: path to the directory containing the files
| Remove all files specified by files. | [
"Remove",
"all",
"files",
"specified",
"by",
"files",
"."
] | def run_cleanup(files: List[str], path: str) -> None:
files = [os.path.join(path, file) for file in files]
for file in files:
os.remove(file) | [
"def",
"run_cleanup",
"(",
"files",
":",
"List",
"[",
"str",
"]",
",",
"path",
":",
"str",
")",
"->",
"None",
":",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"file",
")",
"for",
"file",
"in",
"files",
"]",
"for",
"file",
"in",
"files",
":",
"os",
".",
"remove",
"(",
"file",
")"
] | Remove all files specified by files. | [
"Remove",
"all",
"files",
"specified",
"by",
"files",
"."
] | [
"\"\"\"\n Remove all files specified by files.\n :param files: list of files to remove\n :param path: path to the directory containing the files\n \"\"\""
] | [
{
"param": "files",
"type": "List[str]"
},
{
"param": "path",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "files",
"type": "List[str]",
"docstring": "list of files to remove",
"docstring_tokens": [
"list",
"of",
"files",
"to",
"remove"
],
"default": null,
"is_optional": null
},
{
"identifier": "path",
"type": "str",
"docstring": "path to the directory containing the files",
"docstring_tokens": [
"path",
"to",
"the",
"directory",
"containing",
"the",
"files"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
from typing import List
def run_cleanup(files: List[str], path: str) -> None:
files = [os.path.join(path, file) for file in files]
for file in files:
os.remove(file) | 610,010 | 57 |
62e69a223673fb81d99441900d630d9cc409656c | locationlabs/confab | confab/files.py | [
"Apache-2.0"
] | Python | _clear_dir | null | def _clear_dir(dir_name):
"""
Remove an entire directory tree.
"""
if os.path.isdir(dir_name):
shutil.rmtree(dir_name) |
Remove an entire directory tree.
| Remove an entire directory tree. | [
"Remove",
"an",
"entire",
"directory",
"tree",
"."
] | def _clear_dir(dir_name):
if os.path.isdir(dir_name):
shutil.rmtree(dir_name) | [
"def",
"_clear_dir",
"(",
"dir_name",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dir_name",
")",
":",
"shutil",
".",
"rmtree",
"(",
"dir_name",
")"
] | Remove an entire directory tree. | [
"Remove",
"an",
"entire",
"directory",
"tree",
"."
] | [
"\"\"\"\n Remove an entire directory tree.\n \"\"\""
] | [
{
"param": "dir_name",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "dir_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import shutil
import os
def _clear_dir(dir_name):
if os.path.isdir(dir_name):
shutil.rmtree(dir_name) | 610,011 | 487 |
5bfebfc6fbfcca44cd5ccc0297f734e7d5ad2218 | mktt2897/photon_correlation | python/photon_correlation/util.py | [
"BSD-3-Clause"
] | Python | unique_dots | null | def unique_dots(dots):
"""
Group distinct runs for single dots to enable averaging over all data.
"""
keys = set(map(lambda x: x.dot_key(), dots))
for key in sorted(keys):
yield(list(filter(lambda x: x.dot_key() == key, dots))) |
Group distinct runs for single dots to enable averaging over all data.
| Group distinct runs for single dots to enable averaging over all data. | [
"Group",
"distinct",
"runs",
"for",
"single",
"dots",
"to",
"enable",
"averaging",
"over",
"all",
"data",
"."
] | def unique_dots(dots):
keys = set(map(lambda x: x.dot_key(), dots))
for key in sorted(keys):
yield(list(filter(lambda x: x.dot_key() == key, dots))) | [
"def",
"unique_dots",
"(",
"dots",
")",
":",
"keys",
"=",
"set",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"dot_key",
"(",
")",
",",
"dots",
")",
")",
"for",
"key",
"in",
"sorted",
"(",
"keys",
")",
":",
"yield",
"(",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"dot_key",
"(",
")",
"==",
"key",
",",
"dots",
")",
")",
")"
] | Group distinct runs for single dots to enable averaging over all data. | [
"Group",
"distinct",
"runs",
"for",
"single",
"dots",
"to",
"enable",
"averaging",
"over",
"all",
"data",
"."
] | [
"\"\"\"\n Group distinct runs for single dots to enable averaging over all data.\n \"\"\""
] | [
{
"param": "dots",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "dots",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def unique_dots(dots):
keys = set(map(lambda x: x.dot_key(), dots))
for key in sorted(keys):
yield(list(filter(lambda x: x.dot_key() == key, dots))) | 610,012 | 397 |
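Usage sketch with a stand-in record type; anything exposing dot_key() works, and repeated runs of the same dot end up in one group:

from collections import namedtuple

Run = namedtuple("Run", "dot run")
Run.dot_key = lambda self: self.dot   # illustrative key: group by dot id

runs = [Run("dot-a", 1), Run("dot-b", 1), Run("dot-a", 2)]
for group in unique_dots(runs):
    print([(r.dot, r.run) for r in group])
# [('dot-a', 1), ('dot-a', 2)]
# [('dot-b', 1)]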
1f75e9207de777fc2d47bfb98f4328f5ddcc542b | google/fiddle | fiddle/lingvo/lingvo_config.py | [
"Apache-2.0"
] | Python | _convert_lingvo_param | inspect.Parameter | def _convert_lingvo_param(
name: str,
param: hyperparams._Param, # pylint: disable=protected-access
) -> inspect.Parameter:
"""Returns an inspect.Parameter that corresponds to `param`."""
return inspect.Parameter(
name=name,
kind=inspect.Parameter.KEYWORD_ONLY,
default=param.GetDefault()) | Returns an inspect.Parameter that corresponds to `param`. | Returns an inspect.Parameter that corresponds to `param`. | [
"Returns",
"an",
"inspect",
".",
"Parameter",
"that",
"corresponds",
"to",
"`",
"param",
"`",
"."
] | def _convert_lingvo_param(
name: str,
param: hyperparams._Param,
) -> inspect.Parameter:
return inspect.Parameter(
name=name,
kind=inspect.Parameter.KEYWORD_ONLY,
default=param.GetDefault()) | [
"def",
"_convert_lingvo_param",
"(",
"name",
":",
"str",
",",
"param",
":",
"hyperparams",
".",
"_Param",
",",
")",
"->",
"inspect",
".",
"Parameter",
":",
"return",
"inspect",
".",
"Parameter",
"(",
"name",
"=",
"name",
",",
"kind",
"=",
"inspect",
".",
"Parameter",
".",
"KEYWORD_ONLY",
",",
"default",
"=",
"param",
".",
"GetDefault",
"(",
")",
")"
] | Returns an inspect.Parameter that corresponds to `param`. | [
"Returns",
"an",
"inspect",
".",
"Parameter",
"that",
"corresponds",
"to",
"`",
"param",
"`",
"."
] | [
"# pylint: disable=protected-access",
"\"\"\"Returns an inspect.Parameter that corresponds to `param`.\"\"\""
] | [
{
"param": "name",
"type": "str"
},
{
"param": "param",
"type": "hyperparams._Param"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "name",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "param",
"type": "hyperparams._Param",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import inspect
def _convert_lingvo_param(
name: str,
param: hyperparams._Param,
) -> inspect.Parameter:
return inspect.Parameter(
name=name,
kind=inspect.Parameter.KEYWORD_ONLY,
default=param.GetDefault()) | 610,013 | 759 |
b57856cb3ab8bb6253be18c5e6fdd79e34c97e52 | anonymous-authors-2018/robotics-repo | rl_baselines/evaluation/cross_eval_utils.py | [
"MIT"
] | Python | latestPolicy | <not_specific> | def latestPolicy(log_dir,algo_name):
"""
Get the latest saved model from a file
:param log_dir: (str) a path leads to the model saved path
:param algo_name:
:return: the file name of the latest saved policy and a flag
"""
files= glob.glob(os.path.join(log_dir+algo_name+'_*_model.pkl'))
files_list = []
for file in files:
eps=int((file.split('_')[-2]))
files_list.append((eps,file))
def sortFirst(val):
return val[0]
files_list.sort(key=sortFirst)
if len(files_list)>0:
#episode,latest model file path, OK
return files_list[-1][0],files_list[-1][1],True
else:
#No model saved yet
return 0,'',False |
Get the latest saved model from a file
:param log_dir: (str) a path leads to the model saved path
:param algo_name:
:return: the file name of the latest saved policy and a flag
| Get the latest saved model from a file | [
"Get",
"the",
"latest",
"saved",
"model",
"from",
"a",
"file"
] | def latestPolicy(log_dir,algo_name):
files= glob.glob(os.path.join(log_dir+algo_name+'_*_model.pkl'))
files_list = []
for file in files:
eps=int((file.split('_')[-2]))
files_list.append((eps,file))
def sortFirst(val):
return val[0]
files_list.sort(key=sortFirst)
if len(files_list)>0:
return files_list[-1][0],files_list[-1][1],True
else:
return 0,'',False | [
"def",
"latestPolicy",
"(",
"log_dir",
",",
"algo_name",
")",
":",
"files",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"log_dir",
"+",
"algo_name",
"+",
"'_*_model.pkl'",
")",
")",
"files_list",
"=",
"[",
"]",
"for",
"file",
"in",
"files",
":",
"eps",
"=",
"int",
"(",
"(",
"file",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"2",
"]",
")",
")",
"files_list",
".",
"append",
"(",
"(",
"eps",
",",
"file",
")",
")",
"def",
"sortFirst",
"(",
"val",
")",
":",
"return",
"val",
"[",
"0",
"]",
"files_list",
".",
"sort",
"(",
"key",
"=",
"sortFirst",
")",
"if",
"len",
"(",
"files_list",
")",
">",
"0",
":",
"return",
"files_list",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"files_list",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
",",
"True",
"else",
":",
"return",
"0",
",",
"''",
",",
"False"
] | Get the latest saved model from a file | [
"Get",
"the",
"latest",
"saved",
"model",
"from",
"a",
"file"
] | [
"\"\"\"\n Get the latest saved model from a file\n :param log_dir: (str) a path leads to the model saved path\n :param algo_name:\n :return: the file name of the latest saved policy and a flag\n \"\"\"",
"#episode,latest model file path, OK",
"#No model saved yet"
] | [
{
"param": "log_dir",
"type": null
},
{
"param": "algo_name",
"type": null
}
] | {
"returns": [
{
"docstring": "the file name of the latest saved policy and a flag",
"docstring_tokens": [
"the",
"file",
"name",
"of",
"the",
"latest",
"saved",
"policy",
"and",
"a",
"flag"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "log_dir",
"type": null,
"docstring": "(str) a path leads to the model saved path",
"docstring_tokens": [
"(",
"str",
")",
"a",
"path",
"leads",
"to",
"the",
"model",
"saved",
"path"
],
"default": null,
"is_optional": null
},
{
"identifier": "algo_name",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
import glob
def latestPolicy(log_dir,algo_name):
files= glob.glob(os.path.join(log_dir+algo_name+'_*_model.pkl'))
files_list = []
for file in files:
eps=int((file.split('_')[-2]))
files_list.append((eps,file))
def sortFirst(val):
return val[0]
files_list.sort(key=sortFirst)
if len(files_list)>0:
return files_list[-1][0],files_list[-1][1],True
else:
return 0,'',False | 610,014 | 879 |
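Usage sketch; the checkpoint layout is hypothetical. Given logs/ppo2_100_model.pkl and logs/ppo2_250_model.pkl on disk, the episode number embedded in the file name drives the sort:

episode, model_path, ok = latestPolicy("logs/", "ppo2")
if ok:
    print(episode, model_path)   # 250 logs/ppo2_250_model.pkl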
1eac5b92a78c6512954ee3cd47cf85a9f22fd13b | googleads/googleads-adxbuyer-examples | python/samples/samples_util.py | [
"Apache-2.0"
] | Python | _IsLegacy | <not_specific> | def _IsLegacy(discovery_rest_url):
"""Returns whether the given discovery URL uses legacy discovery.
Args:
discovery_rest_url: a str containing a discovery URL for an API.
Returns:
True if the discovery URL is determined to be a legacy, otherwise False.
"""
if 'googleapis.com/$discovery' in discovery_rest_url:
return False
return True | Returns whether the given discovery URL uses legacy discovery.
Args:
discovery_rest_url: a str containing a discovery URL for an API.
Returns:
True if the discovery URL is determined to be a legacy, otherwise False.
| Returns whether the given discovery URL uses legacy discovery. | [
"Returns",
"whether",
"the",
"given",
"discovery",
"URL",
"uses",
"legacy",
"discovery",
"."
] | def _IsLegacy(discovery_rest_url):
if 'googleapis.com/$discovery' in discovery_rest_url:
return False
return True | [
"def",
"_IsLegacy",
"(",
"discovery_rest_url",
")",
":",
"if",
"'googleapis.com/$discovery'",
"in",
"discovery_rest_url",
":",
"return",
"False",
"return",
"True"
] | Returns whether the given discovery URL uses legacy discovery. | [
"Returns",
"whether",
"the",
"given",
"discovery",
"URL",
"uses",
"legacy",
"discovery",
"."
] | [
"\"\"\"Returns whether the given discovery URL uses legacy discovery.\n\n Args:\n discovery_rest_url: a str containing a discovery URL for an API.\n\n Returns:\n True if the discovery URL is determined to be a legacy, otherwise False.\n \"\"\""
] | [
{
"param": "discovery_rest_url",
"type": null
}
] | {
"returns": [
{
"docstring": "True if the discovery URL is determined to be a legacy, otherwise False.",
"docstring_tokens": [
"True",
"if",
"the",
"discovery",
"URL",
"is",
"determined",
"to",
"be",
"a",
"legacy",
"otherwise",
"False",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "discovery_rest_url",
"type": null,
"docstring": "a str containing a discovery URL for an API.",
"docstring_tokens": [
"a",
"str",
"containing",
"a",
"discovery",
"URL",
"for",
"an",
"API",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _IsLegacy(discovery_rest_url):
if 'googleapis.com/$discovery' in discovery_rest_url:
return False
return True | 610,015 | 213 |
c1a1ea83bff0147eb7720979a01d03975fbf70c2 | coderefinery/autocmake | autocmake/configure.py | [
"BSD-3-Clause"
] | Python | add_quotes_to_argv | <not_specific> | def add_quotes_to_argv(argv, arguments):
"""
This function tries to solve this problem:
https://stackoverflow.com/questions/19120247/python-sys-argv-to-preserve-or
The problem is that sys.argv has been stripped of quotes by the shell but
docopt's arguments contains quotes.
So what we do is cycle through all docopt arguments: if they are also
present in sys.argv and contain spaces, we add quotes.
"""
setup_command = ' '.join(argv[:])
for k, v in arguments.items():
if isinstance(v, str):
if ' ' in v:
if v in setup_command:
setup_command = setup_command.replace(v, '"{}"'.format(v))
return setup_command |
This function tries to solve this problem:
https://stackoverflow.com/questions/19120247/python-sys-argv-to-preserve-or
The problem is that sys.argv has been stripped of quotes by the shell but
docopt's arguments contains quotes.
So what we do is cycle through all docopt arguments: if they are also
present in sys.argv and contain spaces, we add quotes.
|
The problem is that sys.argv has been stripped of quotes by the shell but
docopt's arguments contains quotes.
So what we do is cycle through all docopt arguments: if they are also
present in sys.argv and contain spaces, we add quotes. | [
"The",
"problem",
"is",
"that",
"sys",
".",
"argv",
"has",
"been",
"stripped",
"of",
"quotes",
"by",
"the",
"shell",
"but",
"docopt",
"'",
"s",
"arguments",
"contains",
"quotes",
".",
"So",
"what",
"we",
"do",
"is",
"cycle",
"through",
"all",
"docopt",
"arguments",
":",
"if",
"they",
"are",
"also",
"present",
"in",
"sys",
".",
"argv",
"and",
"contain",
"spaces",
"we",
"add",
"quotes",
"."
] | def add_quotes_to_argv(argv, arguments):
setup_command = ' '.join(argv[:])
for k, v in arguments.items():
if isinstance(v, str):
if ' ' in v:
if v in setup_command:
setup_command = setup_command.replace(v, '"{}"'.format(v))
return setup_command | [
"def",
"add_quotes_to_argv",
"(",
"argv",
",",
"arguments",
")",
":",
"setup_command",
"=",
"' '",
".",
"join",
"(",
"argv",
"[",
":",
"]",
")",
"for",
"k",
",",
"v",
"in",
"arguments",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"if",
"' '",
"in",
"v",
":",
"if",
"v",
"in",
"setup_command",
":",
"setup_command",
"=",
"setup_command",
".",
"replace",
"(",
"v",
",",
"'\"{}\"'",
".",
"format",
"(",
"v",
")",
")",
"return",
"setup_command"
] | This function tries to solve this problem:
https://stackoverflow.com/questions/19120247/python-sys-argv-to-preserve-or | [
"This",
"function",
"tries",
"to",
"solve",
"this",
"problem",
":",
"https",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"questions",
"/",
"19120247",
"/",
"python",
"-",
"sys",
"-",
"argv",
"-",
"to",
"-",
"preserve",
"-",
"or"
] | [
"\"\"\"\n This function tries to solve this problem:\n https://stackoverflow.com/questions/19120247/python-sys-argv-to-preserve-or\n\n The problem is that sys.argv has been stripped of quotes by the shell but\n docopt's arguments contains quotes.\n\n So what we do is cycle through all docopt arguments: if they are also\n present in sys.argv and contain spaces, we add quotes.\n \"\"\""
] | [
{
"param": "argv",
"type": null
},
{
"param": "arguments",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "argv",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "arguments",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def add_quotes_to_argv(argv, arguments):
setup_command = ' '.join(argv[:])
for k, v in arguments.items():
if isinstance(v, str):
if ' ' in v:
if v in setup_command:
setup_command = setup_command.replace(v, '"{}"'.format(v))
return setup_command | 610,016 | 35 |
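Worked example for add_quotes_to_argv: the shell has already stripped the quotes from sys.argv, while docopt's parsed value still contains the space, so it is re-quoted in the reconstructed command:

argv = ["setup.py", "--cc=gcc", "--extra-flags=-O2 -g"]
arguments = {"--extra-flags": "-O2 -g"}
print(add_quotes_to_argv(argv, arguments))
# setup.py --cc=gcc --extra-flags="-O2 -g"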
f3b1f085018b7be86781174b7c7c7d7fe0fc5546 | ProsperousRF/100DaysOfPython | Day10/main.py | [
"Unlicense"
] | Python | format_name | <not_specific> | def format_name(f_name, l_name):
"""Take a first and last name and format it
to return the title case version of the name."""
if f_name == "" or l_name == "":
return "You didn't provide valid inputs."
formated_f_name = f_name.title()
formated_l_name = l_name.title()
return f"Result: {formated_f_name} {formated_l_name}" | Take a first and last name and format it
to return the title case version of the name. | Take a first and last name and format it
to return the title case version of the name. | [
"Take",
"a",
"first",
"and",
"last",
"name",
"and",
"format",
"it",
"to",
"return",
"the",
"title",
"case",
"version",
"of",
"the",
"name",
"."
] | def format_name(f_name, l_name):
if f_name == "" or l_name == "":
return "You didn't provide valid inputs."
formated_f_name = f_name.title()
formated_l_name = l_name.title()
return f"Result: {formated_f_name} {formated_l_name}" | [
"def",
"format_name",
"(",
"f_name",
",",
"l_name",
")",
":",
"if",
"f_name",
"==",
"\"\"",
"or",
"l_name",
"==",
"\"\"",
":",
"return",
"\"You didn't provide valid inputs.\"",
"formated_f_name",
"=",
"f_name",
".",
"title",
"(",
")",
"formated_l_name",
"=",
"l_name",
".",
"title",
"(",
")",
"return",
"f\"Result: {formated_f_name} {formated_l_name}\""
] | Take a first and last name and format it
to return the title case version of the name. | [
"Take",
"a",
"first",
"and",
"last",
"name",
"and",
"format",
"it",
"to",
"return",
"the",
"title",
"case",
"version",
"of",
"the",
"name",
"."
] | [
"\"\"\"Take a first and last name and format it\n to return the title case version of the name.\"\"\""
] | [
{
"param": "f_name",
"type": null
},
{
"param": "l_name",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "f_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "l_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def format_name(f_name, l_name):
if f_name == "" or l_name == "":
return "You didn't provide valid inputs."
formated_f_name = f_name.title()
formated_l_name = l_name.title()
return f"Result: {formated_f_name} {formated_l_name}" | 610,017 | 923 |
d854324e4888285ddafe6f4cda69f2911e5d1f4b | bglar/commcare-hq | corehq/apps/es/filters.py | [
"BSD-3-Clause"
] | Python | exists | <not_specific> | def exists(field):
"""
Only return docs which have 'field'
"""
return {"exists": {"field": field}} |
Only return docs which have 'field'
| Only return docs which have 'field' | [
"Only",
"return",
"docs",
"which",
"have",
"'",
"field",
"'"
] | def exists(field):
return {"exists": {"field": field}} | [
"def",
"exists",
"(",
"field",
")",
":",
"return",
"{",
"\"exists\"",
":",
"{",
"\"field\"",
":",
"field",
"}",
"}"
] | Only return docs which have 'field' | [
"Only",
"return",
"docs",
"which",
"have",
"'",
"field",
"'"
] | [
"\"\"\"\n Only return docs which have 'field'\n \"\"\""
] | [
{
"param": "field",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "field",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def exists(field):
return {"exists": {"field": field}} | 610,018 | 305 |
8d183dd7aebf6592a80bac24422ace1d1ce4f021 | noedtwins/DiscogsTagger | discogstagger/ext/mediafile.py | [
"MIT"
] | Python | _unpack_asf_image | <not_specific> | def _unpack_asf_image(data):
"""Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
"""
type, size = struct.unpack_from("<bi", data)
pos = 5
mime = ""
while data[pos:pos+2] != "\x00\x00":
mime += data[pos:pos+2]
pos += 2
pos += 2
description = ""
while data[pos:pos+2] != "\x00\x00":
description += data[pos:pos+2]
pos += 2
pos += 2
image_data = data[pos:pos+size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le")) | Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
| Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined. | [
"Unpack",
"image",
"data",
"from",
"a",
"WM",
"/",
"Picture",
"tag",
".",
"Return",
"a",
"tuple",
"containing",
"the",
"MIME",
"type",
"the",
"raw",
"image",
"data",
"a",
"type",
"indicator",
"and",
"the",
"image",
"'",
"s",
"description",
".",
"This",
"function",
"is",
"treated",
"as",
"\"",
"untrusted",
"\"",
"and",
"could",
"throw",
"all",
"manner",
"of",
"exceptions",
"(",
"out",
"-",
"of",
"-",
"bounds",
"etc",
".",
")",
".",
"We",
"should",
"clean",
"this",
"up",
"sometime",
"so",
"that",
"the",
"failure",
"modes",
"are",
"well",
"-",
"defined",
"."
] | def _unpack_asf_image(data):
type, size = struct.unpack_from("<bi", data)
pos = 5
mime = ""
while data[pos:pos+2] != "\x00\x00":
mime += data[pos:pos+2]
pos += 2
pos += 2
description = ""
while data[pos:pos+2] != "\x00\x00":
description += data[pos:pos+2]
pos += 2
pos += 2
image_data = data[pos:pos+size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le")) | [
"def",
"_unpack_asf_image",
"(",
"data",
")",
":",
"type",
",",
"size",
"=",
"struct",
".",
"unpack_from",
"(",
"\"<bi\"",
",",
"data",
")",
"pos",
"=",
"5",
"mime",
"=",
"\"\"",
"while",
"data",
"[",
"pos",
":",
"pos",
"+",
"2",
"]",
"!=",
"\"\\x00\\x00\"",
":",
"mime",
"+=",
"data",
"[",
"pos",
":",
"pos",
"+",
"2",
"]",
"pos",
"+=",
"2",
"pos",
"+=",
"2",
"description",
"=",
"\"\"",
"while",
"data",
"[",
"pos",
":",
"pos",
"+",
"2",
"]",
"!=",
"\"\\x00\\x00\"",
":",
"description",
"+=",
"data",
"[",
"pos",
":",
"pos",
"+",
"2",
"]",
"pos",
"+=",
"2",
"pos",
"+=",
"2",
"image_data",
"=",
"data",
"[",
"pos",
":",
"pos",
"+",
"size",
"]",
"return",
"(",
"mime",
".",
"decode",
"(",
"\"utf-16-le\"",
")",
",",
"image_data",
",",
"type",
",",
"description",
".",
"decode",
"(",
"\"utf-16-le\"",
")",
")"
] | Unpack image data from a WM/Picture tag. | [
"Unpack",
"image",
"data",
"from",
"a",
"WM",
"/",
"Picture",
"tag",
"."
] | [
"\"\"\"Unpack image data from a WM/Picture tag. Return a tuple\n containing the MIME type, the raw image data, a type indicator, and\n the image's description.\n\n This function is treated as \"untrusted\" and could throw all manner\n of exceptions (out-of-bounds, etc.). We should clean this up\n sometime so that the failure modes are well-defined.\n \"\"\""
] | [
{
"param": "data",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import struct
def _unpack_asf_image(data):
type, size = struct.unpack_from("<bi", data)
pos = 5
mime = ""
while data[pos:pos+2] != "\x00\x00":
mime += data[pos:pos+2]
pos += 2
pos += 2
description = ""
while data[pos:pos+2] != "\x00\x00":
description += data[pos:pos+2]
pos += 2
pos += 2
image_data = data[pos:pos+size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le")) | 610,019 | 614 |
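The record's code is Python 2 (it concatenates str slices and decodes them); a byte-for-byte Python 3 port would use bytes throughout. A sketch:

import struct

def unpack_asf_image_py3(data: bytes):
    type_, size = struct.unpack_from("<bi", data)   # int8 type + int32 size = 5 bytes
    pos = 5
    mime = b""
    while data[pos:pos + 2] != b"\x00\x00":
        mime += data[pos:pos + 2]
        pos += 2
    pos += 2
    description = b""
    while data[pos:pos + 2] != b"\x00\x00":
        description += data[pos:pos + 2]
        pos += 2
    pos += 2
    image_data = data[pos:pos + size]
    return (mime.decode("utf-16-le"), image_data, type_,
            description.decode("utf-16-le"))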
460c3005a109ede83fce297234f17c4384477dcf | GinoBacallao/genepy | genepy/mutations/__init__.py | [
"Apache-2.0"
] | Python | checkGeneChangeAccrossAll | <not_specific> | def checkGeneChangeAccrossAll(genecn, thresh=0.2):
"""
used to find poor quality genes in CN data (works with multiple sample file)
compute given a df of gene x sample CN counts, how much change there is accross samples for
a same gene and returns ones that are below the threshold
Args:
-----
genecn: gene cn data frame
thresh: threshold in logfold change accross all of them
"""
return genecn.columns[genecn.var()<thresh].tolist() |
used to find poor quality genes in CN data (works with multiple sample file)
compute given a df of gene x sample CN counts, how much change there is accross samples for
a same gene and returns ones that are below the threshold
Args:
-----
genecn: gene cn data frame
thresh: threshold in logfold change accross all of them
| used to find poor quality genes in CN data (works with multiple sample file)
compute given a df of gene x sample CN counts, how much change there is accross samples for
a same gene and returns ones that are below the threshold | [
"used",
"to",
"find",
"poor",
"quality",
"genes",
"in",
"CN",
"data",
"(",
"works",
"with",
"multiple",
"sample",
"file",
")",
"compute",
"given",
"a",
"df",
"of",
"gene",
"x",
"sample",
"CN",
"counts",
"how",
"much",
"change",
"there",
"is",
"accross",
"samples",
"for",
"a",
"same",
"gene",
"and",
"returns",
"ones",
"that",
"are",
"below",
"the",
"threshold"
] | def checkGeneChangeAccrossAll(genecn, thresh=0.2):
return genecn.columns[genecn.var()<thresh].tolist() | [
"def",
"checkGeneChangeAccrossAll",
"(",
"genecn",
",",
"thresh",
"=",
"0.2",
")",
":",
"return",
"genecn",
".",
"columns",
"[",
"genecn",
".",
"var",
"(",
")",
"<",
"thresh",
"]",
".",
"tolist",
"(",
")"
] | used to find poor quality genes in CN data (works with multiple sample file)
compute given a df of gene x sample CN counts, how much change there is accross samples for
a same gene and returns ones that are below the threshold | [
"used",
"to",
"find",
"poor",
"quality",
"genes",
"in",
"CN",
"data",
"(",
"works",
"with",
"multiple",
"sample",
"file",
")",
"compute",
"given",
"a",
"df",
"of",
"gene",
"x",
"sample",
"CN",
"counts",
"how",
"much",
"change",
"there",
"is",
"accross",
"samples",
"for",
"a",
"same",
"gene",
"and",
"returns",
"ones",
"that",
"are",
"below",
"the",
"threshold"
] | [
"\"\"\"\n used to find poor quality genes in CN data (works with multiple sample file)\n\n compute given a df of gene x sample CN counts, how much change there is accross samples for\n a same gene and returns ones that are below the threshold\n\n Args:\n -----\n genecn: gene cn data frame\n thresh: threshold in logfold change accross all of them\n \"\"\""
] | [
{
"param": "genecn",
"type": null
},
{
"param": "thresh",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "genecn",
"type": null,
"docstring": "gene cn data frame",
"docstring_tokens": [
"gene",
"cn",
"data",
"frame"
],
"default": null,
"is_optional": null
},
{
"identifier": "thresh",
"type": null,
"docstring": "threshold in logfold change accross all of them",
"docstring_tokens": [
"threshold",
"in",
"logfold",
"change",
"accross",
"all",
"of",
"them"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def checkGeneChangeAccrossAll(genecn, thresh=0.2):
return genecn.columns[genecn.var()<thresh].tolist() | 610,020 | 804 |
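Editor's note: despite the docstring's "gene x sample" wording, the code reads genes from the columns (samples x genes), since it returns low-variance column names. A quick sanity check with a small pandas DataFrame:

import pandas as pd

genecn = pd.DataFrame({"GENE_A": [1.0, 1.01, 0.99],   # nearly constant across samples
                       "GENE_B": [0.2, 2.3, 4.1]})    # varies a lot
print(checkGeneChangeAccrossAll(genecn, thresh=0.2))  # ['GENE_A']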
0ee5b18c7cec100225348edd7ef76d2d9cfd0441 | wellcometrust/catalogue | reindexer/get_reindex_status.py | [
"MIT"
] | Python | count_items_in_table | <not_specific> | def count_items_in_table(session, *, table_name):
"""
Returns an approximate number of items in a table.
"""
dynamodb = session.client("dynamodb")
return dynamodb.describe_table(TableName=table_name)["Table"]["ItemCount"] |
Returns an approximate number of items in a table.
| Returns an approximate number of items in a table. | [
"Returns",
"an",
"approximate",
"number",
"of",
"items",
"in",
"a",
"table",
"."
] | def count_items_in_table(session, *, table_name):
dynamodb = session.client("dynamodb")
return dynamodb.describe_table(TableName=table_name)["Table"]["ItemCount"] | [
"def",
"count_items_in_table",
"(",
"session",
",",
"*",
",",
"table_name",
")",
":",
"dynamodb",
"=",
"session",
".",
"client",
"(",
"\"dynamodb\"",
")",
"return",
"dynamodb",
".",
"describe_table",
"(",
"TableName",
"=",
"table_name",
")",
"[",
"\"Table\"",
"]",
"[",
"\"ItemCount\"",
"]"
] | Returns an approximate number of items in a table. | [
"Returns",
"an",
"approximate",
"number",
"of",
"items",
"in",
"a",
"table",
"."
] | [
"\"\"\"\n Returns an approximate number of items in a table.\n \"\"\""
] | [
{
"param": "session",
"type": null
},
{
"param": "table_name",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "session",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "table_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def count_items_in_table(session, *, table_name):
dynamodb = session.client("dynamodb")
return dynamodb.describe_table(TableName=table_name)["Table"]["ItemCount"] | 610,022 | 729 |
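Editor's note: a usage sketch, assuming boto3 is installed with working AWS credentials and that a table named "my-table" exists in the chosen region; per the DynamoDB docs, ItemCount is only refreshed roughly every six hours, hence "approximate":

import boto3

session = boto3.Session(region_name="eu-west-1")  # region is illustrative
print(count_items_in_table(session, table_name="my-table"))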
b73523022b4d5b95099da8240120f252558c3a6d | Maia-CA/COSMO | src/utils/ml_utils.py | [
"MIT"
] | Python | load_dict | <not_specific> | def load_dict(fname, var_names, load_func=pickle.load):
""" Loads specific keys from a dictionary that was to a file
:type fname: file name
:type var_names: variables to retrieve. Can be a list or comma seperated string
e.g. 'a, b,c' or ['a', 'b', 'c']
:param load_func: default: pickle.load
"""
if type(var_names) == str:
var_names = re.split(', ?[, ]?', var_names)
with open(fname, "rb") as f:
data_dict = load_func(f)
assert isinstance(data_dict, dict)
return tuple([data_dict[var] for var in var_names]) | Loads specific keys from a dictionary that was to a file
:type fname: file name
:type var_names: variables to retrieve. Can be a list or comma seperated string
e.g. 'a, b,c' or ['a', 'b', 'c']
:param load_func: default: pickle.load
| Loads specific keys from a dictionary that was to a file | [
"Loads",
"specific",
"keys",
"from",
"a",
"dictionary",
"that",
"was",
"to",
"a",
"file"
] | def load_dict(fname, var_names, load_func=pickle.load):
if type(var_names) == str:
var_names = re.split(', ?[, ]?', var_names)
with open(fname, "rb") as f:
data_dict = load_func(f)
assert isinstance(data_dict, dict)
return tuple([data_dict[var] for var in var_names]) | [
"def",
"load_dict",
"(",
"fname",
",",
"var_names",
",",
"load_func",
"=",
"pickle",
".",
"load",
")",
":",
"if",
"type",
"(",
"var_names",
")",
"==",
"str",
":",
"var_names",
"=",
"re",
".",
"split",
"(",
"', ?[, ]?'",
",",
"var_names",
")",
"with",
"open",
"(",
"fname",
",",
"\"rb\"",
")",
"as",
"f",
":",
"data_dict",
"=",
"load_func",
"(",
"f",
")",
"assert",
"isinstance",
"(",
"data_dict",
",",
"dict",
")",
"return",
"tuple",
"(",
"[",
"data_dict",
"[",
"var",
"]",
"for",
"var",
"in",
"var_names",
"]",
")"
] | Loads specific keys from a dictionary that was to a file | [
"Loads",
"specific",
"keys",
"from",
"a",
"dictionary",
"that",
"was",
"to",
"a",
"file"
] | [
"\"\"\" Loads specific keys from a dictionary that was to a file\n :type fname: file name\n :type var_names: variables to retrieve. Can be a list or comma seperated string\n e.g. 'a, b,c' or ['a', 'b', 'c']\n :param load_func: default: pickle.load\n \"\"\""
] | [
{
"param": "fname",
"type": null
},
{
"param": "var_names",
"type": null
},
{
"param": "load_func",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "fname",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "var_names",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "load_func",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
import pickle
def load_dict(fname, var_names, load_func=pickle.load):
if type(var_names) == str:
var_names = re.split(', ?[, ]?', var_names)
with open(fname, "rb") as f:
data_dict = load_func(f)
assert isinstance(data_dict, dict)
return tuple([data_dict[var] for var in var_names]) | 610,023 | 926 |
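Editor's note: a round-trip sketch. The regex accepts 'a,b', 'a, b', and list-style name arguments; here a pickle file is written first so the example is self-contained:

import pickle

with open("vars.pkl", "wb") as f:
    pickle.dump({"a": 1, "b": 2, "c": 3}, f)

a, c = load_dict("vars.pkl", "a, c")  # equivalent to load_dict("vars.pkl", ["a", "c"])
assert (a, c) == (1, 3)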
de354b9cbd8b6875576aa46a0a8a4930d50ddf62 | angry-tony/ClusterRunner | app/util/fs.py | [
"Apache-2.0"
] | Python | zip_directory | str | def zip_directory(target_dir: str, archive_filename: str) -> str:
"""
Zip up the specified directory and stick the resulting zip file in that directory.
:param target_dir: the directory to zip and the location of the resulting zip file
:param archive_filename: filename for the created zip file
:return: the full path to the created zip archive file
"""
# Create the archive in a temp location and then move it to the target dir.
# (Otherwise the resulting archive will include an extra zero-byte file.)
target_path = os.path.join(target_dir, archive_filename)
with tempfile.TemporaryDirectory() as temp_dirpath:
tmp_zip_filename = os.path.join(temp_dirpath, 'clusterrunner_tmp__' + archive_filename)
with zipfile.ZipFile(tmp_zip_filename, 'w', compression=zipfile.ZIP_DEFLATED) as zf:
for dirpath, dirnames, filenames in os.walk(target_dir):
for filename in filenames:
path = os.path.normpath(os.path.join(dirpath, filename))
if os.path.isfile(path):
relpath = os.path.relpath(path, target_dir)
zf.write(path, relpath)
shutil.move(tmp_zip_filename, target_path)
return target_path |
Zip up the specified directory and stick the resulting zip file in that directory.
:param target_dir: the directory to zip and the location of the resulting zip file
:param archive_filename: filename for the created zip file
:return: the full path to the created zip archive file
| Zip up the specified directory and stick the resulting zip file in that directory. | [
"Zip",
"up",
"the",
"specified",
"directory",
"and",
"stick",
"the",
"resulting",
"zip",
"file",
"in",
"that",
"directory",
"."
] | def zip_directory(target_dir: str, archive_filename: str) -> str:
target_path = os.path.join(target_dir, archive_filename)
with tempfile.TemporaryDirectory() as temp_dirpath:
tmp_zip_filename = os.path.join(temp_dirpath, 'clusterrunner_tmp__' + archive_filename)
with zipfile.ZipFile(tmp_zip_filename, 'w', compression=zipfile.ZIP_DEFLATED) as zf:
for dirpath, dirnames, filenames in os.walk(target_dir):
for filename in filenames:
path = os.path.normpath(os.path.join(dirpath, filename))
if os.path.isfile(path):
relpath = os.path.relpath(path, target_dir)
zf.write(path, relpath)
shutil.move(tmp_zip_filename, target_path)
return target_path | [
"def",
"zip_directory",
"(",
"target_dir",
":",
"str",
",",
"archive_filename",
":",
"str",
")",
"->",
"str",
":",
"target_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_dir",
",",
"archive_filename",
")",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"temp_dirpath",
":",
"tmp_zip_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_dirpath",
",",
"'clusterrunner_tmp__'",
"+",
"archive_filename",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"tmp_zip_filename",
",",
"'w'",
",",
"compression",
"=",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"as",
"zf",
":",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"target_dir",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"filename",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"relpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
",",
"target_dir",
")",
"zf",
".",
"write",
"(",
"path",
",",
"relpath",
")",
"shutil",
".",
"move",
"(",
"tmp_zip_filename",
",",
"target_path",
")",
"return",
"target_path"
] | Zip up the specified directory and stick the resulting zip file in that directory. | [
"Zip",
"up",
"the",
"specified",
"directory",
"and",
"stick",
"the",
"resulting",
"zip",
"file",
"in",
"that",
"directory",
"."
] | [
"\"\"\"\n Zip up the specified directory and stick the resulting zip file in that directory.\n :param target_dir: the directory to zip and the location of the resulting zip file\n :param archive_filename: filename for the created zip file\n :return: the full path to the created zip archive file\n \"\"\"",
"# Create the archive in a temp location and then move it to the target dir.",
"# (Otherwise the resulting archive will include an extra zero-byte file.)"
] | [
{
"param": "target_dir",
"type": "str"
},
{
"param": "archive_filename",
"type": "str"
}
] | {
"returns": [
{
"docstring": "the full path to the created zip archive file",
"docstring_tokens": [
"the",
"full",
"path",
"to",
"the",
"created",
"zip",
"archive",
"file"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "target_dir",
"type": "str",
"docstring": "the directory to zip and the location of the resulting zip file",
"docstring_tokens": [
"the",
"directory",
"to",
"zip",
"and",
"the",
"location",
"of",
"the",
"resulting",
"zip",
"file"
],
"default": null,
"is_optional": null
},
{
"identifier": "archive_filename",
"type": "str",
"docstring": "filename for the created zip file",
"docstring_tokens": [
"filename",
"for",
"the",
"created",
"zip",
"file"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import zipfile
import tempfile
import shutil
import os
def zip_directory(target_dir: str, archive_filename: str) -> str:
target_path = os.path.join(target_dir, archive_filename)
with tempfile.TemporaryDirectory() as temp_dirpath:
tmp_zip_filename = os.path.join(temp_dirpath, 'clusterrunner_tmp__' + archive_filename)
with zipfile.ZipFile(tmp_zip_filename, 'w', compression=zipfile.ZIP_DEFLATED) as zf:
for dirpath, dirnames, filenames in os.walk(target_dir):
for filename in filenames:
path = os.path.normpath(os.path.join(dirpath, filename))
if os.path.isfile(path):
relpath = os.path.relpath(path, target_dir)
zf.write(path, relpath)
shutil.move(tmp_zip_filename, target_path)
return target_path | 610,024 | 283 |
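Editor's note: a usage sketch against a throwaway directory. The temp-file dance in the function exists so the archive being written is never swept up into itself:

import tempfile
from pathlib import Path

target = tempfile.mkdtemp()
Path(target, "hello.txt").write_text("hi")
archive = zip_directory(target, "out.zip")
print(archive)  # <target>/out.zip, containing hello.txt at the archive root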
b91dc02d822cd0efae2076666373864943d39795 | zeekay/chii | chii.py | [
"Unlicense"
] | Python | event | <not_specific> | def event(*event_types):
"""Decorator which adds callable to the event registry"""
def decorator(func):
def wrapper(*func_args, **func_kwargs):
return func(*func_args, **func_kwargs)
wrapper._registry = 'events'
wrapper._event_types = event_types
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
wrapper.__hash__ = lambda *args: zlib.crc32(func.__name__)
return wrapper
return decorator | Decorator which adds callable to the event registry | Decorator which adds callable to the event registry | [
"Decorator",
"which",
"adds",
"callable",
"to",
"the",
"event",
"registry"
] | def event(*event_types):
def decorator(func):
def wrapper(*func_args, **func_kwargs):
return func(*func_args, **func_kwargs)
wrapper._registry = 'events'
wrapper._event_types = event_types
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
wrapper.__hash__ = lambda *args: zlib.crc32(func.__name__)
return wrapper
return decorator | [
"def",
"event",
"(",
"*",
"event_types",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"*",
"func_args",
",",
"**",
"func_kwargs",
")",
":",
"return",
"func",
"(",
"*",
"func_args",
",",
"**",
"func_kwargs",
")",
"wrapper",
".",
"_registry",
"=",
"'events'",
"wrapper",
".",
"_event_types",
"=",
"event_types",
"wrapper",
".",
"__name__",
"=",
"func",
".",
"__name__",
"wrapper",
".",
"__doc__",
"=",
"func",
".",
"__doc__",
"wrapper",
".",
"__hash__",
"=",
"lambda",
"*",
"args",
":",
"zlib",
".",
"crc32",
"(",
"func",
".",
"__name__",
")",
"return",
"wrapper",
"return",
"decorator"
] | Decorator which adds callable to the event registry | [
"Decorator",
"which",
"adds",
"callable",
"to",
"the",
"event",
"registry"
] | [
"\"\"\"Decorator which adds callable to the event registry\"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import zlib
def event(*event_types):
def decorator(func):
def wrapper(*func_args, **func_kwargs):
return func(*func_args, **func_kwargs)
wrapper._registry = 'events'
wrapper._event_types = event_types
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
wrapper.__hash__ = lambda *args: zlib.crc32(func.__name__)
return wrapper
return decorator | 610,025 | 117 |
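Editor's note: a registration sketch. Beware that on Python 3 the __hash__ lambda above would raise, since zlib.crc32 needs bytes (func.__name__.encode() would be required); the example below avoids calling hash():

@event("join", "part")
def greet(nick):
    """Say hi."""
    print("hello", nick)

print(greet._registry, greet._event_types)  # events ('join', 'part')
greet("chii")                               # hello chii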
247584f70c21dd1ded00a46ff76271c09a5064d7 | sakshamkumar-byt/sdc | sdc/rewrites/ir_utils.py | [
"BSD-2-Clause"
] | Python | filter_block_statements | null | def filter_block_statements(block, stmt_types):
"""
Filters given block returning statments of specific type
"""
for stmt in block.body:
if isinstance(stmt, stmt_types):
yield stmt |
Filters given block returning statments of specific type
| Filters given block returning statments of specific type | [
"Filters",
"given",
"block",
"returning",
"statments",
"of",
"specific",
"type"
] | def filter_block_statements(block, stmt_types):
for stmt in block.body:
if isinstance(stmt, stmt_types):
yield stmt | [
"def",
"filter_block_statements",
"(",
"block",
",",
"stmt_types",
")",
":",
"for",
"stmt",
"in",
"block",
".",
"body",
":",
"if",
"isinstance",
"(",
"stmt",
",",
"stmt_types",
")",
":",
"yield",
"stmt"
] | Filters given block returning statments of specific type | [
"Filters",
"given",
"block",
"returning",
"statments",
"of",
"specific",
"type"
] | [
"\"\"\"\n Filters given block returning statments of specific type\n \"\"\""
] | [
{
"param": "block",
"type": null
},
{
"param": "stmt_types",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "block",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "stmt_types",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def filter_block_statements(block, stmt_types):
for stmt in block.body:
if isinstance(stmt, stmt_types):
yield stmt | 610,026 | 787 |
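Editor's note: in sdc/Numba this filters e.g. ir.Assign nodes out of a real IR block, but any object with a .body list works, so a duck-typed stand-in shows the mechanics:

class Block:
    def __init__(self, body):
        self.body = body

blk = Block([1, "x", 2.5, "y"])
print(list(filter_block_statements(blk, str)))         # ['x', 'y']
print(list(filter_block_statements(blk, (int, float))))  # [1, 2.5] — tuple of types also works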
d829acd7daadf7b648e39df2834858685c84e583 | rpuntaie/rstdoc | rstdoc/retable.py | [
"MIT"
] | Python | pad_fields | <not_specific> | def pad_fields(row, widths):
"""Pads fields of the given row, so each field lines up nicely with the
others.
"""
widths = list(map(lambda w: ' %-' + str(w) + 's ', widths))
# Pad all fields using the calculated widths
new_row = []
for i in range(len(row)):
col = row[i]
col = widths[i] % col.strip()
new_row.append(col)
return new_row | Pads fields of the given row, so each field lines up nicely with the
others.
| Pads fields of the given row, so each field lines up nicely with the
others. | [
"Pads",
"fields",
"of",
"the",
"given",
"row",
"so",
"each",
"field",
"lines",
"up",
"nicely",
"with",
"the",
"others",
"."
] | def pad_fields(row, widths):
widths = list(map(lambda w: ' %-' + str(w) + 's ', widths))
new_row = []
for i in range(len(row)):
col = row[i]
col = widths[i] % col.strip()
new_row.append(col)
return new_row | [
"def",
"pad_fields",
"(",
"row",
",",
"widths",
")",
":",
"widths",
"=",
"list",
"(",
"map",
"(",
"lambda",
"w",
":",
"' %-'",
"+",
"str",
"(",
"w",
")",
"+",
"'s '",
",",
"widths",
")",
")",
"new_row",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"row",
")",
")",
":",
"col",
"=",
"row",
"[",
"i",
"]",
"col",
"=",
"widths",
"[",
"i",
"]",
"%",
"col",
".",
"strip",
"(",
")",
"new_row",
".",
"append",
"(",
"col",
")",
"return",
"new_row"
] | Pads fields of the given row, so each field lines up nicely with the
others. | [
"Pads",
"fields",
"of",
"the",
"given",
"row",
"so",
"each",
"field",
"lines",
"up",
"nicely",
"with",
"the",
"others",
"."
] | [
"\"\"\"Pads fields of the given row, so each field lines up nicely with the\n others.\n\n \"\"\"",
"# Pad all fields using the calculated widths"
] | [
{
"param": "row",
"type": null
},
{
"param": "widths",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "row",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "widths",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def pad_fields(row, widths):
widths = list(map(lambda w: ' %-' + str(w) + 's ', widths))
new_row = []
for i in range(len(row)):
col = row[i]
col = widths[i] % col.strip()
new_row.append(col)
return new_row | 610,027 | 929 |
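Editor's note: a padding sketch, assuming the widths were computed elsewhere as the maximum content width per column. Each field becomes " %-<w>s ", i.e. left-justified to the column width with one space of gutter on each side:

row = ["name", "age"]
print(pad_fields(row, [6, 3]))  # [' name   ', ' age ']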
88ce0ee7d31c9bb2baebcc5ec319e40b5f29d949 | ai-se/x-effort | Technix/sk.py | [
"MIT"
] | Python | leftRight | null | def leftRight(parts,epsilon=0.01):
"""Iterator. For all items in 'parts',
return everything to the left and everything
from here to the end. For reasons of
efficiency, take a first pass over the data
to pre-compute and cache right-hand-sides
"""
rights = {}
n = j = len(parts) - 1
while j > 0:
rights[j] = parts[j]
if j < n: rights[j] += rights[j+1]
j -=1
left = parts[0]
for i,one in enumerate(parts):
if i> 0:
if parts[i]._median - parts[i-1]._median > epsilon:
yield i,left,rights[i]
left += one | Iterator. For all items in 'parts',
return everything to the left and everything
from here to the end. For reasons of
efficiency, take a first pass over the data
to pre-compute and cache right-hand-sides
| Iterator. For all items in 'parts',
return everything to the left and everything
from here to the end. For reasons of
efficiency, take a first pass over the data
to pre-compute and cache right-hand-sides | [
"Iterator",
".",
"For",
"all",
"items",
"in",
"'",
"parts",
"'",
"return",
"everything",
"to",
"the",
"left",
"and",
"everything",
"from",
"here",
"to",
"the",
"end",
".",
"For",
"reasons",
"of",
"efficiency",
"take",
"a",
"first",
"pass",
"over",
"the",
"data",
"to",
"pre",
"-",
"compute",
"and",
"cache",
"right",
"-",
"hand",
"-",
"sides"
] | def leftRight(parts,epsilon=0.01):
rights = {}
n = j = len(parts) - 1
while j > 0:
rights[j] = parts[j]
if j < n: rights[j] += rights[j+1]
j -=1
left = parts[0]
for i,one in enumerate(parts):
if i> 0:
if parts[i]._median - parts[i-1]._median > epsilon:
yield i,left,rights[i]
left += one | [
"def",
"leftRight",
"(",
"parts",
",",
"epsilon",
"=",
"0.01",
")",
":",
"rights",
"=",
"{",
"}",
"n",
"=",
"j",
"=",
"len",
"(",
"parts",
")",
"-",
"1",
"while",
"j",
">",
"0",
":",
"rights",
"[",
"j",
"]",
"=",
"parts",
"[",
"j",
"]",
"if",
"j",
"<",
"n",
":",
"rights",
"[",
"j",
"]",
"+=",
"rights",
"[",
"j",
"+",
"1",
"]",
"j",
"-=",
"1",
"left",
"=",
"parts",
"[",
"0",
"]",
"for",
"i",
",",
"one",
"in",
"enumerate",
"(",
"parts",
")",
":",
"if",
"i",
">",
"0",
":",
"if",
"parts",
"[",
"i",
"]",
".",
"_median",
"-",
"parts",
"[",
"i",
"-",
"1",
"]",
".",
"_median",
">",
"epsilon",
":",
"yield",
"i",
",",
"left",
",",
"rights",
"[",
"i",
"]",
"left",
"+=",
"one"
] | Iterator. | [
"Iterator",
"."
] | [
"\"\"\"Iterator. For all items in 'parts',\n return everything to the left and everything\n from here to the end. For reasons of\n efficiency, take a first pass over the data\n to pre-compute and cache right-hand-sides\n \"\"\""
] | [
{
"param": "parts",
"type": null
},
{
"param": "epsilon",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "parts",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "epsilon",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def leftRight(parts,epsilon=0.01):
rights = {}
n = j = len(parts) - 1
while j > 0:
rights[j] = parts[j]
if j < n: rights[j] += rights[j+1]
j -=1
left = parts[0]
for i,one in enumerate(parts):
if i> 0:
if parts[i]._median - parts[i-1]._median > epsilon:
yield i,left,rights[i]
left += one | 610,028 | 131 |
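Editor's note: the real parts are statistics objects supporting += merging and a ._median attribute; the max-based merge below is a stand-in for illustration only (the real code accumulates samples), but it shows which split points get yielded when adjacent medians differ by more than epsilon:

class Part:
    def __init__(self, m):
        self._median = m
    def __add__(self, other):
        return Part(max(self._median, other._median))  # illustrative merge, not the real semantics

for i, left, right in leftRight([Part(0.1), Part(0.5), Part(0.9)]):
    print(i, left._median, right._median)  # 1 0.1 0.9  /  2 0.5 0.9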
888b15cfe5d5ca42e680b70038f7341a869cee96 | rosette-api-community/document-summarization | summarize.py | [
"MIT"
] | Python | entity_mentions | null | def entity_mentions(adm):
"""Generate named entity mentions from an ADM (Annotated Data Model)"""
for entity in adm['attributes']['entities']['items']:
for mention in entity['mentions']:
# Augment mentions with the entity type of the entity they refer to
mention['type'] = entity.get('type')
yield mention | Generate named entity mentions from an ADM (Annotated Data Model) | Generate named entity mentions from an ADM (Annotated Data Model) | [
"Generate",
"named",
"entity",
"mentions",
"from",
"an",
"ADM",
"(",
"Annotated",
"Data",
"Model",
")"
] | def entity_mentions(adm):
for entity in adm['attributes']['entities']['items']:
for mention in entity['mentions']:
mention['type'] = entity.get('type')
yield mention | [
"def",
"entity_mentions",
"(",
"adm",
")",
":",
"for",
"entity",
"in",
"adm",
"[",
"'attributes'",
"]",
"[",
"'entities'",
"]",
"[",
"'items'",
"]",
":",
"for",
"mention",
"in",
"entity",
"[",
"'mentions'",
"]",
":",
"mention",
"[",
"'type'",
"]",
"=",
"entity",
".",
"get",
"(",
"'type'",
")",
"yield",
"mention"
] | Generate named entity mentions from an ADM (Annotated Data Model) | [
"Generate",
"named",
"entity",
"mentions",
"from",
"an",
"ADM",
"(",
"Annotated",
"Data",
"Model",
")"
] | [
"\"\"\"Generate named entity mentions from an ADM (Annotated Data Model)\"\"\"",
"# Augment mentions with the entity type of the entity they refer to"
] | [
{
"param": "adm",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "adm",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def entity_mentions(adm):
for entity in adm['attributes']['entities']['items']:
for mention in entity['mentions']:
mention['type'] = entity.get('type')
yield mention | 610,029 | 359 |
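Editor's note: an ADM-shaped dict built by hand (field names follow the Rosette Annotated Data Model; the values are illustrative). Note the generator mutates each mention in place by copying the parent entity's type onto it:

adm = {"attributes": {"entities": {"items": [
    {"type": "PERSON", "mentions": [{"normalized": "Ada Lovelace"}]},
]}}}

for m in entity_mentions(adm):
    print(m["type"], m["normalized"])  # PERSON Ada Lovelace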
76dcad937482f9a697fa3809ccae29161c28d943 | ddkang/fai-imagenet | imagenet_nv/fastai_imagenet.py | [
"Apache-2.0"
] | Python | top_k | <not_specific> | def top_k(output, target, k=5):
"""Computes the precision@k for the specified values of k"""
batch_size = target.size(0)
_, pred = output.topk(k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
return correct_k.mul_(1.0 / batch_size) | Computes the precision@k for the specified values of k | Computes the precision@k for the specified values of k | [
"Computes",
"the",
"precision@k",
"for",
"the",
"specified",
"values",
"of",
"k"
] | def top_k(output, target, k=5):
batch_size = target.size(0)
_, pred = output.topk(k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
return correct_k.mul_(1.0 / batch_size) | [
"def",
"top_k",
"(",
"output",
",",
"target",
",",
"k",
"=",
"5",
")",
":",
"batch_size",
"=",
"target",
".",
"size",
"(",
"0",
")",
"_",
",",
"pred",
"=",
"output",
".",
"topk",
"(",
"k",
",",
"1",
",",
"True",
",",
"True",
")",
"pred",
"=",
"pred",
".",
"t",
"(",
")",
"correct",
"=",
"pred",
".",
"eq",
"(",
"target",
".",
"view",
"(",
"1",
",",
"-",
"1",
")",
".",
"expand_as",
"(",
"pred",
")",
")",
"correct_k",
"=",
"correct",
"[",
":",
"k",
"]",
".",
"view",
"(",
"-",
"1",
")",
".",
"float",
"(",
")",
".",
"sum",
"(",
"0",
",",
"keepdim",
"=",
"True",
")",
"return",
"correct_k",
".",
"mul_",
"(",
"1.0",
"/",
"batch_size",
")"
] | Computes the precision@k for the specified values of k | [
"Computes",
"the",
"precision@k",
"for",
"the",
"specified",
"values",
"of",
"k"
] | [
"\"\"\"Computes the precision@k for the specified values of k\"\"\""
] | [
{
"param": "output",
"type": null
},
{
"param": "target",
"type": null
},
{
"param": "k",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "output",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "target",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "k",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def top_k(output, target, k=5):
batch_size = target.size(0)
_, pred = output.topk(k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
return correct_k.mul_(1.0 / batch_size) | 610,030 | 514 |
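Editor's note: a quick check with random logits, assuming PyTorch; the result is a one-element tensor holding the fraction of the batch whose target class appears in the top-k predictions:

import torch

output = torch.randn(8, 100)            # batch of 8, 100 classes
target = torch.randint(0, 100, (8,))
print(top_k(output, target, k=5))       # e.g. tensor([0.1250]) — value varies with the random seed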
e36335563e8ebb13c2f2b4517f3b8ab5203ebdb7 | jpeerz/NZ-ORCID-Hub | orcid_hub/views.py | [
"MIT"
] | Python | year_range | <not_specific> | def year_range(entry):
"""Show an interval of employment in years."""
val = ""
if entry.get("start_date") is None or entry["start_date"]["year"]["value"] is None:
val = "unknown"
else:
val = entry["start_date"]["year"]["value"]
val += "-"
if entry.get("end_date") is None or entry["end_date"]["year"]["value"] is None:
val += "present"
else:
val += entry["end_date"]["year"]["value"]
return val | Show an interval of employment in years. | Show an interval of employment in years. | [
"Show",
"an",
"interval",
"of",
"employment",
"in",
"years",
"."
] | def year_range(entry):
val = ""
if entry.get("start_date") is None or entry["start_date"]["year"]["value"] is None:
val = "unknown"
else:
val = entry["start_date"]["year"]["value"]
val += "-"
if entry.get("end_date") is None or entry["end_date"]["year"]["value"] is None:
val += "present"
else:
val += entry["end_date"]["year"]["value"]
return val | [
"def",
"year_range",
"(",
"entry",
")",
":",
"val",
"=",
"\"\"",
"if",
"entry",
".",
"get",
"(",
"\"start_date\"",
")",
"is",
"None",
"or",
"entry",
"[",
"\"start_date\"",
"]",
"[",
"\"year\"",
"]",
"[",
"\"value\"",
"]",
"is",
"None",
":",
"val",
"=",
"\"unknown\"",
"else",
":",
"val",
"=",
"entry",
"[",
"\"start_date\"",
"]",
"[",
"\"year\"",
"]",
"[",
"\"value\"",
"]",
"val",
"+=",
"\"-\"",
"if",
"entry",
".",
"get",
"(",
"\"end_date\"",
")",
"is",
"None",
"or",
"entry",
"[",
"\"end_date\"",
"]",
"[",
"\"year\"",
"]",
"[",
"\"value\"",
"]",
"is",
"None",
":",
"val",
"+=",
"\"present\"",
"else",
":",
"val",
"+=",
"entry",
"[",
"\"end_date\"",
"]",
"[",
"\"year\"",
"]",
"[",
"\"value\"",
"]",
"return",
"val"
] | Show an interval of employment in years. | [
"Show",
"an",
"interval",
"of",
"employment",
"in",
"years",
"."
] | [
"\"\"\"Show an interval of employment in years.\"\"\""
] | [
{
"param": "entry",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "entry",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def year_range(entry):
val = ""
if entry.get("start_date") is None or entry["start_date"]["year"]["value"] is None:
val = "unknown"
else:
val = entry["start_date"]["year"]["value"]
val += "-"
if entry.get("end_date") is None or entry["end_date"]["year"]["value"] is None:
val += "present"
else:
val += entry["end_date"]["year"]["value"]
return val | 610,032 | 964 |
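Editor's note: ORCID date fields arrive as nested {"year": {"value": ...}} dicts with string values (hence the string concatenation above); a synthetic entry for illustration:

entry = {"start_date": {"year": {"value": "2015"}}, "end_date": None}
print(year_range(entry))  # 2015-present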
9ec4982e2080e773177419c2aa3adcc7e60c5bf5 | ZLLentz/pswalker | pswalker/configure.py | [
"BSD-3-Clause-LBNL"
] | Python | namify_config | <not_specific> | def namify_config(obj, **cfg):
"""
Prepend everything in cfg's keys with obj.name_, and remove entries where
the value is None.
"""
return {obj.name + "_" + k: v for k, v in cfg.items() if v is not None} |
Prepend everything in cfg's keys with obj.name_, and remove entries where
the value is None.
| Prepend everything in cfg's keys with obj.name_, and remove entries where
the value is None. | [
"Prepend",
"everything",
"in",
"cfg",
"'",
"s",
"keys",
"with",
"obj",
".",
"name_",
"and",
"remove",
"entries",
"where",
"the",
"value",
"is",
"None",
"."
] | def namify_config(obj, **cfg):
return {obj.name + "_" + k: v for k, v in cfg.items() if v is not None} | [
"def",
"namify_config",
"(",
"obj",
",",
"**",
"cfg",
")",
":",
"return",
"{",
"obj",
".",
"name",
"+",
"\"_\"",
"+",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"cfg",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"}"
] | Prepend everything in cfg's keys with obj.name_, and remove entries where
the value is None. | [
"Prepend",
"everything",
"in",
"cfg",
"'",
"s",
"keys",
"with",
"obj",
".",
"name_",
"and",
"remove",
"entries",
"where",
"the",
"value",
"is",
"None",
"."
] | [
"\"\"\"\n Prepend everything in cfg's keys with obj.name_, and remove entries where\n the value is None.\n \"\"\""
] | [
{
"param": "obj",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "obj",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def namify_config(obj, **cfg):
return {obj.name + "_" + k: v for k, v in cfg.items() if v is not None} | 610,033 | 89 |
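Editor's note: anything with a .name attribute works as the first argument, so a SimpleNamespace is enough to show the prefixing and the None-filtering:

from types import SimpleNamespace

obj = SimpleNamespace(name="m1")
print(namify_config(obj, detector=None, gain=2))  # {'m1_gain': 2}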
9cf27a833160cb809aef768668bb159ee6e2ac82 | nathanawmk/LO-PHI | python-lophi/lophi/globals.py | [
"BSD-3-Clause"
] | Python | send_socket_data | null | def send_socket_data(sock, data):
"""
Given a socket and some data, this will prefix the length of the data
and send it using the socket.
"""
# Put data in a network format
import struct
data = struct.pack("H", len(data)) + data
# Send the data
sent = 0
while sent < len(data):
sent += sock.send(data[sent:]) |
Given a socket and some data, this will prefix the length of the data
and send it using the socket.
| Given a socket and some data, this will prefix the length of the data
and send it using the socket. | [
"Given",
"a",
"socket",
"and",
"some",
"data",
"this",
"will",
"prefix",
"the",
"length",
"of",
"the",
"data",
"and",
"send",
"it",
"using",
"the",
"socket",
"."
] | def send_socket_data(sock, data):
import struct
data = struct.pack("H", len(data)) + data
sent = 0
while sent < len(data):
sent += sock.send(data[sent:]) | [
"def",
"send_socket_data",
"(",
"sock",
",",
"data",
")",
":",
"import",
"struct",
"data",
"=",
"struct",
".",
"pack",
"(",
"\"H\"",
",",
"len",
"(",
"data",
")",
")",
"+",
"data",
"sent",
"=",
"0",
"while",
"sent",
"<",
"len",
"(",
"data",
")",
":",
"sent",
"+=",
"sock",
".",
"send",
"(",
"data",
"[",
"sent",
":",
"]",
")"
] | Given a socket and some data, this will prefix the length of the data
and send it using the socket. | [
"Given",
"a",
"socket",
"and",
"some",
"data",
"this",
"will",
"prefix",
"the",
"length",
"of",
"the",
"data",
"and",
"send",
"it",
"using",
"the",
"socket",
"."
] | [
"\"\"\"\n Given a socket and some data, this will prefix the length of the data\n and send it using the socket.\n \"\"\"",
"# Put data in a network format",
"# Send the data"
] | [
{
"param": "sock",
"type": null
},
{
"param": "data",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "sock",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import struct
def send_socket_data(sock, data):
import struct
data = struct.pack("H", len(data)) + data
sent = 0
while sent < len(data):
sent += sock.send(data[sent:]) | 610,034 | 808 |
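Editor's note: a framing demo over a local socket pair, assuming data is bytes (required on Python 3). Note that "H" packs in native byte order despite the "network format" comment; "!H" would be the conventional network-order prefix:

import socket
import struct

a, b = socket.socketpair()
send_socket_data(a, b"hello")
size = struct.unpack("H", b.recv(2))[0]  # read the 2-byte length prefix back
print(size, b.recv(size))                # 5 b'hello'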
eca26cb9e0eaaf6818d97300e332278571bfce28 | pyviz/nei | nei/parser.py | [
"BSD-3-Clause"
] | Python | filter_out_markdown_ranges | <not_specific> | def filter_out_markdown_ranges(cls, lines, ranges):
"""
As markdown is the first pass so all lines should be present.
This function filters out the identified markdown ranges from all
the lines to find out what is remaining (potential code cells).
"""
all_line_numbers = list(range(len(lines)))
accounted_lines = [list(range(start, stop+1)) for (start, stop, _, _) in ranges]
flattened = [el for group in accounted_lines for el in group]
remaining_line_numbers = set(all_line_numbers) - set(flattened)
return [(el, lines[el]) for el in sorted(remaining_line_numbers)] |
As markdown is the first pass so all lines should be present.
This function filters out the identified markdown ranges from all
the lines to find out what is remaining (potential code cells).
| As markdown is the first pass so all lines should be present.
This function filters out the identified markdown ranges from all
the lines to find out what is remaining (potential code cells). | [
"As",
"markdown",
"is",
"the",
"first",
"pass",
"so",
"all",
"lines",
"should",
"be",
"present",
".",
"This",
"function",
"filters",
"out",
"the",
"identified",
"markdown",
"ranges",
"from",
"all",
"the",
"lines",
"to",
"find",
"out",
"what",
"is",
"remaining",
"(",
"potential",
"code",
"cells",
")",
"."
] | def filter_out_markdown_ranges(cls, lines, ranges):
all_line_numbers = list(range(len(lines)))
accounted_lines = [list(range(start, stop+1)) for (start, stop, _, _) in ranges]
flattened = [el for group in accounted_lines for el in group]
remaining_line_numbers = set(all_line_numbers) - set(flattened)
return [(el, lines[el]) for el in sorted(remaining_line_numbers)] | [
"def",
"filter_out_markdown_ranges",
"(",
"cls",
",",
"lines",
",",
"ranges",
")",
":",
"all_line_numbers",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"lines",
")",
")",
")",
"accounted_lines",
"=",
"[",
"list",
"(",
"range",
"(",
"start",
",",
"stop",
"+",
"1",
")",
")",
"for",
"(",
"start",
",",
"stop",
",",
"_",
",",
"_",
")",
"in",
"ranges",
"]",
"flattened",
"=",
"[",
"el",
"for",
"group",
"in",
"accounted_lines",
"for",
"el",
"in",
"group",
"]",
"remaining_line_numbers",
"=",
"set",
"(",
"all_line_numbers",
")",
"-",
"set",
"(",
"flattened",
")",
"return",
"[",
"(",
"el",
",",
"lines",
"[",
"el",
"]",
")",
"for",
"el",
"in",
"sorted",
"(",
"remaining_line_numbers",
")",
"]"
] | As markdown is the first pass so all lines should be present. | [
"As",
"markdown",
"is",
"the",
"first",
"pass",
"so",
"all",
"lines",
"should",
"be",
"present",
"."
] | [
"\"\"\"\n As markdown is the first pass so all lines should be present.\n\n This function filters out the identified markdown ranges from all\n the lines to find out what is remaining (potential code cells).\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "lines",
"type": null
},
{
"param": "ranges",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "lines",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ranges",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def filter_out_markdown_ranges(cls, lines, ranges):
all_line_numbers = list(range(len(lines)))
accounted_lines = [list(range(start, stop+1)) for (start, stop, _, _) in ranges]
flattened = [el for group in accounted_lines for el in group]
remaining_line_numbers = set(all_line_numbers) - set(flattened)
return [(el, lines[el]) for el in sorted(remaining_line_numbers)] | 610,035 | 934 |
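Editor's note: ranges are (start, stop, ...) tuples of inclusive line spans already claimed as markdown; since the cls parameter is unused, passing None works for a standalone call:

lines = ["# title", "", "x = 1", "y = 2"]
ranges = [(0, 1, None, None)]  # lines 0-1 are markdown
print(filter_out_markdown_ranges(None, lines, ranges))  # [(2, 'x = 1'), (3, 'y = 2')]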
0106066cba68fb2dfd052b986c91e04af98a1ae2 | Yuyan-Li/inferno | inferno/extensions/criteria/regularized.py | [
"Apache-2.0"
] | Python | collect_losses | <not_specific> | def collect_losses(module):
"""Collect `_losses` dictionaries from module and children
:param module: a Module to be searched for losses
:return: dictionary of loss names to values
"""
losses = {}
def _collect(m):
if hasattr(m, '_losses'):
for k, v in m._losses.items():
if k in losses:
losses[k] = losses[k] + v
else:
losses[k] = v
module.apply(_collect)
return losses | Collect `_losses` dictionaries from module and children
:param module: a Module to be searched for losses
:return: dictionary of loss names to values
| Collect `_losses` dictionaries from module and children | [
"Collect",
"`",
"_losses",
"`",
"dictionaries",
"from",
"module",
"and",
"children"
] | def collect_losses(module):
losses = {}
def _collect(m):
if hasattr(m, '_losses'):
for k, v in m._losses.items():
if k in losses:
losses[k] = losses[k] + v
else:
losses[k] = v
module.apply(_collect)
return losses | [
"def",
"collect_losses",
"(",
"module",
")",
":",
"losses",
"=",
"{",
"}",
"def",
"_collect",
"(",
"m",
")",
":",
"if",
"hasattr",
"(",
"m",
",",
"'_losses'",
")",
":",
"for",
"k",
",",
"v",
"in",
"m",
".",
"_losses",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"losses",
":",
"losses",
"[",
"k",
"]",
"=",
"losses",
"[",
"k",
"]",
"+",
"v",
"else",
":",
"losses",
"[",
"k",
"]",
"=",
"v",
"module",
".",
"apply",
"(",
"_collect",
")",
"return",
"losses"
] | Collect `_losses` dictionaries from module and children | [
"Collect",
"`",
"_losses",
"`",
"dictionaries",
"from",
"module",
"and",
"children"
] | [
"\"\"\"Collect `_losses` dictionaries from module and children\n\n :param module: a Module to be searched for losses\n :return: dictionary of loss names to values\n \"\"\""
] | [
{
"param": "module",
"type": null
}
] | {
"returns": [
{
"docstring": "dictionary of loss names to values",
"docstring_tokens": [
"dictionary",
"of",
"loss",
"names",
"to",
"values"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "module",
"type": null,
"docstring": "a Module to be searched for losses",
"docstring_tokens": [
"a",
"Module",
"to",
"be",
"searched",
"for",
"losses"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def collect_losses(module):
losses = {}
def _collect(m):
if hasattr(m, '_losses'):
for k, v in m._losses.items():
if k in losses:
losses[k] = losses[k] + v
else:
losses[k] = v
module.apply(_collect)
return losses | 610,036 | 547 |
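Editor's note: an aggregation sketch, assuming PyTorch. Per-module regularizers are stashed in a _losses dict and summed across the whole module tree via Module.apply:

import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
for m in net:
    m._losses = {"l2": m.weight.pow(2).sum()}  # one regularizer term per child module

print(collect_losses(net))  # {'l2': tensor(...)} — sum over both Linear layers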
5a664ef0c2304b3a8b760be062fd044745af5395 | kwryankrattiger/xmscore | _package/xms/core/filesystem/filesystem.py | [
"BSD-2-Clause"
] | Python | copyfile | null | def copyfile(src, dest):
"""Copy a file on disk, suppressing errors if source and destination are the same file.
Args:
src (str): Source file path.
dest (str): Destination file path.
"""
try:
shutil.copyfile(src, dest)
except shutil.SameFileError:
pass | Copy a file on disk, suppressing errors if source and destination are the same file.
Args:
src (str): Source file path.
dest (str): Destination file path.
| Copy a file on disk, suppressing errors if source and destination are the same file. | [
"Copy",
"a",
"file",
"on",
"disk",
"suppressing",
"errors",
"if",
"source",
"and",
"destination",
"are",
"the",
"same",
"file",
"."
] | def copyfile(src, dest):
try:
shutil.copyfile(src, dest)
except shutil.SameFileError:
pass | [
"def",
"copyfile",
"(",
"src",
",",
"dest",
")",
":",
"try",
":",
"shutil",
".",
"copyfile",
"(",
"src",
",",
"dest",
")",
"except",
"shutil",
".",
"SameFileError",
":",
"pass"
] | Copy a file on disk, suppressing errors if source and destination are the same file. | [
"Copy",
"a",
"file",
"on",
"disk",
"suppressing",
"errors",
"if",
"source",
"and",
"destination",
"are",
"the",
"same",
"file",
"."
] | [
"\"\"\"Copy a file on disk, suppressing errors if source and destination are the same file.\n\n Args:\n src (str): Source file path.\n dest (str): Destination file path.\n\n \"\"\""
] | [
{
"param": "src",
"type": null
},
{
"param": "dest",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "src",
"type": null,
"docstring": "Source file path.",
"docstring_tokens": [
"Source",
"file",
"path",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "dest",
"type": null,
"docstring": "Destination file path.",
"docstring_tokens": [
"Destination",
"file",
"path",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | import shutil
def copyfile(src, dest):
try:
shutil.copyfile(src, dest)
except shutil.SameFileError:
pass | 610,037 | 815 |
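Editor's note: the point of the wrapper is that copying a file onto itself is a silent no-op instead of an exception (shutil.SameFileError exists on Python 3.4+):

from pathlib import Path

Path("a.txt").write_text("hi")
copyfile("a.txt", "a.txt")  # SameFileError swallowed, nothing happens
copyfile("a.txt", "b.txt")  # ordinary copy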
192525df16c8ce921d1379c032a7e00edc975987 | adit98/good_robot | models.py | [
"BSD-2-Clause"
] | Python | tile_vector_as_image_channels_torch | <not_specific> | def tile_vector_as_image_channels_torch(vector_op, image_shape):
"""
Takes a vector of length n and an image shape BCHW,
and repeat the vector as channels at each pixel.
Code source: https://github.com/ahundt/costar_dataset/blob/master/costar_dataset/block_stacking_reader_torch.py
# Params
vector_op: A tensor vector to tile.
image_shape: A list of integers [width, height] with the desired dimensions.
"""
# input vector shape
ivs = vector_op.shape
# print('image_shape: ' + str(image_shape))
# reshape the vector into a single pixel
vector_op = vector_op.reshape([ivs[0], ivs[1], 1, 1])
# print('vector_op pre-repeat shape:' + str(vector_op.shape))
# repeat the vector at every pixel according to the specified image shape
vector_op = vector_op.expand([ivs[0], ivs[1], image_shape[2], image_shape[3]])
# print('vector_op post-repeat shape:' + str(vector_op.shape))
# print('vector_op first channel: ' + str(vector_op[0,:,0,0]))
return vector_op |
Takes a vector of length n and an image shape BCHW,
and repeat the vector as channels at each pixel.
Code source: https://github.com/ahundt/costar_dataset/blob/master/costar_dataset/block_stacking_reader_torch.py
# Params
vector_op: A tensor vector to tile.
image_shape: A list of integers [width, height] with the desired dimensions.
| Takes a vector of length n and an image shape BCHW,
and repeat the vector as channels at each pixel.
Params
vector_op: A tensor vector to tile.
image_shape: A list of integers [width, height] with the desired dimensions. | [
"Takes",
"a",
"vector",
"of",
"length",
"n",
"and",
"an",
"image",
"shape",
"BCHW",
"and",
"repeat",
"the",
"vector",
"as",
"channels",
"at",
"each",
"pixel",
".",
"Params",
"vector_op",
":",
"A",
"tensor",
"vector",
"to",
"tile",
".",
"image_shape",
":",
"A",
"list",
"of",
"integers",
"[",
"width",
"height",
"]",
"with",
"the",
"desired",
"dimensions",
"."
] | def tile_vector_as_image_channels_torch(vector_op, image_shape):
ivs = vector_op.shape
vector_op = vector_op.reshape([ivs[0], ivs[1], 1, 1])
vector_op = vector_op.expand([ivs[0], ivs[1], image_shape[2], image_shape[3]])
return vector_op | [
"def",
"tile_vector_as_image_channels_torch",
"(",
"vector_op",
",",
"image_shape",
")",
":",
"ivs",
"=",
"vector_op",
".",
"shape",
"vector_op",
"=",
"vector_op",
".",
"reshape",
"(",
"[",
"ivs",
"[",
"0",
"]",
",",
"ivs",
"[",
"1",
"]",
",",
"1",
",",
"1",
"]",
")",
"vector_op",
"=",
"vector_op",
".",
"expand",
"(",
"[",
"ivs",
"[",
"0",
"]",
",",
"ivs",
"[",
"1",
"]",
",",
"image_shape",
"[",
"2",
"]",
",",
"image_shape",
"[",
"3",
"]",
"]",
")",
"return",
"vector_op"
] | Takes a vector of length n and an image shape BCHW,
and repeat the vector as channels at each pixel. | [
"Takes",
"a",
"vector",
"of",
"length",
"n",
"and",
"an",
"image",
"shape",
"BCHW",
"and",
"repeat",
"the",
"vector",
"as",
"channels",
"at",
"each",
"pixel",
"."
] | [
"\"\"\"\n Takes a vector of length n and an image shape BCHW,\n and repeat the vector as channels at each pixel.\n\n Code source: https://github.com/ahundt/costar_dataset/blob/master/costar_dataset/block_stacking_reader_torch.py\n\n # Params\n vector_op: A tensor vector to tile.\n image_shape: A list of integers [width, height] with the desired dimensions.\n \"\"\"",
"# input vector shape",
"# print('image_shape: ' + str(image_shape))",
"# reshape the vector into a single pixel",
"# print('vector_op pre-repeat shape:' + str(vector_op.shape))",
"# repeat the vector at every pixel according to the specified image shape",
"# print('vector_op post-repeat shape:' + str(vector_op.shape))",
"# print('vector_op first channel: ' + str(vector_op[0,:,0,0]))"
] | [
{
"param": "vector_op",
"type": null
},
{
"param": "image_shape",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "vector_op",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "image_shape",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def tile_vector_as_image_channels_torch(vector_op, image_shape):
ivs = vector_op.shape
vector_op = vector_op.reshape([ivs[0], ivs[1], 1, 1])
vector_op = vector_op.expand([ivs[0], ivs[1], image_shape[2], image_shape[3]])
return vector_op | 610,038 | 130 |
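Editor's note: a shape check, assuming PyTorch and BCHW image shapes; expand() broadcasts the per-pixel vector without allocating copies:

import torch

vec = torch.arange(6.).reshape(2, 3)                     # B=2, C=3
tiled = tile_vector_as_image_channels_torch(vec, (2, 3, 4, 5))
print(tiled.shape)                                       # torch.Size([2, 3, 4, 5])
print(torch.all(tiled[0, 1] == vec[0, 1]))               # tensor(True) — same value at every pixel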
1881c0989f3b7c74ba09687d9418cd5a1bdac9ba | TheRoyalEgg/gachapy | gachapy/controller.py | [
"MIT"
] | Python | default_key | float | def default_key(rarity: float) -> float:
"""The default function that converts rarity to drop rate (1/x)
Parameters
----------
rarity : float
the rarity of the item
Returns
-------
float
the drop rate of the item
"""
return 1 / rarity | The default function that converts rarity to drop rate (1/x)
Parameters
----------
rarity : float
the rarity of the item
Returns
-------
float
the drop rate of the item
| The default function that converts rarity to drop rate (1/x)
Parameters
rarity : float
the rarity of the item
Returns
float
the drop rate of the item | [
"The",
"default",
"function",
"that",
"converts",
"rarity",
"to",
"drop",
"rate",
"(",
"1",
"/",
"x",
")",
"Parameters",
"rarity",
":",
"float",
"the",
"rarity",
"of",
"the",
"item",
"Returns",
"float",
"the",
"drop",
"rate",
"of",
"the",
"item"
] | def default_key(rarity: float) -> float:
return 1 / rarity | [
"def",
"default_key",
"(",
"rarity",
":",
"float",
")",
"->",
"float",
":",
"return",
"1",
"/",
"rarity"
] | The default function that converts rarity to drop rate (1/x)
Parameters | [
"The",
"default",
"function",
"that",
"converts",
"rarity",
"to",
"drop",
"rate",
"(",
"1",
"/",
"x",
")",
"Parameters"
] | [
"\"\"\"The default function that converts rarity to drop rate (1/x)\n\n Parameters\n ----------\n rarity : float\n the rarity of the item\n\n Returns\n -------\n float\n the drop rate of the item\n \"\"\""
] | [
{
"param": "rarity",
"type": "float"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "rarity",
"type": "float",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def default_key(rarity: float) -> float:
return 1 / rarity | 610,041 | 485 |
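Editor's note: the mapping is a simple reciprocal, so higher rarity means lower drop rate (and a rarity of 0 would raise ZeroDivisionError):

print(default_key(2.0))  # 0.5
print(default_key(100))  # 0.01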
b9bb749ce6573d509c5e688886e4acbe73c5f1cb | enda2020/it-cert-automation-practice | email.py | [
"Apache-2.0"
] | Python | send_mail | null | def send_mail(message):
"""Sends the message to the configured SMTP server."""
mail_server = smtplib.SMTP('localhost')
mail_server.send_message(message)
mail_server.quit() | Sends the message to the configured SMTP server. | Sends the message to the configured SMTP server. | [
"Sends",
"the",
"message",
"to",
"the",
"configured",
"SMTP",
"server",
"."
] | def send_mail(message):
mail_server = smtplib.SMTP('localhost')
mail_server.send_message(message)
mail_server.quit() | [
"def",
"send_mail",
"(",
"message",
")",
":",
"mail_server",
"=",
"smtplib",
".",
"SMTP",
"(",
"'localhost'",
")",
"mail_server",
".",
"send_message",
"(",
"message",
")",
"mail_server",
".",
"quit",
"(",
")"
] | Sends the message to the configured SMTP server. | [
"Sends",
"the",
"message",
"to",
"the",
"configured",
"SMTP",
"server",
"."
] | [
"\"\"\"Sends the message to the configured SMTP server.\"\"\""
] | [
{
"param": "message",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "message",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import smtplib
def send_mail(message):
mail_server = smtplib.SMTP('localhost')
mail_server.send_message(message)
mail_server.quit() | 610,042 | 522 |
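Editor's note: a composition sketch, assuming an SMTP server is listening on localhost:25 (e.g. a local debugging SMTP server). Also note the source file above is named email.py, which shadows the stdlib email package and can break smtplib imports in practice:

from email.message import EmailMessage

message = EmailMessage()
message["From"] = "me@example.com"      # addresses are illustrative
message["To"] = "you@example.com"
message["Subject"] = "ping"
message.set_content("hello")
send_mail(message)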
5508084bd76087ba560215f875604ec8a81bc000 | lilanyu/biosteam | biosteam/thermo/interface.py | [
"MIT"
] | Python | Somayajulu | <not_specific> | def Somayajulu(T, Tc, A, B, C):
r'''Calculates air-water surface tension using the [1]_
emperical (parameter-regressed) method. Well regressed, no recent data.
.. math::
\sigma=aX^{5/4}+bX^{9/4}+cX^{13/4}
X=(T_c-T)/T_c
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
A : float
Regression parameter
B : float
Regression parameter
C : float
Regression parameter
Returns
-------
sigma : float
Liquid surface tension, N/m
Notes
-----
Presently untested, but matches expected values. Internal units are mN/m.
Form of function returns imaginary results when T > Tc; None is returned
if this is the case. Function is claimed valid from the triple to the
critical point. Results can be evaluated beneath the triple point.
Examples
--------
Water at 300 K
>>> Somayajulu(300, 647.126, 232.713514, -140.18645, -4.890098)
0.07166386387996757
References
----------
.. [1] Somayajulu, G. R. "A Generalized Equation for Surface Tension from
the Triple Point to the Critical Point." International Journal of
Thermophysics 9, no. 4 (July 1988): 559-66. doi:10.1007/BF00503154.
'''
X = (Tc-T)/Tc
sigma = (A*X**1.25 + B*X**2.25 + C*X**3.25)/1000.
return sigma | r'''Calculates air-water surface tension using the [1]_
emperical (parameter-regressed) method. Well regressed, no recent data.
.. math::
\sigma=aX^{5/4}+bX^{9/4}+cX^{13/4}
X=(T_c-T)/T_c
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
A : float
Regression parameter
B : float
Regression parameter
C : float
Regression parameter
Returns
-------
sigma : float
Liquid surface tension, N/m
Notes
-----
Presently untested, but matches expected values. Internal units are mN/m.
Form of function returns imaginary results when T > Tc; None is returned
if this is the case. Function is claimed valid from the triple to the
critical point. Results can be evaluated beneath the triple point.
Examples
--------
Water at 300 K
>>> Somayajulu(300, 647.126, 232.713514, -140.18645, -4.890098)
0.07166386387996757
References
----------
.. [1] Somayajulu, G. R. "A Generalized Equation for Surface Tension from
the Triple Point to the Critical Point." International Journal of
Thermophysics 9, no. 4 (July 1988): 559-66. doi:10.1007/BF00503154.
| r'''Calculates air-water surface tension using the [1]_
emperical (parameter-regressed) method. Well regressed, no recent data.
sigma : float
Liquid surface tension, N/m
Notes
Presently untested, but matches expected values. Internal units are mN/m.
Form of function returns imaginary results when T > Tc; None is returned
if this is the case. Function is claimed valid from the triple to the
critical point. Results can be evaluated beneath the triple point.
Examples
[1] Somayajulu, G. R. "A Generalized Equation for Surface Tension from
the Triple Point to the Critical Point." International Journal of
Thermophysics 9, no. | [
"r",
"'",
"'",
"'",
"Calculates",
"air",
"-",
"water",
"surface",
"tension",
"using",
"the",
"[",
"1",
"]",
"_",
"emperical",
"(",
"parameter",
"-",
"regressed",
")",
"method",
".",
"Well",
"regressed",
"no",
"recent",
"data",
".",
"sigma",
":",
"float",
"Liquid",
"surface",
"tension",
"N",
"/",
"m",
"Notes",
"Presently",
"untested",
"but",
"matches",
"expected",
"values",
".",
"Internal",
"units",
"are",
"mN",
"/",
"m",
".",
"Form",
"of",
"function",
"returns",
"imaginary",
"results",
"when",
"T",
">",
"Tc",
";",
"None",
"is",
"returned",
"if",
"this",
"is",
"the",
"case",
".",
"Function",
"is",
"claimed",
"valid",
"from",
"the",
"triple",
"to",
"the",
"critical",
"point",
".",
"Results",
"can",
"be",
"evaluated",
"beneath",
"the",
"triple",
"point",
".",
"Examples",
"[",
"1",
"]",
"Somayajulu",
"G",
".",
"R",
".",
"\"",
"A",
"Generalized",
"Equation",
"for",
"Surface",
"Tension",
"from",
"the",
"Triple",
"Point",
"to",
"the",
"Critical",
"Point",
".",
"\"",
"International",
"Journal",
"of",
"Thermophysics",
"9",
"no",
"."
] | def Somayajulu(T, Tc, A, B, C):
X = (Tc-T)/Tc
sigma = (A*X**1.25 + B*X**2.25 + C*X**3.25)/1000.
return sigma | [
"def",
"Somayajulu",
"(",
"T",
",",
"Tc",
",",
"A",
",",
"B",
",",
"C",
")",
":",
"X",
"=",
"(",
"Tc",
"-",
"T",
")",
"/",
"Tc",
"sigma",
"=",
"(",
"A",
"*",
"X",
"**",
"1.25",
"+",
"B",
"*",
"X",
"**",
"2.25",
"+",
"C",
"*",
"X",
"**",
"3.25",
")",
"/",
"1000.",
"return",
"sigma"
] | r'''Calculates air-water surface tension using the [1]_
emperical (parameter-regressed) method. | [
"r",
"'",
"'",
"'",
"Calculates",
"air",
"-",
"water",
"surface",
"tension",
"using",
"the",
"[",
"1",
"]",
"_",
"emperical",
"(",
"parameter",
"-",
"regressed",
")",
"method",
"."
] | [
"r'''Calculates air-water surface tension using the [1]_\n emperical (parameter-regressed) method. Well regressed, no recent data.\n .. math::\n \\sigma=aX^{5/4}+bX^{9/4}+cX^{13/4}\n X=(T_c-T)/T_c\n Parameters\n ----------\n T : float\n Temperature of fluid [K]\n Tc : float\n Critical temperature of fluid [K]\n A : float\n Regression parameter\n B : float\n Regression parameter\n C : float\n Regression parameter\n Returns\n -------\n sigma : float\n Liquid surface tension, N/m\n Notes\n -----\n Presently untested, but matches expected values. Internal units are mN/m.\n Form of function returns imaginary results when T > Tc; None is returned\n if this is the case. Function is claimed valid from the triple to the\n critical point. Results can be evaluated beneath the triple point.\n Examples\n --------\n Water at 300 K\n >>> Somayajulu(300, 647.126, 232.713514, -140.18645, -4.890098)\n 0.07166386387996757\n References\n ----------\n .. [1] Somayajulu, G. R. \"A Generalized Equation for Surface Tension from\n the Triple Point to the Critical Point.\" International Journal of\n Thermophysics 9, no. 4 (July 1988): 559-66. doi:10.1007/BF00503154.\n '''"
] | [
{
"param": "T",
"type": null
},
{
"param": "Tc",
"type": null
},
{
"param": "A",
"type": null
},
{
"param": "B",
"type": null
},
{
"param": "C",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "T",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "Tc",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "A",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "B",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "C",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def Somayajulu(T, Tc, A, B, C):
X = (Tc-T)/Tc
sigma = (A*X**1.25 + B*X**2.25 + C*X**3.25)/1000.
return sigma | 610,043 | 373 |
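Editor's note: reproducing the docstring's own water-at-300-K check, using the regression constants given there:

sigma = Somayajulu(300, 647.126, 232.713514, -140.18645, -4.890098)
print(round(sigma, 6))  # 0.071664 N/m, matching the documented 0.07166386387996757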
fc6e11ffa88b7094b4923e5212e158136ff80bf6 | minhtannguyen/RAdam | nmt/my_module/utils.py | [
"Apache-2.0"
] | Python | Unfreeze_layer | null | def Unfreeze_layer(layer):
"""
Unfreeze a given layer in t he DNN.
#Argument: the name of a layer in the given DNN.
"""
for param in layer.parameters():
param.requires_grad = True |
Unfreeze a given layer in t he DNN.
#Argument: the name of a layer in the given DNN.
| Unfreeze a given layer in t he DNN.
Argument: the name of a layer in the given DNN. | [
"Unfreeze",
"a",
"given",
"layer",
"in",
"t",
"he",
"DNN",
".",
"Argument",
":",
"the",
"name",
"of",
"a",
"layer",
"in",
"the",
"given",
"DNN",
"."
] | def Unfreeze_layer(layer):
for param in layer.parameters():
param.requires_grad = True | [
"def",
"Unfreeze_layer",
"(",
"layer",
")",
":",
"for",
"param",
"in",
"layer",
".",
"parameters",
"(",
")",
":",
"param",
".",
"requires_grad",
"=",
"True"
] | Unfreeze a given layer in t he DNN. | [
"Unfreeze",
"a",
"given",
"layer",
"in",
"t",
"he",
"DNN",
"."
] | [
"\"\"\"\n Unfreeze a given layer in t he DNN.\n #Argument: the name of a layer in the given DNN.\n \"\"\""
] | [
{
"param": "layer",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "layer",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def Unfreeze_layer(layer):
for param in layer.parameters():
param.requires_grad = True | 610,045 | 443 |
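A minimal usage sketch for the record above (illustrative only; it assumes PyTorch is installed and the record's function is in scope):

# Usage sketch with a small hypothetical layer.
import torch.nn as nn

layer = nn.Linear(4, 2)
for p in layer.parameters():
    p.requires_grad = False       # freeze everything first
Unfreeze_layer(layer)
print(all(p.requires_grad for p in layer.parameters()))  # True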
c9d65a0433305218c0d2ab3eb7d29e9b6ddfe513 | royadityak94/Interview | test/Root/src/main/utilities.py | [
"MIT"
] | Python | count_lines_in_file | <not_specific> | def count_lines_in_file(file_path, to_read='\n'):
"""Module to support light-weight read of a given char in file
Parameters
----------
file_path*: Supports both relative and absolute references to the file to be read.
to_read: Character identifier that needs to be counted in that file (default: line delimiter)
(* - Required parameters)
Returns
-------
count of the to_read char in the furnished file
"""
if not os.path.exists(file_path):
raise FileNotFoundError
# Lightest way to read char of interest (in this case, line delimiter) in a given file
chunks = 4*(1024**2)
count = 0
file_ptr = open(file_path)
file_iterator = file_ptr.read
buffered = file_iterator(chunks)
while buffered:
count += buffered.count(to_read)
buffered = file_iterator(chunks)
file_ptr.close()
return count | Module to support light-weight read of a given char in file
Parameters
----------
file_path*: Supports both relative and absolute references to the file to be read.
to_read: Character identifier that needs to be counted in that file (default: line delimiter)
(* - Required parameters)
Returns
-------
count of the to_read char in the furnished file
| Module to support light-weight read of a given char in file | [
"Module",
"to",
"support",
"light",
"-",
"weight",
"read",
"of",
"a",
"given",
"char",
"in",
"file"
] | def count_lines_in_file(file_path, to_read='\n'):
if not os.path.exists(file_path):
raise FileNotFoundError
chunks = 4*(1024**2)
count = 0
file_ptr = open(file_path)
file_iterator = file_ptr.read
buffered = file_iterator(chunks)
while buffered:
count += buffered.count(to_read)
buffered = file_iterator(chunks)
file_ptr.close()
return count | [
"def",
"count_lines_in_file",
"(",
"file_path",
",",
"to_read",
"=",
"'\\n'",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"raise",
"FileNotFoundError",
"chunks",
"=",
"4",
"*",
"(",
"1024",
"**",
"2",
")",
"count",
"=",
"0",
"file_ptr",
"=",
"open",
"(",
"file_path",
")",
"file_iterator",
"=",
"file_ptr",
".",
"read",
"buffered",
"=",
"file_iterator",
"(",
"chunks",
")",
"while",
"buffered",
":",
"count",
"+=",
"buffered",
".",
"count",
"(",
"to_read",
")",
"buffered",
"=",
"file_iterator",
"(",
"chunks",
")",
"file_ptr",
".",
"close",
"(",
")",
"return",
"count"
] | Module to support light-weight read of a given char in file | [
"Module",
"to",
"support",
"light",
"-",
"weight",
"read",
"of",
"a",
"given",
"char",
"in",
"file"
] | [
"\"\"\"Module to support light-weight read of a given char in file\r\n Parameters\r\n ----------\r\n file_path*: Support both relative, absolute references to the file to be read.\r\n to_read: Character identifier that needs to be counted in that file (default: line delimiter)\r\n (* - Required parameters)\r\n\r\n Returns\r\n -------\r\n count of the to_read char in the furnished file\r\n \"\"\"",
"# Lightest way to read char of interest (in this case, line delimiter) in a given file\r"
] | [
{
"param": "file_path",
"type": null
},
{
"param": "to_read",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "count of the to_read char in the furnished file\r"
}
],
"raises": [],
"params": [
{
"identifier": "file_path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "to_read",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [
{
"identifier": "file_path*",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "(* - Required parameters)\r",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"others": []
} | import os
def count_lines_in_file(file_path, to_read='\n'):
if not os.path.exists(file_path):
raise FileNotFoundError
chunks = 4*(1024**2)
count = 0
file_ptr = open(file_path)
file_iterator = file_ptr.read
buffered = file_iterator(chunks)
while buffered:
count += buffered.count(to_read)
buffered = file_iterator(chunks)
file_ptr.close()
return count | 610,046 | 186 |
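A small usage sketch for the record above (illustrative only; the file path and contents are made up):

# Usage sketch: count newline characters in a throwaway file.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("a\nb\nc\n")
    path = f.name
print(count_lines_in_file(path))  # 3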
b4b62e5f5e12c50c811eb0ef672506279acd699d | feliximmohr/master_thesis_software | python_model_DNN/auditory_model/utils/eval.py | [
"MIT"
] | Python | model_eval | <not_specific> | def model_eval(model, b_gen, metric_str, workers=4):
"""
Evaluate model on data generator.
Prints and returns scores for specified metrics.
Parameters
----------
model : Keras model
The model to evaluate.
b_gen : DataGenerator object
DataGenerator object to use for loading the data.
metric_str : list of str
List of names of the metrics to evaluate. For printing.
workers : int, optional
Number of workers for the multiprocessing functionalities of
the Keras model methods. Defaults to 4.
Returns
-------
score : ndarray
Array containing results of the evaluated metrics as returned
by Keras model.evaluate() methods.
"""
print('\nModel Evaluation: \n')
score = model.evaluate_generator(b_gen, verbose=1,
use_multiprocessing=True,
workers=workers)
for i, m in enumerate(metric_str):
print('Test '+m+':', score[i])
return score |
Evaluate model on data generator.
Prints and returns scores for specified metrics.
Parameters
----------
model : Keras model
The model to evaluate.
b_gen : DataGenerator object
DataGenerator object to use for loading the data.
metric_str : list of str
List of names of the metrics to evaluate. For printing.
workers : int, optional
Number of workers for the multiprocessing functionalities of
the Keras model methods. Defaults to 4.
Returns
-------
score : ndarray
Array containing results of the evaluated metrics as returned
by Keras model.evaluate() methods.
| Evaluate model on data generator.
Prints and returns scores for specified metrics.
Parameters
Returns
score : ndarray
Array containing results of the evaluated metrics as returned
by Keras model.evaluate() methods. | [
"Evaluate",
"model",
"on",
"data",
"generator",
".",
"Prints",
"and",
"returns",
"scores",
"for",
"specified",
"metrics",
".",
"Parameters",
"Returns",
"score",
":",
"ndarray",
"Array",
"containing",
"results",
"of",
"the",
"evaluated",
"metrics",
"as",
"returned",
"by",
"Keras",
"model",
".",
"evaluate",
"()",
"methods",
"."
] | def model_eval(model, b_gen, metric_str, workers=4):
print('\nModel Evaluation: \n')
score = model.evaluate_generator(b_gen, verbose=1,
use_multiprocessing=True,
workers=workers)
for i, m in enumerate(metric_str):
print('Test '+m+':', score[i])
return score | [
"def",
"model_eval",
"(",
"model",
",",
"b_gen",
",",
"metric_str",
",",
"workers",
"=",
"4",
")",
":",
"print",
"(",
"'\\nModel Evaluation: \\n'",
")",
"score",
"=",
"model",
".",
"evaluate_generator",
"(",
"b_gen",
",",
"verbose",
"=",
"1",
",",
"use_multiprocessing",
"=",
"True",
",",
"workers",
"=",
"workers",
")",
"for",
"i",
",",
"m",
"in",
"enumerate",
"(",
"metric_str",
")",
":",
"print",
"(",
"'Test '",
"+",
"m",
"+",
"':'",
",",
"score",
"[",
"i",
"]",
")",
"return",
"score"
] | Evaluate model on data generator. | [
"Evaluate",
"model",
"on",
"data",
"generator",
"."
] | [
"\"\"\"\n Evaluate model on data generator.\n Prints and returns scores for specified metrics.\n\n Parameters\n ----------\n model : Keras model\n The model to evaluate.\n b_gen : DataGenerator object\n DataGenerator object to use for loading the data.\n metric_str : list of str \n List of names of the metrics to evaluate. For printing.\n workers : int, optional\n Number of workers for the multiprocessing functionalities of\n the Keras model methods. Defauts to 4.\n \n Returns\n -------\n score : ndarray\n Array containing results of the evaluated metrics as returned\n by Keras model.evaluate() methods.\n \"\"\""
] | [
{
"param": "model",
"type": null
},
{
"param": "b_gen",
"type": null
},
{
"param": "metric_str",
"type": null
},
{
"param": "workers",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "model",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b_gen",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "metric_str",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "workers",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def model_eval(model, b_gen, metric_str, workers=4):
print('\nModel Evaluation: \n')
score = model.evaluate_generator(b_gen, verbose=1,
use_multiprocessing=True,
workers=workers)
for i, m in enumerate(metric_str):
print('Test '+m+':', score[i])
return score | 610,047 | 719 |
dadbccce5ff60f5cdb43338a1c8bd2d298a9ce56 | yhexie/O-CNN | python/dataset.py | [
"MIT"
] | Python | _get_conversion_error_str | <not_specific> | def _get_conversion_error_str(cached_conversion_arguments, specified_conversion_arguments):
"""
Generates error string for mismatched conversion arguments
Args:
cached_conversion_arguments (str): Conversion arguments which were cached
specified_conversion_arguments (str): Conversion arguments which were specified
Returns:
str: Conversion error string
"""
return ("Cached conversion arguments:" + os.linesep + os.linesep +
str(cached_conversion_arguments) + os.linesep +
"differs from specified arguments:" + os.linesep + os.linesep +
str(specified_conversion_arguments) + os.linesep) |
Generates error string for mismatched conversion arguments
Args:
cached_conversion_arguments (str): Conversion arguments which were cached
specified_conversion_arguments (str): Conversion arguments which were specified
Returns:
str: Conversion error string
| Generates error string for mismatched conversion arguments | [
"Generates",
"error",
"string",
"for",
"mismatched",
"conversion",
"arguments"
] | def _get_conversion_error_str(cached_conversion_arguments, specified_conversion_arguments):
return ("Cached conversion arguments:" + os.linesep + os.linesep +
str(cached_conversion_arguments) + os.linesep +
"differs from specified arguments:" + os.linesep + os.linesep +
str(specified_conversion_arguments) + os.linesep) | [
"def",
"_get_conversion_error_str",
"(",
"cached_conversion_arguments",
",",
"specified_conversion_arguments",
")",
":",
"return",
"(",
"\"Cached conversion arguments:\"",
"+",
"os",
".",
"linesep",
"+",
"os",
".",
"linesep",
"+",
"str",
"(",
"cached_conversion_arguments",
")",
"+",
"os",
".",
"linesep",
"+",
"\"differs from specified arguments:\"",
"+",
"os",
".",
"linesep",
"+",
"os",
".",
"linesep",
"+",
"str",
"(",
"specified_conversion_arguments",
")",
"+",
"os",
".",
"linesep",
")"
] | Generates error string for mismatched conversion arguments | [
"Generates",
"error",
"string",
"for",
"mismatched",
"conversion",
"arguments"
] | [
"\"\"\"\n Generates error string for mismatched conversion arguments\n\n Args:\n cached_conversion_arguments (str): Conversion arguments which were cached\n specified_conversion_arguments (str): Conversion arguments which were specified\n Returns:\n str: Conversion error string\n \"\"\""
] | [
{
"param": "cached_conversion_arguments",
"type": null
},
{
"param": "specified_conversion_arguments",
"type": null
}
] | {
"returns": [
{
"docstring": "Conversion error string",
"docstring_tokens": [
"Conversion",
"error",
"string"
],
"type": "str"
}
],
"raises": [],
"params": [
{
"identifier": "cached_conversion_arguments",
"type": null,
"docstring": "Conversion arguments which were cached",
"docstring_tokens": [
"Conversion",
"arguments",
"which",
"were",
"cached"
],
"default": null,
"is_optional": false
},
{
"identifier": "specified_conversion_arguments",
"type": null,
"docstring": "Conversion arguments which were specified",
"docstring_tokens": [
"Conversion",
"arguments",
"which",
"were",
"specified"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | import os
def _get_conversion_error_str(cached_conversion_arguments, specified_conversion_arguments):
return ("Cached conversion arguments:" + os.linesep + os.linesep +
str(cached_conversion_arguments) + os.linesep +
"differs from specified arguments:" + os.linesep + os.linesep +
str(specified_conversion_arguments) + os.linesep) | 610,048 | 981 |
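A usage sketch for the record above (illustrative only; the argument strings are hypothetical):

# Usage sketch: contrast a cached argument set with a newly specified one.
msg = _get_conversion_error_str("{'depth': 5}", "{'depth': 6}")
print(msg)  # multi-line message showing both argument sets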
4fce587de1b34be360e99b6303e65f62cab7b94f | jensonjose/utilbox | utilbox/mail_utils/mail_utils.py | [
"MIT"
] | Python | _create_html_message | <not_specific> | def _create_html_message(plain_message_string):
"""
Internal method to convert plain-text message string to HTML.
:param plain_message_string: The message string to be converted to HTML.
:return: The HTML-based message string.
:rtype: str
"""
return "<html><head></head><body><p>" + str(plain_message_string) + "</p></body></html>" |
Internal method to convert plain-text message string to HTML.
:param plain_message_string: The message string to be converted to HTML.
:return: The HTML-based message string.
:rtype: str
| Internal method to convert plain-text message string to HTML. | [
"Internal",
"method",
"to",
"convert",
"plain",
"-",
"text",
"message",
"string",
"to",
"HTML",
"."
] | def _create_html_message(plain_message_string):
return "<html><head></head><body><p>" + str(plain_message_string) + "</p></body></html>" | [
"def",
"_create_html_message",
"(",
"plain_message_string",
")",
":",
"return",
"\"<html><head></head><body><p>\"",
"+",
"str",
"(",
"plain_message_string",
")",
"+",
"\"</p></body></html>\""
] | Internal method to convert plain-text message string to HTML. | [
"Internal",
"method",
"to",
"convert",
"plain",
"-",
"text",
"message",
"string",
"to",
"HTML",
"."
] | [
"\"\"\"\n Internal method to convert plain-text message string to HTML.\n\n :param plain_message_string: The message string to converted to HTML.\n\n :return: The HTML-based message string.\n :rtype: str\n \"\"\""
] | [
{
"param": "plain_message_string",
"type": null
}
] | {
"returns": [
{
"docstring": "The HTML-based message string.",
"docstring_tokens": [
"The",
"HTML",
"-",
"based",
"message",
"string",
"."
],
"type": "str"
}
],
"raises": [],
"params": [
{
"identifier": "plain_message_string",
"type": null,
"docstring": "The message string to converted to HTML.",
"docstring_tokens": [
"The",
"message",
"string",
"to",
"converted",
"to",
"HTML",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _create_html_message(plain_message_string):
return "<html><head></head><body><p>" + str(plain_message_string) + "</p></body></html>" | 610,049 | 213 |
a70e5ab9fe32e549786ca8f9146b7b0f5773ec89 | bshaffer/google-cloud-sdk | lib/googlecloudsdk/command_lib/run/flags.py | [
"Apache-2.0"
] | Python | _AddImageArg | null | def _AddImageArg(parser):
"""Add an image resource arg."""
parser.add_argument(
'--image',
help='The path to the GCR container to deploy.') | Add an image resource arg. | Add an image resource arg. | [
"Add",
"an",
"image",
"resource",
"arg",
"."
] | def _AddImageArg(parser):
parser.add_argument(
'--image',
help='The path to the GCR container to deploy.') | [
"def",
"_AddImageArg",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'--image'",
",",
"help",
"=",
"'The path to the GCR container to deploy.'",
")"
] | Add an image resource arg. | [
"Add",
"an",
"image",
"resource",
"arg",
"."
] | [
"\"\"\"Add an image resource arg.\"\"\""
] | [
{
"param": "parser",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "parser",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _AddImageArg(parser):
parser.add_argument(
'--image',
help='The path to the GCR container to deploy.') | 610,050 | 266 |
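A usage sketch for the record above (illustrative only; the image path is hypothetical):

# Usage sketch: attach the flag to a fresh parser and parse it.
import argparse

parser = argparse.ArgumentParser()
_AddImageArg(parser)
args = parser.parse_args(["--image", "gcr.io/example/app:latest"])
print(args.image)  # gcr.io/example/app:latest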
e5ef167794249ed1fe0f484d8a6fcf6d49940e5b | MSD200X/echoprint-server | API/solr.py | [
"Apache-2.0"
] | Python | pooled_connection | null | def pooled_connection(pool):
"""
Provides some syntactic sugar for using a ConnectionPool. Example use:
pool = ConnectionPool(SolrConnection, 'http://localhost:8080/solr')
with pooled_connection(pool) as conn:
docs = conn.query('*:*')
"""
conn = pool.get()
try:
yield conn
except Exception:
raise
else:
# only return connection to pool if an exception wasn't raised
pool.put(conn) |
Provides some syntactic sugar for using a ConnectionPool. Example use:
pool = ConnectionPool(SolrConnection, 'http://localhost:8080/solr')
with pooled_connection(pool) as conn:
docs = conn.query('*:*')
| Provides some syntactic sugar for using a ConnectionPool. | [
"Provides",
"some",
"syntactic",
"sugar",
"for",
"using",
"a",
"ConnectionPool",
"."
] | def pooled_connection(pool):
conn = pool.get()
try:
yield conn
except Exception:
raise
else:
pool.put(conn) | [
"def",
"pooled_connection",
"(",
"pool",
")",
":",
"conn",
"=",
"pool",
".",
"get",
"(",
")",
"try",
":",
"yield",
"conn",
"except",
"Exception",
":",
"raise",
"else",
":",
"pool",
".",
"put",
"(",
"conn",
")"
] | Provides some syntactic sugar for using a ConnectionPool. | [
"Provides",
"some",
"syntactic",
"sugar",
"for",
"using",
"a",
"ConnectionPool",
"."
] | [
"\"\"\"\n Provides some syntactic sugar for using a ConnectionPool. Example use:\n \n pool = ConnectionPool(SolrConnection, 'http://localhost:8080/solr')\n with pooled_connection(pool) as conn:\n docs = conn.query('*:*')\n \"\"\"",
"# only return connection to pool if an exception wasn't raised"
] | [
{
"param": "pool",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "pool",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def pooled_connection(pool):
conn = pool.get()
try:
yield conn
except Exception:
raise
else:
pool.put(conn) | 610,051 | 911 |
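A usage sketch for the record above (illustrative only). As defined here the function is a plain generator, so it needs contextlib.contextmanager before it can drive a `with` block; the queue below is a stand-in for the real ConnectionPool named in the docstring:

# Usage sketch with a toy queue-backed "pool".
import contextlib
import queue

pool = queue.Queue()
pool.put("conn-1")                 # pretend connection object

managed = contextlib.contextmanager(pooled_connection)
with managed(pool) as conn:
    print(conn)                    # conn-1
print(pool.qsize())                # 1 -- returned to the pool on clean exit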
946395103aca43aace2cff5be72ef4d66a26dc3f | edelsonc/txt_search | search_text.py | [
"MIT"
] | Python | remove_punc | <not_specific> | def remove_punc(words):
"""Function that removes punctuation from a string
Arguments
---------
words -- string to have punctuation removed
"""
trans = {ord(c): None for c in string.punctuation}
no_punc = words.translate(trans)
return no_punc | Function that removes punctuation from a string
Arguments
---------
words -- string to have punctuation removed
| Function that removes punctuation from a string
Arguments
- string to have punctuation removed | [
"Function",
"that",
"removes",
"punctuation",
"from",
"a",
"string",
"Arguments",
"-",
"string",
"to",
"have",
"punctuation",
"removes"
] | def remove_punc(words):
trans = {ord(c): None for c in string.punctuation}
no_punc = words.translate(trans)
return no_punc | [
"def",
"remove_punc",
"(",
"words",
")",
":",
"trans",
"=",
"{",
"ord",
"(",
"c",
")",
":",
"None",
"for",
"c",
"in",
"string",
".",
"punctuation",
"}",
"no_punc",
"=",
"words",
".",
"translate",
"(",
"trans",
")",
"return",
"no_punc"
] | Function that removes punctuation from a string
Arguments | [
"Function",
"that",
"removes",
"punctuation",
"from",
"a",
"string",
"Arguments"
] | [
"\"\"\"Function that removes punctuation from a string\n\n Arguments\n ---------\n words -- string to have punctuation removes\n \"\"\""
] | [
{
"param": "words",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "words",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import string
def remove_punc(words):
trans = {ord(c): None for c in string.punctuation}
no_punc = words.translate(trans)
return no_punc | 610,052 | 646 |
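A usage sketch for the record above (illustrative only):

# Usage sketch: strip ASCII punctuation from a sentence.
print(remove_punc("Hello, world!"))  # Hello world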
4a0e363041e095845fd66a0704cca6392a88b93c | karlobermeyer/numeric-digit-classification | image_recognition_tools.py | [
"MIT"
] | Python | truncate_string | <not_specific> | def truncate_string(s, ubnd, add_ellipses=True):
'''Return a version of the string that is clamped at the end to
length `ubnd`.
Args:
s: string, string to clamp.
ubnd: unsigned int, max length of output string.
add_ellipses: bool, whether to replace the last 3 chars of truncated
strings with an ellipsis.
Returns:
string, clamped version of `s`.
'''
s_length = len(s)
if s_length <= ubnd:
return s
else:
return s[:ubnd-3] + '...' | Return a version of the string that is clamped at the end to
length `ubnd`.
Args:
s: string, string to clamp.
ubnd: unsigned int, max length of output string.
add_ellipses: bool, whether to replace the last 3 chars of truncated
strings with an ellipsis.
Returns:
string, clamped version of `s`.
| Return a version of the string that is clamped at the end to
length `ubnd`. | [
"Return",
"a",
"version",
"of",
"the",
"string",
"that",
"is",
"clamped",
"at",
"the",
"end",
"to",
"length",
"`",
"ubnd",
"`",
"."
] | def truncate_string(s, ubnd, add_ellipses=True):
s_length = len(s)
if s_length <= ubnd:
return s
else:
return s[:ubnd-3] + '...' | [
"def",
"truncate_string",
"(",
"s",
",",
"ubnd",
",",
"add_ellipses",
"=",
"True",
")",
":",
"s_length",
"=",
"len",
"(",
"s",
")",
"if",
"s_length",
"<=",
"ubnd",
":",
"return",
"s",
"else",
":",
"return",
"s",
"[",
":",
"ubnd",
"-",
"3",
"]",
"+",
"'...'"
] | Return a version of the string that is clamped at the end to
length `ubnd`. | [
"Return",
"a",
"version",
"of",
"the",
"string",
"that",
"is",
"clamped",
"at",
"the",
"end",
"to",
"length",
"`",
"ubnd",
"`",
"."
] | [
"'''Return a version of the string that is clamped at the end to\n length `ubnd`.\n\n Args:\n s: string, string to clamp.\n ubnd: unsigned int, max length of output string.\n add_ellipses: bool, whether to replace the last 3 chars of truncated\n strings with an ellipsis.\n\n Returns:\n string, clamped version of `s`.\n '''"
] | [
{
"param": "s",
"type": null
},
{
"param": "ubnd",
"type": null
},
{
"param": "add_ellipses",
"type": null
}
] | {
"returns": [
{
"docstring": "string, clamped version of `s`.",
"docstring_tokens": [
"string",
"clamped",
"version",
"of",
"`",
"s",
"`",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "s",
"type": null,
"docstring": "string, string to clamp.",
"docstring_tokens": [
"string",
"string",
"to",
"clamp",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "ubnd",
"type": null,
"docstring": "unsigned int, max length of output string.",
"docstring_tokens": [
"unsigned",
"int",
"max",
"length",
"of",
"output",
"string",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "add_ellipses",
"type": null,
"docstring": "bool, whether to replace the last 3 chars of truncated\nstrings with an ellipsis.",
"docstring_tokens": [
"bool",
"whether",
"to",
"replace",
"the",
"last",
"3",
"chars",
"of",
"truncated",
"strings",
"with",
"an",
"ellipsis",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def truncate_string(s, ubnd, add_ellipses=True):
s_length = len(s)
if s_length <= ubnd:
return s
else:
return s[:ubnd-3] + '...' | 610,053 | 132 |
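A usage sketch for the record above (illustrative only). Note that the body never reads add_ellipses: truncated strings always end in '...':

# Usage sketch: strings at or under the bound pass through unchanged.
print(truncate_string("abcdefghij", 20))                     # abcdefghij
print(truncate_string("abcdefghij", 8))                      # abcde...
print(truncate_string("abcdefghij", 8, add_ellipses=False))  # still abcde...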
e5ae160d5309de9f8302a72b5b9af09cc9dd6755 | shrijaltamrakar/Descent_py | sorting_algs/counting_sort.py | [
"MIT"
] | Python | counting_sort | <not_specific> | def counting_sort(collection):
"""
pure implementation of counting sort algorithm in python
:param collection: some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered by ascending
"""
#if collection is empty , returns empty
if collection == []:
return []
#get some information about the collection
coll_len = len(collection)
coll_max = max(collection)
coll_min = min(collection)
#create counting array
counting_arr_length = coll_max + 1 - coll_min
counting_arr = [0] * counting_arr_length
#count how many times a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors .now , counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1, counting_arr_length):
counting_arr[i] = counting_arr[i] + counting_arr[i-1]
# create the output collection
ordered = [0]* coll_len
#place the elements in the output , respecting the original order
# from the end to begin, updating counting_arr
for i in reversed(range(0,coll_len)):
ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered |
pure implementation of counting sort algorithm in python
:param collection: some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered by ascending
| pure implementation of counting sort algorithm in python | [
"pure",
"implementation",
"of",
"counting",
"sort",
"algorighm",
"in",
"python"
] | def counting_sort(collection):
if collection == []:
return []
coll_len = len(collection)
coll_max = max(collection)
coll_min = min(collection)
counting_arr_length = coll_max + 1 - coll_min
counting_arr = [0] * counting_arr_length
for number in collection:
counting_arr[number - coll_min] += 1
for i in range(1, counting_arr_length):
counting_arr[i] = counting_arr[i] + counting_arr[i-1]
ordered = [0]* coll_len
for i in reversed(range(0,coll_len)):
ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered | [
"def",
"counting_sort",
"(",
"collection",
")",
":",
"if",
"collection",
"==",
"[",
"]",
":",
"return",
"[",
"]",
"coll_len",
"=",
"len",
"(",
"collection",
")",
"coll_max",
"=",
"max",
"(",
"collection",
")",
"coll_min",
"=",
"min",
"(",
"collection",
")",
"counting_arr_length",
"=",
"coll_max",
"+",
"1",
"-",
"coll_min",
"counting_arr",
"=",
"[",
"0",
"]",
"*",
"counting_arr_length",
"for",
"number",
"in",
"collection",
":",
"counting_arr",
"[",
"number",
"-",
"coll_min",
"]",
"+=",
"1",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"counting_arr_length",
")",
":",
"counting_arr",
"[",
"i",
"]",
"=",
"counting_arr",
"[",
"i",
"]",
"+",
"counting_arr",
"[",
"i",
"-",
"1",
"]",
"ordered",
"=",
"[",
"0",
"]",
"*",
"coll_len",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"0",
",",
"coll_len",
")",
")",
":",
"ordered",
"[",
"counting_arr",
"[",
"collection",
"[",
"i",
"]",
"-",
"coll_min",
"]",
"-",
"1",
"]",
"=",
"collection",
"[",
"i",
"]",
"counting_arr",
"[",
"collection",
"[",
"i",
"]",
"-",
"coll_min",
"]",
"-=",
"1",
"return",
"ordered"
] | pure implementation of counting sort algorithm in python | [
"pure",
"implementation",
"of",
"counting",
"sort",
"algorighm",
"in",
"python"
] | [
"\"\"\"\n pure implementation of counting sort algorighm in python\n :param collection: some mutable ordered collection with heterogenous\n comparable items inside\n :return: the same collection ordered by ascending\n \"\"\"",
"#if collection is empty , returns empty",
"#get some information about the collection",
"#create counting array",
"#count how much a number appears in the collection",
"# sum each position with its predecessors .now , counting_arr[i] tells",
"# us how many elements <= i has in the collection",
"# create the output collection",
"#place the elements in the output , respecting the original order",
"# from the end to begin, updating counting_arr"
] | [
{
"param": "collection",
"type": null
}
] | {
"returns": [
{
"docstring": "the same collection ordered by ascending",
"docstring_tokens": [
"the",
"same",
"collection",
"ordered",
"by",
"ascending"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "collection",
"type": null,
"docstring": "some mutable ordered collection with heterogenous\ncomparable items inside",
"docstring_tokens": [
"some",
"mutable",
"ordered",
"collection",
"with",
"heterogenous",
"comparable",
"items",
"inside"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def counting_sort(collection):
if collection == []:
return []
coll_len = len(collection)
coll_max = max(collection)
coll_min = min(collection)
counting_arr_length = coll_max + 1 - coll_min
counting_arr = [0] * counting_arr_length
for number in collection:
counting_arr[number - coll_min] += 1
for i in range(1, counting_arr_length):
counting_arr[i] = counting_arr[i] + counting_arr[i-1]
ordered = [0]* coll_len
for i in reversed(range(0,coll_len)):
ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered | 610,054 | 32 |
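A usage sketch for the record above (illustrative only). The min-offset indexing makes negative values work too:

# Usage sketch: sort a mixed-sign list and the empty list.
print(counting_sort([4, -2, 7, 0, -2]))  # [-2, -2, 0, 4, 7]
print(counting_sort([]))                 # []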
1734eddb60c326ca46bec8afa3612ed45d12e28f | lilahtovmoon/mypaas | mypaas/stats/monitor.py | [
"BSD-2-Clause"
] | Python | hashit | <not_specific> | def hashit(value):
"""Hash any value by applying md5 to the stringified value.
Returns an integer.
"""
if isinstance(value, int):
return abs(value)
h = hashlib.md5(str(value).encode())
return abs(int(h.hexdigest()[:14], 16)) | Hash any value by applying md5 to the stringified value.
Returns an integer.
| Hash any value by applying md5 to the stringified value.
Returns an integer. | [
"Hash",
"any",
"value",
"by",
"applying",
"md5",
"to",
"the",
"stringified",
"value",
".",
"Returns",
"an",
"integer",
"."
] | def hashit(value):
if isinstance(value, int):
return abs(value)
h = hashlib.md5(str(value).encode())
return abs(int(h.hexdigest()[:14], 16)) | [
"def",
"hashit",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"abs",
"(",
"value",
")",
"h",
"=",
"hashlib",
".",
"md5",
"(",
"str",
"(",
"value",
")",
".",
"encode",
"(",
")",
")",
"return",
"abs",
"(",
"int",
"(",
"h",
".",
"hexdigest",
"(",
")",
"[",
":",
"14",
"]",
",",
"16",
")",
")"
] | Hash any value by applying md5 to the stringified value. | [
"Hash",
"any",
"value",
"by",
"applying",
"md5",
"to",
"the",
"stringified",
"value",
"."
] | [
"\"\"\"Hash any value by applying md5 to the stringified value.\n Returns an integer.\n \"\"\""
] | [
{
"param": "value",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import hashlib
def hashit(value):
if isinstance(value, int):
return abs(value)
h = hashlib.md5(str(value).encode())
return abs(int(h.hexdigest()[:14], 16)) | 610,055 | 135 |
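A usage sketch for the record above (illustrative only):

# Usage sketch: ints pass through as absolute values; anything else is
# hashed deterministically via md5 of its string form.
print(hashit(-42))                     # 42
print(hashit("cpu") == hashit("cpu"))  # True (stable across runs)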
4eeb4d0a1afe47c25f290f7e43047c17587c52a3 | crovner/haproxyadmin | haproxyadmin/utils.py | [
"Apache-2.0"
] | Python | elements_of_list_same | <not_specific> | def elements_of_list_same(iterator):
"""Check is all elements of an iterator are equal.
:param iterator: an iterator
:type iterator: ``list``
:rtype: ``bool``
Usage::
>>> from haproxyadmin import utils
>>> iterator = ['OK', 'ok']
>>> utils.elements_of_list_same(iterator)
False
>>> iterator = ['OK', 'OK']
>>> utils.elements_of_list_same(iterator)
True
>>> iterator = [22, 22, 22]
>>> utils.elements_of_list_same(iterator)
True
>>> iterator = [22, 22, 222]
>>> utils.elements_of_list_same(iterator)
False
"""
return len(set(iterator)) == 1 | Check if all elements of an iterator are equal.
:param iterator: an iterator
:type iterator: ``list``
:rtype: ``bool``
Usage::
>>> from haproxyadmin import utils
>>> iterator = ['OK', 'ok']
>>> utils.elements_of_list_same(iterator)
False
>>> iterator = ['OK', 'OK']
>>> utils.elements_of_list_same(iterator)
True
>>> iterator = [22, 22, 22]
>>> utils.elements_of_list_same(iterator)
True
>>> iterator = [22, 22, 222]
>>> utils.elements_of_list_same(iterator)
False
| Check if all elements of an iterator are equal. | [
"Check",
"is",
"all",
"elements",
"of",
"an",
"iterator",
"are",
"equal",
"."
] | def elements_of_list_same(iterator):
return len(set(iterator)) == 1 | [
"def",
"elements_of_list_same",
"(",
"iterator",
")",
":",
"return",
"len",
"(",
"set",
"(",
"iterator",
")",
")",
"==",
"1"
] | Check if all elements of an iterator are equal. | [
"Check",
"is",
"all",
"elements",
"of",
"an",
"iterator",
"are",
"equal",
"."
] | [
"\"\"\"Check is all elements of an iterator are equal.\n\n :param iterator: a iterator\n :type iterator: ``list``\n :rtype: ``bool``\n\n Usage::\n\n >>> from haproxyadmin import utils\n >>> iterator = ['OK', 'ok']\n >>> utils.elements_of_list_same(iterator)\n False\n >>> iterator = ['OK', 'OK']\n >>> utils.elements_of_list_same(iterator)\n True\n >>> iterator = [22, 22, 22]\n >>> utils.elements_of_list_same(iterator)\n True\n >>> iterator = [22, 22, 222]\n >>> utils.elements_of_list_same(iterator)\n False\n \"\"\""
] | [
{
"param": "iterator",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "``bool``\nUsage::\n\n >>> from haproxyadmin import utils\n >>> iterator = ['OK', 'ok']\n >>> utils.elements_of_list_same(iterator)\n False\n >>> iterator = ['OK', 'OK']\n >>> utils.elements_of_list_same(iterator)\n True\n >>> iterator = [22, 22, 22]\n >>> utils.elements_of_list_same(iterator)\n True\n >>> iterator = [22, 22, 222]\n >>> utils.elements_of_list_same(iterator)\n False"
}
],
"raises": [],
"params": [
{
"identifier": "iterator",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def elements_of_list_same(iterator):
return len(set(iterator)) == 1 | 610,056 | 773 |
ac512932fb9eabac4c3e479a84626fd1cd3ed889 | rbaltrusch/desktop_shop | desktop_shop/crypto.py | [
"MIT"
] | Python | hash_string | <not_specific> | def hash_string(password, salt):
'''Hashes the specified password with the passed salt. Returns hash in hex format (str)'''
encoded_password = bytes(password, encoding='utf-8')
encoded_salt = bytes(salt, encoding='utf-8')
#should be iterations=10000 for security, just changed to 1 for easy database generation
derived_key = hashlib.pbkdf2_hmac('sha256', encoded_password, encoded_salt, iterations=1)
return derived_key.hex() | Hashes the specified password with the passed salt. Returns hash in hex format (str) | Hashes the specified password with the passed salt. Returns hash in hex format (str) | [
"Hashes",
"the",
"specified",
"password",
"with",
"the",
"passed",
"salt",
".",
"Returns",
"hash",
"in",
"hex",
"format",
"(",
"str",
")"
] | def hash_string(password, salt):
encoded_password = bytes(password, encoding='utf-8')
encoded_salt = bytes(salt, encoding='utf-8')
derived_key = hashlib.pbkdf2_hmac('sha256', encoded_password, encoded_salt, iterations=1)
return derived_key.hex() | [
"def",
"hash_string",
"(",
"password",
",",
"salt",
")",
":",
"encoded_password",
"=",
"bytes",
"(",
"password",
",",
"encoding",
"=",
"'utf-8'",
")",
"encoded_salt",
"=",
"bytes",
"(",
"salt",
",",
"encoding",
"=",
"'utf-8'",
")",
"derived_key",
"=",
"hashlib",
".",
"pbkdf2_hmac",
"(",
"'sha256'",
",",
"encoded_password",
",",
"encoded_salt",
",",
"iterations",
"=",
"1",
")",
"return",
"derived_key",
".",
"hex",
"(",
")"
] | Hashes the specified password with the passed salt. | [
"Hashes",
"the",
"specified",
"password",
"with",
"the",
"passed",
"salt",
"."
] | [
"'''Hashes the specified password with the passed salt. Returns hash in hex format (str)'''",
"#should be iterations=10000 for security, just changed to 1 for easy database generation"
] | [
{
"param": "password",
"type": null
},
{
"param": "salt",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "password",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "salt",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import hashlib
def hash_string(password, salt):
encoded_password = bytes(password, encoding='utf-8')
encoded_salt = bytes(salt, encoding='utf-8')
derived_key = hashlib.pbkdf2_hmac('sha256', encoded_password, encoded_salt, iterations=1)
return derived_key.hex() | 610,057 | 28 |
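A usage sketch for the record above (illustrative only; the password and salt are made up):

# Usage sketch: the same password/salt pair always gives the same digest.
h1 = hash_string("hunter2", "salty")
h2 = hash_string("hunter2", "salty")
print(h1 == h2)  # True
print(len(h1))   # 64 hex characters (sha256)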
0a4ad1972f15f0ba16c1a663dd77611a4f006fe1 | arjoly/clusterlib | clusterlib/tests/test_scheduler.py | [
"BSD-3-Clause"
] | Python | _check_job_id | <not_specific> | def _check_job_id(command):
"""Perform a dispatch and return the job id."""
# TODO: This utility function should be properly documented and made more
# robust to be included in the scheduler module itself
cmd_encoding = 'utf-8'
output = subprocess.check_output(
command.encode(cmd_encoding), shell=True).decode(cmd_encoding)
if output.startswith(u'Your job '):
job_id = output.split()[2]
elif output.startswith(u'Submitted batch job '):
job_id = output.split()[3]
else:
raise RuntimeError(
u"Failed to parse job_id from command output:\n %s\ncmd:\n%s"
% (command, output))
return job_id | Perform a dispatch and return the job id. | Perform a dispatch and return the job id. | [
"Perform",
"a",
"dispatch",
"and",
"return",
"the",
"job",
"id",
"."
] | def _check_job_id(command):
cmd_encoding = 'utf-8'
output = subprocess.check_output(
command.encode(cmd_encoding), shell=True).decode(cmd_encoding)
if output.startswith(u'Your job '):
job_id = output.split()[2]
elif output.startswith(u'Submitted batch job '):
job_id = output.split()[3]
else:
raise RuntimeError(
u"Failed to parse job_id from command output:\n %s\ncmd:\n%s"
% (command, output))
return job_id | [
"def",
"_check_job_id",
"(",
"command",
")",
":",
"cmd_encoding",
"=",
"'utf-8'",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"command",
".",
"encode",
"(",
"cmd_encoding",
")",
",",
"shell",
"=",
"True",
")",
".",
"decode",
"(",
"cmd_encoding",
")",
"if",
"output",
".",
"startswith",
"(",
"u'Your job '",
")",
":",
"job_id",
"=",
"output",
".",
"split",
"(",
")",
"[",
"2",
"]",
"elif",
"output",
".",
"startswith",
"(",
"u'Submitted batch job '",
")",
":",
"job_id",
"=",
"output",
".",
"split",
"(",
")",
"[",
"3",
"]",
"else",
":",
"raise",
"RuntimeError",
"(",
"u\"Failed to parse job_id from command output:\\n %s\\ncmd:\\n%s\"",
"%",
"(",
"command",
",",
"output",
")",
")",
"return",
"job_id"
] | Perform a dispatch and return the job id. | [
"Perform",
"a",
"dispatch",
"and",
"return",
"the",
"job",
"id",
"."
] | [
"\"\"\"Perform a dispatch and return the job id.\"\"\"",
"# TODO: This utility function should be properly documented any made more",
"# robust to be included in the scheduler module itself"
] | [
{
"param": "command",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "command",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import subprocess
def _check_job_id(command):
cmd_encoding = 'utf-8'
output = subprocess.check_output(
command.encode(cmd_encoding), shell=True).decode(cmd_encoding)
if output.startswith(u'Your job '):
job_id = output.split()[2]
elif output.startswith(u'Submitted batch job '):
job_id = output.split()[3]
else:
raise RuntimeError(
u"Failed to parse job_id from command output:\n %s\ncmd:\n%s"
% (command, output))
return job_id | 610,058 | 480 |
33107e40a8e96bb2b6df491c938697d443fbb544 | CARRIER-project/v6-vertical-analysis | baseContainer/redacted_logging/redacted_logging.py | [
"Apache-2.0"
] | Python | probe_config_file | null | def probe_config_file(config_file):
""" Check for the existance of a config file for the logger.
`configparser` does not give a sane error message for missing files,
this is more understandable.
Args:
config_file (str): Name of the config file.
Raises:
FileNotFoundError: If config_file is not found.
"""
if not os.path.isfile(config_file):
raise FileNotFoundError("Config file " + config_file + " " +
"does not exist. Create it or use " +
"get_logger(__name__, " +
"config_file=\"./my_config_file\") to " +
"point to another name or location.") | Check for the existance of a config file for the logger.
`configparser` does not give a sane error message for missing files,
this is more understandable.
Args:
config_file (str): Name of the config file.
Raises:
FileNotFoundError: If config_file is not found.
| Check for the existence of a config file for the logger.
`configparser` does not give a sane error message for missing files,
this is more understandable. | [
"Check",
"for",
"the",
"existance",
"of",
"a",
"config",
"file",
"for",
"the",
"logger",
".",
"`",
"configparser",
"`",
"does",
"not",
"give",
"a",
"sane",
"error",
"message",
"for",
"missing",
"files",
"this",
"is",
"more",
"understandable",
"."
] | def probe_config_file(config_file):
if not os.path.isfile(config_file):
raise FileNotFoundError("Config file " + config_file + " " +
"does not exist. Create it or use " +
"get_logger(__name__, " +
"config_file=\"./my_config_file\") to " +
"point to another name or location.") | [
"def",
"probe_config_file",
"(",
"config_file",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"config_file",
")",
":",
"raise",
"FileNotFoundError",
"(",
"\"Config file \"",
"+",
"config_file",
"+",
"\" \"",
"+",
"\"does not exist. Create it or use \"",
"+",
"\"get_logger(__name__, \"",
"+",
"\"config_file=\\\"./my_config_file\\\") to \"",
"+",
"\"point to another name or location.\"",
")"
] | Check for the existence of a config file for the logger. | [
"Check",
"for",
"the",
"existance",
"of",
"a",
"config",
"file",
"for",
"the",
"logger",
"."
] | [
"\"\"\" Check for the existance of a config file for the logger.\n\n `configparser` does not give a sane error message for missing files,\n this is more understandable.\n\n Args:\n config_file (str): Name of the config file.\n\n Raises:\n FileNotFoundError: If config_file is not found.\n \"\"\""
] | [
{
"param": "config_file",
"type": null
}
] | {
"returns": [],
"raises": [
{
"docstring": "If config_file is not found.",
"docstring_tokens": [
"If",
"config_file",
"is",
"not",
"found",
"."
],
"type": "FileNotFoundError"
}
],
"params": [
{
"identifier": "config_file",
"type": null,
"docstring": "Name of the config file.",
"docstring_tokens": [
"Name",
"of",
"the",
"config",
"file",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | import os
def probe_config_file(config_file):
if not os.path.isfile(config_file):
raise FileNotFoundError("Config file " + config_file + " " +
"does not exist. Create it or use " +
"get_logger(__name__, " +
"config_file=\"./my_config_file\") to " +
"point to another name or location.") | 610,059 | 241 |
088c57cb85bad2739afb18e6c73e38cf7a18100f | WilliamLoy/otter | otter/test/utils.py | [
"Apache-2.0"
] | Python | alist_get | <not_specific> | def alist_get(data, key):
"""Look up a value in an association list."""
for dkey, dvalue in data:
if dkey == key:
return dvalue
raise KeyError(key) | Look up a value in an association list. | Look up a value in an association list. | [
"Look",
"up",
"a",
"value",
"in",
"an",
"association",
"list",
"."
] | def alist_get(data, key):
for dkey, dvalue in data:
if dkey == key:
return dvalue
raise KeyError(key) | [
"def",
"alist_get",
"(",
"data",
",",
"key",
")",
":",
"for",
"dkey",
",",
"dvalue",
"in",
"data",
":",
"if",
"dkey",
"==",
"key",
":",
"return",
"dvalue",
"raise",
"KeyError",
"(",
"key",
")"
] | Look up a value in an association list. | [
"Look",
"up",
"a",
"value",
"in",
"an",
"association",
"list",
"."
] | [
"\"\"\"Look up a value in an association list.\"\"\""
] | [
{
"param": "data",
"type": null
},
{
"param": "key",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "key",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def alist_get(data, key):
for dkey, dvalue in data:
if dkey == key:
return dvalue
raise KeyError(key) | 610,060 | 0 |
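A usage sketch for the record above (illustrative only):

# Usage sketch: look up a key in an association list.
pairs = [("a", 1), ("b", 2)]
print(alist_get(pairs, "b"))  # 2
# alist_get(pairs, "z") would raise KeyError('z')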
773c92eef246527092f9973a9bea12a5a866a642 | irenepan-twinkle/SI506-2020Winter | code/problem_set_06/problem_set_06.py | [
"BSD-3-Clause"
] | Python | sort_dictionary_by_value | <not_specific> | def sort_dictionary_by_value(dictionary):
"""
DON'T CHANGE THIS FUNCTION. This function sorts a dictionary by its values in a
descending fashion.
Parameters:
dictionary (dict): A dictionary to be sorted.
Returns:
dict: The sorted dictionary.
"""
desc_dictionary = dict(sorted(dictionary.items(), key=operator.itemgetter(1), reverse=True))
return desc_dictionary |
DON'T CHANGE THIS FUNCTION. This function sorts a dictionary by its values in a
descending fashion.
Parameters:
dictionary (dict): A dictionary to be sorted.
Returns:
dict: The sorted dictionary.
| DON'T CHANGE THIS FUNCTION. This function sorts a dictionary by its values in a
descending fashion. | [
"DON",
"'",
"T",
"CHANGE",
"THIS",
"FUNCTION",
".",
"This",
"function",
"sorts",
"a",
"dictionary",
"by",
"its",
"values",
"in",
"a",
"descending",
"fashion",
"."
] | def sort_dictionary_by_value(dictionary):
desc_dictionary = dict(sorted(dictionary.items(), key=operator.itemgetter(1), reverse=True))
return desc_dictionary | [
"def",
"sort_dictionary_by_value",
"(",
"dictionary",
")",
":",
"desc_dictionary",
"=",
"dict",
"(",
"sorted",
"(",
"dictionary",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
")",
"return",
"desc_dictionary"
] | DON'T CHANGE THIS FUNCTION. | [
"DON",
"'",
"T",
"CHANGE",
"THIS",
"FUNCTION",
"."
] | [
"\"\"\"\n DON'T CHANGE THIS FUNCTION. This function sorts a dictionary by its values in a\n descending fashion.\n\n Parameters:\n dictionary (dict): A dictionary to be sorted.\n\n Returns:\n dict: The sorted dictionary.\n \"\"\""
] | [
{
"param": "dictionary",
"type": null
}
] | {
"returns": [
{
"docstring": "The sorted dictionary.",
"docstring_tokens": [
"The",
"sorted",
"dictionary",
"."
],
"type": "dict"
}
],
"raises": [],
"params": [
{
"identifier": "dictionary",
"type": null,
"docstring": "A dictionary to be sorted.",
"docstring_tokens": [
"A",
"dictionary",
"to",
"be",
"sorted",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | import operator
def sort_dictionary_by_value(dictionary):
desc_dictionary = dict(sorted(dictionary.items(), key=operator.itemgetter(1), reverse=True))
return desc_dictionary | 610,061 | 799 |
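A usage sketch for the record above (illustrative only; it relies on the operator import shown in the record's code_with_imports):

# Usage sketch: order a dict by value, largest first.
counts = {"ann": 3, "bob": 7, "cem": 5}
print(sort_dictionary_by_value(counts))  # {'bob': 7, 'cem': 5, 'ann': 3}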
73e1d16fd33cde7110f92cdd23594768e446c5b8 | the-louie/sensors2domoticz | telldus2json.py | [
"MIT"
] | Python | parse_sensors | <not_specific> | def parse_sensors(configuration):
"""
Bake the sensors array to a dict where the telldus id is
the key for easier parsing later on.
"""
result = {}
for sensor in configuration["my_sensors"]:
result[sensor["id"]] = sensor
return result |
Bake the sensors array to a dict where the telldus id is
the key for easier parsing later on.
| Bake the sensors array to a dict where the telldus id is
the key for easier parsing later on. | [
"Bake",
"the",
"sensors",
"array",
"to",
"a",
"dict",
"where",
"the",
"telldus",
"id",
"is",
"the",
"key",
"for",
"easier",
"parsing",
"later",
"on",
"."
] | def parse_sensors(configuration):
result = {}
for sensor in configuration["my_sensors"]:
result[sensor["id"]] = sensor
return result | [
"def",
"parse_sensors",
"(",
"configuration",
")",
":",
"result",
"=",
"{",
"}",
"for",
"sensor",
"in",
"configuration",
"[",
"\"my_sensors\"",
"]",
":",
"result",
"[",
"sensor",
"[",
"\"id\"",
"]",
"]",
"=",
"sensor",
"return",
"result"
] | Bake the sensors array to a dict where the telldus id is
the key for easier parsing later on. | [
"Bake",
"the",
"sensors",
"array",
"to",
"a",
"dict",
"where",
"the",
"telldus",
"id",
"is",
"the",
"key",
"for",
"easier",
"parsing",
"later",
"on",
"."
] | [
"\"\"\"\n Bake the sensors array to a dict where the telldus id is\n the key for easier parsing later on.\n \"\"\""
] | [
{
"param": "configuration",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "configuration",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def parse_sensors(configuration):
result = {}
for sensor in configuration["my_sensors"]:
result[sensor["id"]] = sensor
return result | 610,062 | 359 |
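A usage sketch for the record above (illustrative only; the configuration dict is hypothetical):

# Usage sketch: re-key the sensors list by telldus id.
config = {"my_sensors": [{"id": 11, "name": "attic"},
                         {"id": 42, "name": "garage"}]}
sensors = parse_sensors(config)
print(sensors[42]["name"])  # garage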
6dfe4484c400ea89e5651f3ba0427b577e6e28c3 | Shiphero/python-amazon-mws | mws/apis/feeds.py | [
"Unlicense"
] | Python | feed_options_str | <not_specific> | def feed_options_str(feed_options):
"""Convert a FeedOptions dict of values into an appropriate string value.
Amazon docs for VAT upload with details:
https://m.media-amazon.com/images/G/01/B2B/DeveloperGuide/vat_calculation_service__dev_guide_H383rf73k4hsu1TYRH139kk134yzs.pdf
(section 6.4)
Example:
feed_options = {
"shippingid": "283845474",
"totalAmount": 3.25,
"totalvatamount": 1.23,
"invoicenumber": "INT-3431-XJE3",
"documenttype": "CreditNote",
"transactionid": "amzn:crow:429491192ksjfhe39s",
}
print(feed_options_str(feed_options))
>>> "metadata:shippingid=283845474;metadata:totalAmount=3.25;metadata:totalvatamount=1.23;
metadata:invoicenumber=INT-3431-XJE3;metadata:documenttype=CreditNote;
metadata:transactionid=amzn:crow:429491192ksjfhe39s"
"""
if not feed_options:
return None
if not isinstance(feed_options, dict):
raise ValueError("`feed_options` should be a dict or None")
output = []
for key, val in feed_options.items():
outval = val
if outval is True or outval is False:
# Convert literal `True` or `False` to strings `"true"` and `"false"`
outval = str(outval).lower()
output.append(f"metadata:{key}={outval}")
return ";".join(output) | Convert a FeedOptions dict of values into an appropriate string value.
Amazon docs for VAT upload with details:
https://m.media-amazon.com/images/G/01/B2B/DeveloperGuide/vat_calculation_service__dev_guide_H383rf73k4hsu1TYRH139kk134yzs.pdf
(section 6.4)
Example:
feed_options = {
"shippingid": "283845474",
"totalAmount": 3.25,
"totalvatamount": 1.23,
"invoicenumber": "INT-3431-XJE3",
"documenttype": "CreditNote",
"transactionid": "amzn:crow:429491192ksjfhe39s",
}
print(feed_options_str(feed_options))
>>> "metadata:shippingid=283845474;metadata:totalAmount=3.25;metadata:totalvatamount=1.23;
metadata:invoicenumber=INT-3431-XJE3;metadata:documenttype=CreditNote;
metadata:transactionid=amzn:crow:429491192ksjfhe39s"
| Convert a FeedOptions dict of values into an appropriate string value. | [
"Convert",
"a",
"FeedOptions",
"dict",
"of",
"values",
"into",
"an",
"appropriate",
"string",
"value",
"."
] | def feed_options_str(feed_options):
if not feed_options:
return None
if not isinstance(feed_options, dict):
raise ValueError("`feed_options` should be a dict or None")
output = []
for key, val in feed_options.items():
outval = val
if outval is True or outval is False:
outval = str(outval).lower()
output.append(f"metadata:{key}={outval}")
return ";".join(output) | [
"def",
"feed_options_str",
"(",
"feed_options",
")",
":",
"if",
"not",
"feed_options",
":",
"return",
"None",
"if",
"not",
"isinstance",
"(",
"feed_options",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"\"`feed_options` should be a dict or None\"",
")",
"output",
"=",
"[",
"]",
"for",
"key",
",",
"val",
"in",
"feed_options",
".",
"items",
"(",
")",
":",
"outval",
"=",
"val",
"if",
"outval",
"is",
"True",
"or",
"outval",
"is",
"False",
":",
"outval",
"=",
"str",
"(",
"outval",
")",
".",
"lower",
"(",
")",
"output",
".",
"append",
"(",
"f\"metadata:{key}={outval}\"",
")",
"return",
"\";\"",
".",
"join",
"(",
"output",
")"
] | Convert a FeedOptions dict of values into an appropriate string value. | [
"Convert",
"a",
"FeedOptions",
"dict",
"of",
"values",
"into",
"an",
"appropriate",
"string",
"value",
"."
] | [
"\"\"\"Convert a FeedOptions dict of values into an appropriate string value.\n \n Amazon docs for VAT upload with details:\n https://m.media-amazon.com/images/G/01/B2B/DeveloperGuide/vat_calculation_service__dev_guide_H383rf73k4hsu1TYRH139kk134yzs.pdf\n (section 6.4)\n \n Example:\n feed_options = {\n \"shippingid\": \"283845474\",\n \"totalAmount\": 3.25,\n \"totalvatamount\": 1.23,\n \"invoicenumber\": \"INT-3431-XJE3\",\n \"documenttype\": \"CreditNote\",\n \"transactionid\": \"amzn:crow:429491192ksjfhe39s\",\n }\n print(feed_options_str(feed_options))\n >>> \"metadata:shippingid=283845474;metadata:totalAmount=3.25;metadata:totalvatamount=1.23;\n metadata:invoicenumber=INT-3431-XJE3;metadata:documenttype=CreditNote;\n metadata:transactionid=amzn:crow:429491192ksjfhe39s\"\n \"\"\"",
"# Convert literal `True` or `False` to strings `\"true\"` and `\"false\"`"
] | [
{
"param": "feed_options",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "feed_options",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "examples",
"docstring": "feed_options = {\n\"shippingid\": \"283845474\",\n\"totalAmount\": 3.25,\n\"totalvatamount\": 1.23,\n\"invoicenumber\": \"INT-3431-XJE3\",\n\"documenttype\": \"CreditNote\",\n\"transactionid\": \"amzn:crow:429491192ksjfhe39s\",\n}\nprint(feed_options_str(feed_options))\n>>> \"metadata:shippingid=283845474;metadata:totalAmount=3.25;metadata:totalvatamount=1.23;\nmetadata:invoicenumber=INT-3431-XJE3;metadata:documenttype=CreditNote;\nmetadata:transactionid=amzn:crow:429491192ksjfhe39s\"",
"docstring_tokens": [
"feed_options",
"=",
"{",
"\"",
"shippingid",
"\"",
":",
"\"",
"283845474",
"\"",
"\"",
"totalAmount",
"\"",
":",
"3",
".",
"25",
"\"",
"totalvatamount",
"\"",
":",
"1",
".",
"23",
"\"",
"invoicenumber",
"\"",
":",
"\"",
"INT",
"-",
"3431",
"-",
"XJE3",
"\"",
"\"",
"documenttype",
"\"",
":",
"\"",
"CreditNote",
"\"",
"\"",
"transactionid",
"\"",
":",
"\"",
"amzn",
":",
"crow",
":",
"429491192ksjfhe39s",
"\"",
"}",
"print",
"(",
"feed_options_str",
"(",
"feed_options",
"))",
">>>",
"\"",
"metadata",
":",
"shippingid",
"=",
"283845474",
";",
"metadata",
":",
"totalAmount",
"=",
"3",
".",
"25",
";",
"metadata",
":",
"totalvatamount",
"=",
"1",
".",
"23",
";",
"metadata",
":",
"invoicenumber",
"=",
"INT",
"-",
"3431",
"-",
"XJE3",
";",
"metadata",
":",
"documenttype",
"=",
"CreditNote",
";",
"metadata",
":",
"transactionid",
"=",
"amzn",
":",
"crow",
":",
"429491192ksjfhe39s",
"\""
]
}
]
} | def feed_options_str(feed_options):
if not feed_options:
return None
if not isinstance(feed_options, dict):
raise ValueError("`feed_options` should be a dict or None")
output = []
for key, val in feed_options.items():
outval = val
if outval is True or outval is False:
outval = str(outval).lower()
output.append(f"metadata:{key}={outval}")
return ";".join(output) | 610,063 | 186 |
0278f95918a21fc61f016ccb0b7d04f5690d025d | cns-iu/HuBMAP---Hacking-the-Kidney | models/4-Deeplive.exe/code/utils/metrics.py | [
"MIT"
] | Python | dice_scores_img_tensor | <not_specific> | def dice_scores_img_tensor(pred, truth, eps=1e-8):
"""
Dice metric for a single image as tensor.
Args:
pred (torch tensor): Predictions.
truth (torch tensor): Ground truths.
eps (float, optional): epsilon to avoid dividing by 0. Defaults to 1e-8.
Returns:
np array : dice value for each class.
"""
pred = pred.view(-1) > 0
truth = truth.contiguous().view(-1) > 0
intersect = (pred & truth).sum(-1)
union = pred.sum(-1) + truth.sum(-1)
dice = (2.0 * intersect + eps) / (union + eps)
return float(dice) |
Dice metric for a single image as tensor.
Args:
pred (torch tensor): Predictions.
truth (torch tensor): Ground truths.
eps (float, optional): epsilon to avoid dividing by 0. Defaults to 1e-8.
Returns:
np array : dice value for each class.
| Dice metric for a single image as tensor. | [
"Dice",
"metric",
"for",
"a",
"single",
"image",
"as",
"tensor",
"."
] | def dice_scores_img_tensor(pred, truth, eps=1e-8):
pred = pred.view(-1) > 0
truth = truth.contiguous().view(-1) > 0
intersect = (pred & truth).sum(-1)
union = pred.sum(-1) + truth.sum(-1)
dice = (2.0 * intersect + eps) / (union + eps)
return float(dice) | [
"def",
"dice_scores_img_tensor",
"(",
"pred",
",",
"truth",
",",
"eps",
"=",
"1e-8",
")",
":",
"pred",
"=",
"pred",
".",
"view",
"(",
"-",
"1",
")",
">",
"0",
"truth",
"=",
"truth",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"-",
"1",
")",
">",
"0",
"intersect",
"=",
"(",
"pred",
"&",
"truth",
")",
".",
"sum",
"(",
"-",
"1",
")",
"union",
"=",
"pred",
".",
"sum",
"(",
"-",
"1",
")",
"+",
"truth",
".",
"sum",
"(",
"-",
"1",
")",
"dice",
"=",
"(",
"2.0",
"*",
"intersect",
"+",
"eps",
")",
"/",
"(",
"union",
"+",
"eps",
")",
"return",
"float",
"(",
"dice",
")"
] | Dice metric for a single image as tensor. | [
"Dice",
"metric",
"for",
"a",
"single",
"image",
"as",
"tensor",
"."
] | [
"\"\"\"\n Dice metric for a single image as tensor.\n\n Args:\n pred (torch tensor): Predictions.\n truth (torch tensor): Ground truths.\n eps (float, optional): epsilon to avoid dividing by 0. Defaults to 1e-8.\n\n Returns:\n np array : dice value for each class.\n \"\"\""
] | [
{
"param": "pred",
"type": null
},
{
"param": "truth",
"type": null
},
{
"param": "eps",
"type": null
}
] | {
"returns": [
{
"docstring": "np array : dice value for each class.",
"docstring_tokens": [
"np",
"array",
":",
"dice",
"value",
"for",
"each",
"class",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "pred",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "truth",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "eps",
"type": null,
"docstring": "epsilon to avoid dividing by 0. Defaults to 1e-8.",
"docstring_tokens": [
"epsilon",
"to",
"avoid",
"dividing",
"by",
"0",
".",
"Defaults",
"to",
"1e",
"-",
"8",
"."
],
"default": "1e-8",
"is_optional": true
}
],
"outlier_params": [],
"others": []
} | def dice_scores_img_tensor(pred, truth, eps=1e-8):
pred = pred.view(-1) > 0
truth = truth.contiguous().view(-1) > 0
intersect = (pred & truth).sum(-1)
union = pred.sum(-1) + truth.sum(-1)
dice = (2.0 * intersect + eps) / (union + eps)
return float(dice) | 610,064 | 327 |
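A quick sanity check for dice_scores_img_tensor, assuming PyTorch is installed. Note that despite the recorded docstring ("np array : dice value for each class"), the code casts the result to a single float:

    import torch

    pred = torch.tensor([0.9, -0.2, 0.4, -1.0])  # logits; values > 0 count as foreground
    truth = torch.tensor([1, 0, 1, 0])
    print(dice_scores_img_tensor(pred, truth))   # 1.0 -- predictions match the mask exactly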
8d9330b42405fb9a2a9cf1a6f25628cdb5947105 | benety/mongo | buildscripts/errorcodes.py | [
"Apache-2.0"
] | Python | coerce_to_number | <not_specific> | def coerce_to_number(ticket_value):
"""Coerce the input into a number.
If the input is a number, return itself. Otherwise parses input strings of two forms.
    'SERVER-12345' and '12345' will both return 12345.
"""
if isinstance(ticket_value, int):
return ticket_value
ticket_re = re.compile(r'(?:SERVER-)?(\d+)', re.IGNORECASE)
matches = ticket_re.fullmatch(ticket_value)
if not matches:
print("Unknown ticket number. Input: " + ticket_value)
return -1
return int(matches.group(1)) | Coerce the input into a number.
If the input is a number, return itself. Otherwise parses input strings of two forms.
    'SERVER-12345' and '12345' will both return 12345.
| Coerce the input into a number.
If the input is a number, return itself. Otherwise parses input strings of two forms. | [
"Coerce",
"the",
"input",
"into",
"a",
"number",
".",
"If",
"the",
"input",
"is",
"a",
"number",
"return",
"itself",
".",
"Otherwise",
"parses",
"input",
"strings",
"of",
"two",
"forms",
"."
] | def coerce_to_number(ticket_value):
if isinstance(ticket_value, int):
return ticket_value
ticket_re = re.compile(r'(?:SERVER-)?(\d+)', re.IGNORECASE)
matches = ticket_re.fullmatch(ticket_value)
if not matches:
print("Unknown ticket number. Input: " + ticket_value)
return -1
return int(matches.group(1)) | [
"def",
"coerce_to_number",
"(",
"ticket_value",
")",
":",
"if",
"isinstance",
"(",
"ticket_value",
",",
"int",
")",
":",
"return",
"ticket_value",
"ticket_re",
"=",
"re",
".",
"compile",
"(",
"r'(?:SERVER-)?(\\d+)'",
",",
"re",
".",
"IGNORECASE",
")",
"matches",
"=",
"ticket_re",
".",
"fullmatch",
"(",
"ticket_value",
")",
"if",
"not",
"matches",
":",
"print",
"(",
"\"Unknown ticket number. Input: \"",
"+",
"ticket_value",
")",
"return",
"-",
"1",
"return",
"int",
"(",
"matches",
".",
"group",
"(",
"1",
")",
")"
] | Coerce the input into a number. | [
"Coerce",
"the",
"input",
"into",
"a",
"number",
"."
] | [
"\"\"\"Coerce the input into a number.\n\n If the input is a number, return itself. Otherwise parses input strings of two forms.\n 'SERVER-12345' and '12345' will both return 12345'.\n \"\"\""
] | [
{
"param": "ticket_value",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "ticket_value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def coerce_to_number(ticket_value):
if isinstance(ticket_value, int):
return ticket_value
ticket_re = re.compile(r'(?:SERVER-)?(\d+)', re.IGNORECASE)
matches = ticket_re.fullmatch(ticket_value)
if not matches:
print("Unknown ticket number. Input: " + ticket_value)
return -1
return int(matches.group(1)) | 610,065 | 733 |
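Illustrative calls for coerce_to_number (the `import re` from code_with_imports is assumed):

    print(coerce_to_number("SERVER-12345"))  # 12345
    print(coerce_to_number("server-12345"))  # 12345, the regex is case-insensitive
    print(coerce_to_number(42))              # 42, ints pass through unchanged
    print(coerce_to_number("SERVER-12b"))    # prints "Unknown ticket number..." and returns -1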
d07eaeb1dd396386b222ce0ef9739dbd6bde1e2e | krivers/ITAP-django | hintgen/astTools.py | [
"MIT"
] | Python | isTokenStepString | <not_specific> | def isTokenStepString(s):
"""Determine whether this is a placeholder string"""
if len(s) < 2:
return False
return s[0] == "~" and s[-1] == "~" | Determine whether this is a placeholder string | Determine whether this is a placeholder string | [
"Determine",
"whether",
"this",
"is",
"a",
"placeholder",
"string"
] | def isTokenStepString(s):
if len(s) < 2:
return False
return s[0] == "~" and s[-1] == "~" | [
"def",
"isTokenStepString",
"(",
"s",
")",
":",
"if",
"len",
"(",
"s",
")",
"<",
"2",
":",
"return",
"False",
"return",
"s",
"[",
"0",
"]",
"==",
"\"~\"",
"and",
"s",
"[",
"-",
"1",
"]",
"==",
"\"~\""
] | Determine whether this is a placeholder string | [
"Determine",
"whether",
"this",
"is",
"a",
"placeholder",
"string"
] | [
"\"\"\"Determine whether this is a placeholder string\"\"\""
] | [
{
"param": "s",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "s",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def isTokenStepString(s):
if len(s) < 2:
return False
return s[0] == "~" and s[-1] == "~" | 610,066 | 698 |
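A few illustrative checks for isTokenStepString; the placeholder convention is simply a leading and trailing tilde with at least two characters total:

    print(isTokenStepString("~move~"))  # True
    print(isTokenStepString("~~"))      # True -- two tildes alone still qualify
    print(isTokenStepString("~"))       # False, below the two-character minimum
    print(isTokenStepString("move"))    # False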
a7b58982c093eb1e617fe0a6fc22056bed8f8b7c | xiaopeifeng/blade-build | src/blade/cc_targets.py | [
"BSD-3-Clause"
] | Python | _parse_hdr_level_line | <not_specific> | def _parse_hdr_level_line(line):
"""Parse a normal line of a header stack file
Example:
. ./common/rpc/rpc_client.h
"""
pos = line.find(' ')
if pos == -1:
return -1, ''
level, hdr = line[:pos].count('.'), line[pos + 1:]
if hdr.startswith('./'):
hdr = hdr[2:]
return level, hdr | Parse a normal line of a header stack file
Example:
. ./common/rpc/rpc_client.h
| Parse a normal line of a header stack file | [
"Parse",
"a",
"normal",
"line",
"of",
"a",
"header",
"stack",
"file"
] | def _parse_hdr_level_line(line):
pos = line.find(' ')
if pos == -1:
return -1, ''
level, hdr = line[:pos].count('.'), line[pos + 1:]
if hdr.startswith('./'):
hdr = hdr[2:]
return level, hdr | [
"def",
"_parse_hdr_level_line",
"(",
"line",
")",
":",
"pos",
"=",
"line",
".",
"find",
"(",
"' '",
")",
"if",
"pos",
"==",
"-",
"1",
":",
"return",
"-",
"1",
",",
"''",
"level",
",",
"hdr",
"=",
"line",
"[",
":",
"pos",
"]",
".",
"count",
"(",
"'.'",
")",
",",
"line",
"[",
"pos",
"+",
"1",
":",
"]",
"if",
"hdr",
".",
"startswith",
"(",
"'./'",
")",
":",
"hdr",
"=",
"hdr",
"[",
"2",
":",
"]",
"return",
"level",
",",
"hdr"
] | Parse a normal line of a header stack file | [
"Parse",
"a",
"normal",
"line",
"of",
"a",
"header",
"stack",
"file"
] | [
"\"\"\"Parse a normal line of a header stack file\n\n Example:\n . ./common/rpc/rpc_client.h\n \"\"\""
] | [
{
"param": "line",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "line",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "examples",
"docstring": null,
"docstring_tokens": [
"None"
]
}
]
} | def _parse_hdr_level_line(line):
pos = line.find(' ')
if pos == -1:
return -1, ''
level, hdr = line[:pos].count('.'), line[pos + 1:]
if hdr.startswith('./'):
hdr = hdr[2:]
return level, hdr | 610,067 | 208 |
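Usage sketch for _parse_hdr_level_line; the header paths are illustrative:

    print(_parse_hdr_level_line(". ./common/rpc/rpc_client.h"))
    # (1, 'common/rpc/rpc_client.h') -- one dot of depth, leading './' stripped
    print(_parse_hdr_level_line(".. thirdparty/glog/logging.h"))
    # (2, 'thirdparty/glog/logging.h')
    print(_parse_hdr_level_line("malformed"))  # (-1, '') when no space is present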
55f800af00f97598cdc65feb247a01ceca7a0d3f | Rikorose/DeepFilterNet | scripts/copy_datadir.py | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | Python | du | <not_specific> | def du(path):
"""disk usage in human readable format (e.g. '2,1')"""
return float(
subprocess.check_output(["du", "-shD", "--block-size=1G", path]).split()[0].decode("utf-8")
) | disk usage in human readable format (e.g. '2,1') | disk usage in human readable format | [
"disk",
"usage",
"in",
"human",
"readable",
"format"
] | def du(path):
return float(
subprocess.check_output(["du", "-shD", "--block-size=1G", path]).split()[0].decode("utf-8")
) | [
"def",
"du",
"(",
"path",
")",
":",
"return",
"float",
"(",
"subprocess",
".",
"check_output",
"(",
"[",
"\"du\"",
",",
"\"-shD\"",
",",
"\"--block-size=1G\"",
",",
"path",
"]",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"decode",
"(",
"\"utf-8\"",
")",
")"
] | disk usage in human readable format (e.g. | [
"disk",
"usage",
"in",
"human",
"readable",
"format",
"(",
"e",
".",
"g",
"."
] | [
"\"\"\"disk usage in human readable format (e.g. '2,1')\"\"\""
] | [
{
"param": "path",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import subprocess
def du(path):
return float(
subprocess.check_output(["du", "-shD", "--block-size=1G", path]).split()[0].decode("utf-8")
) | 610,068 | 321 |
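Since du shells out to GNU coreutils, this sketch only runs where `du` is on PATH; the path is illustrative and the output depends on the machine:

    import subprocess

    print(du("/var/log"))  # e.g. 1.0 -- usage rounded up to 1-GiB blocks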
757a17e72f6d0e36336907c4898cb5954aba716f | cclauss/upvote | upvote/gae/datastore/utils.py | [
"Apache-2.0"
] | Python | DeletePropertyValue | null | def DeletePropertyValue(entity, property_name):
"""Delete a property's value from an ndb entity.
NOTE: This function does not save the changes made to the provided entity. An
additional put is required to save these changes.
Args:
entity: ndb.Model, The entity from which the property value will be removed.
property_name: str, The name of the property whose value will be removed.
"""
if property_name in entity._values: # pylint: disable=protected-access
del entity._values[property_name] # pylint: disable=protected-access | Delete a property's value from an ndb entity.
NOTE: This function does not save the changes made to the provided entity. An
additional put is required to save these changes.
Args:
entity: ndb.Model, The entity from which the property value will be removed.
property_name: str, The name of the property whose value will be removed.
| Delete a property's value from an ndb entity.
NOTE: This function does not save the changes made to the provided entity. An
additional put is required to save these changes. | [
"Delete",
"a",
"property",
"'",
"s",
"value",
"from",
"an",
"ndb",
"entity",
".",
"NOTE",
":",
"This",
"function",
"does",
"not",
"save",
"the",
"changes",
"made",
"to",
"the",
"provided",
"entity",
".",
"An",
"additional",
"put",
"is",
"required",
"to",
"save",
"these",
"changes",
"."
] | def DeletePropertyValue(entity, property_name):
if property_name in entity._values:
del entity._values[property_name] | [
"def",
"DeletePropertyValue",
"(",
"entity",
",",
"property_name",
")",
":",
"if",
"property_name",
"in",
"entity",
".",
"_values",
":",
"del",
"entity",
".",
"_values",
"[",
"property_name",
"]"
] | Delete a property's value from an ndb entity. | [
"Delete",
"a",
"property",
"'",
"s",
"value",
"from",
"an",
"ndb",
"entity",
"."
] | [
"\"\"\"Delete a property's value from an ndb entity.\n\n NOTE: This function does not save the changes made to the provided entity. An\n additional put is required to save these changes.\n\n Args:\n entity: ndb.Model, The entity from which the property value will be removed.\n property_name: str, The name of the property whose value will be removed.\n \"\"\"",
"# pylint: disable=protected-access",
"# pylint: disable=protected-access"
] | [
{
"param": "entity",
"type": null
},
{
"param": "property_name",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "entity",
"type": null,
"docstring": "ndb.Model, The entity from which the property value will be removed.",
"docstring_tokens": [
"ndb",
".",
"Model",
"The",
"entity",
"from",
"which",
"the",
"property",
"value",
"will",
"be",
"removed",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "property_name",
"type": null,
"docstring": "str, The name of the property whose value will be removed.",
"docstring_tokens": [
"str",
"The",
"name",
"of",
"the",
"property",
"whose",
"value",
"will",
"be",
"removed",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def DeletePropertyValue(entity, property_name):
if property_name in entity._values:
del entity._values[property_name] | 610,069 | 605 |
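A hedged sketch for DeletePropertyValue. It assumes the legacy App Engine ndb runtime and a hypothetical Widget model, neither of which ships with this record:

    from google.appengine.ext import ndb

    class Widget(ndb.Model):  # hypothetical model for illustration only
        color = ndb.StringProperty()

    widget = Widget(color="red")
    DeletePropertyValue(widget, "color")
    widget.put()  # as the docstring notes, an explicit put() is still required to persist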
7f2caca0014f1a14279f4f3f6c45c8496032c5f9 | Asap7772/railrl_evalsawyer | experiments/vitchyr/vaes/vae.py | [
"MIT"
] | Python | kl_to_prior | <not_specific> | def kl_to_prior(means, log_stds, stds):
"""
KL between a Gaussian and a standard Gaussian.
https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
"""
return 0.5 * (
- 2 * log_stds # log std_prior = 0
- 1 # d = 1
+ stds ** 2
+ means ** 2
) |
KL between a Gaussian and a standard Gaussian.
https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
| KL between a Gaussian and a standard Gaussian. | [
"KL",
"between",
"a",
"Gaussian",
"and",
"a",
"standard",
"Gaussian",
"."
] | def kl_to_prior(means, log_stds, stds):
return 0.5 * (
- 2 * log_stds
- 1
+ stds ** 2
+ means ** 2
) | [
"def",
"kl_to_prior",
"(",
"means",
",",
"log_stds",
",",
"stds",
")",
":",
"return",
"0.5",
"*",
"(",
"-",
"2",
"*",
"log_stds",
"-",
"1",
"+",
"stds",
"**",
"2",
"+",
"means",
"**",
"2",
")"
] | KL between a Gaussian and a standard Gaussian. | [
"KL",
"between",
"a",
"Gaussian",
"and",
"a",
"standard",
"Gaussian",
"."
] | [
"\"\"\"\n KL between a Gaussian and a standard Gaussian.\n\n https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians\n \"\"\"",
"# log std_prior = 0",
"# d = 1"
] | [
{
"param": "means",
"type": null
},
{
"param": "log_stds",
"type": null
},
{
"param": "stds",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "means",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "log_stds",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "stds",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def kl_to_prior(means, log_stds, stds):
return 0.5 * (
- 2 * log_stds
- 1
+ stds ** 2
+ means ** 2
) | 610,070 | 220 |
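Sanity check for kl_to_prior: a standard Gaussian (zero mean, unit std) has zero KL to the prior. torch is assumed here, though any broadcasting array library would work since the function is elementwise:

    import torch

    means = torch.zeros(3)
    stds = torch.ones(3)
    print(kl_to_prior(means, stds.log(), stds))  # tensor([0., 0., 0.])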
56c05badd3db9ae3de257f30c39c88c4d47613d8 | NetSys/kappa | compiler/transform/util.py | [
"BSD-2-Clause"
] | Python | parse_ast_stmt | ast.stmt | def parse_ast_stmt(stmt_code: str) -> ast.stmt:
"""Parses code for a single statement into an AST."""
stmt_code = textwrap.dedent(stmt_code)
node = ast.parse(stmt_code, mode="exec")
assert isinstance(node, ast.Module)
if len(node.body) > 1:
raise ValueError(f"Code contains more than one statement: {stmt_code}")
return node.body[0] | Parses code for a single statement into an AST. | Parses code for a single statement into an AST. | [
"Parses",
"code",
"for",
"a",
"single",
"statement",
"into",
"an",
"AST",
"."
] | def parse_ast_stmt(stmt_code: str) -> ast.stmt:
stmt_code = textwrap.dedent(stmt_code)
node = ast.parse(stmt_code, mode="exec")
assert isinstance(node, ast.Module)
if len(node.body) > 1:
raise ValueError(f"Code contains more than one statement: {stmt_code}")
return node.body[0] | [
"def",
"parse_ast_stmt",
"(",
"stmt_code",
":",
"str",
")",
"->",
"ast",
".",
"stmt",
":",
"stmt_code",
"=",
"textwrap",
".",
"dedent",
"(",
"stmt_code",
")",
"node",
"=",
"ast",
".",
"parse",
"(",
"stmt_code",
",",
"mode",
"=",
"\"exec\"",
")",
"assert",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Module",
")",
"if",
"len",
"(",
"node",
".",
"body",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"f\"Code contains more than one statement: {stmt_code}\"",
")",
"return",
"node",
".",
"body",
"[",
"0",
"]"
] | Parses code for a single statement into an AST. | [
"Parses",
"code",
"for",
"a",
"single",
"statement",
"into",
"an",
"AST",
"."
] | [
"\"\"\"Parses code for a single statement into an AST.\"\"\""
] | [
{
"param": "stmt_code",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "stmt_code",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import ast
import textwrap
def parse_ast_stmt(stmt_code: str) -> ast.stmt:
stmt_code = textwrap.dedent(stmt_code)
node = ast.parse(stmt_code, mode="exec")
assert isinstance(node, ast.Module)
if len(node.body) > 1:
raise ValueError(f"Code contains more than one statement: {stmt_code}")
return node.body[0] | 610,071 | 590 |
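Usage sketch for parse_ast_stmt; both ast and textwrap come from code_with_imports:

    stmt = parse_ast_stmt("x = 1 + 2")
    print(type(stmt).__name__)  # Assign

    try:
        parse_ast_stmt("x = 1; y = 2")
    except ValueError as exc:
        print(exc)  # Code contains more than one statement: x = 1; y = 2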
1fc5865adc1cae49a54c041b23db6e2464f295f1 | SheetMusic-Team-3/MVP | aws-scripts/inference.py | [
"MIT"
] | Python | _process_input | <not_specific> | def _process_input(data, context):
""" pre-process request input before it is sent to
TensorFlow Serving REST API
Args:
data (obj): the request data, in format of dict or string
context (Context): object containing request and configuration details
Returns:
(dict): a JSON-serializable dict that contains request body and headers
"""
if context.request_content_type == 'application/json':
data = data.read().decode("utf-8")
return data if len(data) else ''
raise ValueError('{{"error": "unsupported content type {}"}}'.format(
context.request_content_type or "unknown"
)) | pre-process request input before it is sent to
TensorFlow Serving REST API
Args:
data (obj): the request data, in format of dict or string
context (Context): object containing request and configuration details
Returns:
(dict): a JSON-serializable dict that contains request body and headers
| pre-process request input before it is sent to
TensorFlow Serving REST API | [
"pre",
"-",
"process",
"request",
"input",
"before",
"it",
"is",
"sent",
"to",
"TensorFlow",
"Serving",
"REST",
"API"
] | def _process_input(data, context):
if context.request_content_type == 'application/json':
data = data.read().decode("utf-8")
return data if len(data) else ''
raise ValueError('{{"error": "unsupported content type {}"}}'.format(
context.request_content_type or "unknown"
)) | [
"def",
"_process_input",
"(",
"data",
",",
"context",
")",
":",
"if",
"context",
".",
"request_content_type",
"==",
"'application/json'",
":",
"data",
"=",
"data",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"return",
"data",
"if",
"len",
"(",
"data",
")",
"else",
"''",
"raise",
"ValueError",
"(",
"'{{\"error\": \"unsupported content type {}\"}}'",
".",
"format",
"(",
"context",
".",
"request_content_type",
"or",
"\"unknown\"",
")",
")"
] | pre-process request input before it is sent to
TensorFlow Serving REST API | [
"pre",
"-",
"process",
"request",
"input",
"before",
"it",
"is",
"sent",
"to",
"TensorFlow",
"Serving",
"REST",
"API"
] | [
"\"\"\" pre-process request input before it is sent to\n TensorFlow Serving REST API\n Args:\n data (obj): the request data, in format of dict or string\n context (Context): object containing request and configuration details\n Returns:\n (dict): a JSON-serializable dict that contains request body and headers\n \"\"\""
] | [
{
"param": "data",
"type": null
},
{
"param": "context",
"type": null
}
] | {
"returns": [
{
"docstring": "a JSON-serializable dict that contains request body and headers",
"docstring_tokens": [
"a",
"JSON",
"-",
"serializable",
"dict",
"that",
"contains",
"request",
"body",
"and",
"headers"
],
"type": "(dict)"
}
],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": "the request data, in format of dict or string",
"docstring_tokens": [
"the",
"request",
"data",
"in",
"format",
"of",
"dict",
"or",
"string"
],
"default": null,
"is_optional": false
},
{
"identifier": "context",
"type": null,
"docstring": "object containing request and configuration details",
"docstring_tokens": [
"object",
"containing",
"request",
"and",
"configuration",
"details"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def _process_input(data, context):
if context.request_content_type == 'application/json':
data = data.read().decode("utf-8")
return data if len(data) else ''
raise ValueError('{{"error": "unsupported content type {}"}}'.format(
context.request_content_type or "unknown"
)) | 610,073 | 295 |
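A self-contained sketch for _process_input; FakeContext is a hypothetical stand-in for the serving Context object the docstring refers to:

    import io

    class FakeContext:  # hypothetical; carries only the attribute the function reads
        request_content_type = "application/json"

    body = io.BytesIO(b'{"instances": [[1.0, 2.0]]}')
    print(_process_input(body, FakeContext()))  # {"instances": [[1.0, 2.0]]}
    # any other content type raises ValueError with a JSON-style error payload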
76278d15bf1333a3dd2e311c2bf0c34fca753e9b | yrath/law | law/parser.py | [
"BSD-3-Clause"
] | Python | add_cmdline_arg | <not_specific> | def add_cmdline_arg(args, arg, *values):
"""
Adds a command line argument *arg* to a list of argument *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *arg* exists, *args* is returned unchanged. Otherwise,
*arg* is appended to the end with optional argument *values*. Example:
.. code-block:: python
args = global_cmdline_values()
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--local-scheduler")
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--workers", 4)
# -> ["--local-scheduler", "--workers", "4"]
"""
if arg not in args:
args = list(args) + [arg] + list(values)
return args |
Adds a command line argument *arg* to a list of argument *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *arg* exists, *args* is returned unchanged. Otherwise,
*arg* is appended to the end with optional argument *values*. Example:
.. code-block:: python
args = global_cmdline_values()
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--local-scheduler")
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--workers", 4)
# -> ["--local-scheduler", "--workers", "4"]
| Adds a command line argument *arg* to a list of argument *args*, e.g. as returned from | [
"Adds",
"a",
"command",
"line",
"argument",
"*",
"arg",
"*",
"to",
"a",
"list",
"of",
"argument",
"*",
"args",
"*",
"e",
".",
"g",
".",
"as",
"returned",
"from"
] | def add_cmdline_arg(args, arg, *values):
if arg not in args:
args = list(args) + [arg] + list(values)
return args | [
"def",
"add_cmdline_arg",
"(",
"args",
",",
"arg",
",",
"*",
"values",
")",
":",
"if",
"arg",
"not",
"in",
"args",
":",
"args",
"=",
"list",
"(",
"args",
")",
"+",
"[",
"arg",
"]",
"+",
"list",
"(",
"values",
")",
"return",
"args"
] | Adds a command line argument *arg* to a list of argument *args*, e.g. | [
"Adds",
"a",
"command",
"line",
"argument",
"*",
"arg",
"*",
"to",
"a",
"list",
"of",
"argument",
"*",
"args",
"*",
"e",
".",
"g",
"."
] | [
"\"\"\"\n Adds a command line argument *arg* to a list of argument *args*, e.g. as returned from\n :py:func:`global_cmdline_args`. When *arg* exists, *args* is returned unchanged. Otherwise,\n *arg* is appended to the end with optional argument *values*. Example:\n\n .. code-block:: python\n\n args = global_cmdline_values()\n # -> [\"--local-scheduler\"]\n\n add_cmdline_arg(args, \"--local-scheduler\")\n # -> [\"--local-scheduler\"]\n\n add_cmdline_arg(args, \"--workers\", 4)\n # -> [\"--local-scheduler\", \"--workers\", \"4\"]\n \"\"\""
] | [
{
"param": "args",
"type": null
},
{
"param": "arg",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "arg",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "py",
"docstring": "\n\ncode-block:: python\n\nargs = global_cmdline_values()\n-> [\"--local-scheduler\"]\n\nadd_cmdline_arg(args, \"--local-scheduler\")\n-> [\"--local-scheduler\"]\n\n",
"docstring_tokens": [
"code",
"-",
"block",
"::",
"python",
"args",
"=",
"global_cmdline_values",
"()",
"-",
">",
"[",
"\"",
"--",
"local",
"-",
"scheduler",
"\"",
"]",
"add_cmdline_arg",
"(",
"args",
"\"",
"--",
"local",
"-",
"scheduler",
"\"",
")",
"-",
">",
"[",
"\"",
"--",
"local",
"-",
"scheduler",
"\"",
"]"
]
}
]
} | def add_cmdline_arg(args, arg, *values):
if arg not in args:
args = list(args) + [arg] + list(values)
return args | 610,076 | 47 |
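Illustrative calls for add_cmdline_arg, mirroring the docstring example. Values are appended exactly as given (no str() conversion), so passing "4" rather than the int 4 reproduces the documented output:

    args = ["--local-scheduler"]
    print(add_cmdline_arg(args, "--local-scheduler"))  # ['--local-scheduler'], unchanged
    print(add_cmdline_arg(args, "--workers", "4"))     # ['--local-scheduler', '--workers', '4']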