Dataset schema (column, dtype, min-max length):

repo                stringlengths    7-55
path                stringlengths    4-127
func_name           stringlengths    1-88
original_string     stringlengths    75-19.8k
language            stringclasses    1 value
code                stringlengths    75-19.8k
code_tokens         sequence
docstring           stringlengths    3-17.3k
docstring_tokens    sequence
sha                 stringlengths    40-40
url                 stringlengths    87-242
partition           stringclasses    1 value
smdabdoub/phylotoast
phylotoast/util.py
split_phylogeny
def split_phylogeny(p, level="s"): """ Return either the full or truncated version of a QIIME-formatted taxonomy string. :type p: str :param p: A QIIME-formatted taxonomy string: k__Foo; p__Bar; ... :type level: str :param level: The different levels of identification are kingdom (k), phylum (p), class (c), order (o), family (f), genus (g) and species (s). If level is not provided, the default level of identification is species. :rtype: str :return: A QIIME-formatted taxonomy string up to the classification given by param level. """ level = level+"__" result = p.split(level) return result[0]+level+result[1].split(";")[0]
python
def split_phylogeny(p, level="s"): """ Return either the full or truncated version of a QIIME-formatted taxonomy string. :type p: str :param p: A QIIME-formatted taxonomy string: k__Foo; p__Bar; ... :type level: str :param level: The different levels of identification are kingdom (k), phylum (p), class (c), order (o), family (f), genus (g) and species (s). If level is not provided, the default level of identification is species. :rtype: str :return: A QIIME-formatted taxonomy string up to the classification given by param level. """ level = level+"__" result = p.split(level) return result[0]+level+result[1].split(";")[0]
[ "def", "split_phylogeny", "(", "p", ",", "level", "=", "\"s\"", ")", ":", "level", "=", "level", "+", "\"__\"", "result", "=", "p", ".", "split", "(", "level", ")", "return", "result", "[", "0", "]", "+", "level", "+", "result", "[", "1", "]", ".", "split", "(", "\";\"", ")", "[", "0", "]" ]
Return either the full or truncated version of a QIIME-formatted taxonomy string. :type p: str :param p: A QIIME-formatted taxonomy string: k__Foo; p__Bar; ... :type level: str :param level: The different levels of identification are kingdom (k), phylum (p), class (c), order (o), family (f), genus (g) and species (s). If level is not provided, the default level of identification is species. :rtype: str :return: A QIIME-formatted taxonomy string up to the classification given by param level.
[ "Return", "either", "the", "full", "or", "truncated", "version", "of", "a", "QIIME", "-", "formatted", "taxonomy", "string", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L159-L177
train
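A quick usage sketch for split_phylogeny; the taxonomy string below is invented for illustration, not taken from the source.

# Truncate a hypothetical QIIME lineage at the genus level.
tax = ("k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales; "
       "f__Streptococcaceae; g__Streptococcus; s__mutans")
print(split_phylogeny(tax, level="g"))
# -> k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales; f__Streptococcaceae; g__Streptococcus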
smdabdoub/phylotoast
phylotoast/util.py
ensure_dir
def ensure_dir(d): """ Check whether the supplied directory path exists; if it does not, create it. The method catches OSError exceptions and returns a descriptive message instead of re-raising the error. :type d: str :param d: The full path to a directory. :return: Does not return anything, but creates a directory path if it doesn't exist already. """ if not os.path.exists(d): try: os.makedirs(d) except OSError as oe: # should not happen with os.makedirs # ENOENT: No such file or directory if oe.errno == errno.ENOENT: msg = twdd("""One or more directories in the path ({}) do not exist. If you are specifying a new directory for output, please ensure all other directories in the path currently exist.""") return msg.format(d) else: msg = twdd("""An error occurred trying to create the output directory ({}) with message: {}""") return msg.format(d, oe.strerror)
python
def ensure_dir(d): """ Check whether the supplied directory path exists; if it does not, create it. The method catches OSError exceptions and returns a descriptive message instead of re-raising the error. :type d: str :param d: The full path to a directory. :return: Does not return anything, but creates a directory path if it doesn't exist already. """ if not os.path.exists(d): try: os.makedirs(d) except OSError as oe: # should not happen with os.makedirs # ENOENT: No such file or directory if oe.errno == errno.ENOENT: msg = twdd("""One or more directories in the path ({}) do not exist. If you are specifying a new directory for output, please ensure all other directories in the path currently exist.""") return msg.format(d) else: msg = twdd("""An error occurred trying to create the output directory ({}) with message: {}""") return msg.format(d, oe.strerror)
[ "def", "ensure_dir", "(", "d", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "d", ")", ":", "try", ":", "os", ".", "makedirs", "(", "d", ")", "except", "OSError", "as", "oe", ":", "if", "os", ".", "errno", "==", "errno", ".", "ENOENT", ":", "msg", "=", "twdd", "(", ")", "return", "msg", ".", "format", "(", "d", ")", "else", ":", "msg", "=", "twdd", "(", ")", "return", "msg", ".", "format", "(", "d", ",", "oe", ".", "strerror", ")" ]
Check whether the supplied directory path exists; if it does not, create it. The method catches OSError exceptions and returns a descriptive message instead of re-raising the error. :type d: str :param d: The full path to a directory. :return: Does not return anything, but creates a directory path if it doesn't exist already.
[ "Check", "to", "make", "sure", "the", "supplied", "directory", "path", "does", "not", "exist", "if", "so", "create", "it", ".", "The", "method", "catches", "OSError", "exceptions", "and", "returns", "a", "descriptive", "message", "instead", "of", "re", "-", "raising", "the", "error", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L180-L206
train
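A minimal sketch of calling ensure_dir; the output path is hypothetical. Note that on failure the function returns an error-message string rather than raising, so callers should check the return value. (The original code compared os.errno to errno.ENOENT, which is always false since os.errno is a module; the fix above compares oe.errno instead.)

import sys
err = ensure_dir("results/plots")  # hypothetical output path
if err is not None:
    sys.exit(err)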
smdabdoub/phylotoast
phylotoast/util.py
file_handle
def file_handle(fnh, mode="rU"): """ Takes either a file path or an open file handle, checks validity and returns an open file handle or raises an appropriate Exception. :type fnh: str :param fnh: It is the full path to a file, or open file handle :type mode: str :param mode: The way in which this file will be used, for example to read or write or both. By default, file will be opened in rU mode. :return: Returns an opened file for appropriate usage. """ handle = None if isinstance(fnh, file): if fnh.closed: raise ValueError("Input file is closed.") handle = fnh elif isinstance(fnh, str): handle = open(fnh, mode) return handle
python
def file_handle(fnh, mode="rU"): """ Takes either a file path or an open file handle, checks validity and returns an open file handle or raises an appropriate Exception. :type fnh: str :param fnh: It is the full path to a file, or open file handle :type mode: str :param mode: The way in which this file will be used, for example to read or write or both. By default, file will be opened in rU mode. :return: Returns an opened file for appropriate usage. """ handle = None if isinstance(fnh, file): if fnh.closed: raise ValueError("Input file is closed.") handle = fnh elif isinstance(fnh, str): handle = open(fnh, mode) return handle
[ "def", "file_handle", "(", "fnh", ",", "mode", "=", "\"rU\"", ")", ":", "handle", "=", "None", "if", "isinstance", "(", "fnh", ",", "file", ")", ":", "if", "fnh", ".", "closed", ":", "raise", "ValueError", "(", "\"Input file is closed.\"", ")", "handle", "=", "fnh", "elif", "isinstance", "(", "fnh", ",", "str", ")", ":", "handle", "=", "open", "(", "fnh", ",", "mode", ")", "return", "handle" ]
Takes either a file path or an open file handle, checks validity and returns an open file handle or raises an appropriate Exception. :type fnh: str :param fnh: The full path to a file, or an open file handle :type mode: str :param mode: The way in which this file will be used, for example to read or write or both. By default, the file will be opened in rU mode. :return: Returns an opened file for appropriate usage.
[ "Takes", "either", "a", "file", "path", "or", "an", "open", "file", "handle", "checks", "validity", "and", "returns", "an", "open", "file", "handle", "or", "raises", "an", "appropriate", "Exception", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L209-L231
train
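Two caveats: isinstance(fnh, file) relies on the Python 2 file builtin (gone in Python 3, where io.IOBase is the closest check), and despite the docstring the function silently returns None for unsupported input types. A hedged Python 3 rework under those assumptions; the name file_handle_py3 is invented here:

import io

def file_handle_py3(fnh, mode="r"):
    # Accept an open file object or a path string; raise on anything else.
    if isinstance(fnh, io.IOBase):
        if fnh.closed:
            raise ValueError("Input file is closed.")
        return fnh
    if isinstance(fnh, str):
        return open(fnh, mode)
    raise TypeError("Expected a file path or an open file handle.")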
smdabdoub/phylotoast
phylotoast/util.py
gather_categories
def gather_categories(imap, header, categories=None): """ Find the user specified categories in the map and create a dictionary to contain the relevant data for each type within the categories. Multiple categories will have their types combined such that each possible combination will have its own entry in the dictionary. :type imap: dict :param imap: The input mapping file data keyed by SampleID :type header: list :param header: The header line from the input mapping file. This will be searched for the user-specified categories :type categories: list :param categories: The list of user-specified category column names from the mapping file :rtype: dict :return: An ordered dictionary keyed on the combinations of all the types found within the user-specified categories. Each entry will contain an empty DataCategory namedtuple. If no categories are specified, a single entry with the key 'default' will be returned """ # If no categories provided, return all SampleIDs if categories is None: return {"default": DataCategory(set(imap.keys()), {})} cat_ids = [header.index(cat) for cat in categories if cat in header and "=" not in cat] table = OrderedDict() conditions = defaultdict(set) for i, cat in enumerate(categories): if "=" in cat and cat.split("=")[0] in header: cat_name = header[header.index(cat.split("=")[0])] conditions[cat_name].add(cat.split("=")[1]) # If invalid categories or conditions identified, return all SampleIDs if not cat_ids and not conditions: return {"default": DataCategory(set(imap.keys()), {})} # If only category column given, return column-wise SampleIDs if cat_ids and not conditions: for sid, row in imap.items(): cat_name = "_".join([row[cid] for cid in cat_ids]) if cat_name not in table: table[cat_name] = DataCategory(set(), {}) table[cat_name].sids.add(sid) return table # Collect all condition names cond_ids = set() for k in conditions: try: cond_ids.add(header.index(k)) except ValueError: continue idx_to_test = set(cat_ids).union(cond_ids) # If column name and condition given, return overlapping SampleIDs of column and # condition combinations for sid, row in imap.items(): if all([row[header.index(c)] in conditions[c] for c in conditions]): key = "_".join([row[idx] for idx in idx_to_test]) try: assert key in table.keys() except AssertionError: table[key] = DataCategory(set(), {}) table[key].sids.add(sid) try: assert len(table) > 0 except AssertionError: return {"default": DataCategory(set(imap.keys()), {})} else: return table
python
def gather_categories(imap, header, categories=None): """ Find the user specified categories in the map and create a dictionary to contain the relevant data for each type within the categories. Multiple categories will have their types combined such that each possible combination will have its own entry in the dictionary. :type imap: dict :param imap: The input mapping file data keyed by SampleID :type header: list :param header: The header line from the input mapping file. This will be searched for the user-specified categories :type categories: list :param categories: The list of user-specified category column names from the mapping file :rtype: dict :return: An ordered dictionary keyed on the combinations of all the types found within the user-specified categories. Each entry will contain an empty DataCategory namedtuple. If no categories are specified, a single entry with the key 'default' will be returned """ # If no categories provided, return all SampleIDs if categories is None: return {"default": DataCategory(set(imap.keys()), {})} cat_ids = [header.index(cat) for cat in categories if cat in header and "=" not in cat] table = OrderedDict() conditions = defaultdict(set) for i, cat in enumerate(categories): if "=" in cat and cat.split("=")[0] in header: cat_name = header[header.index(cat.split("=")[0])] conditions[cat_name].add(cat.split("=")[1]) # If invalid categories or conditions identified, return all SampleIDs if not cat_ids and not conditions: return {"default": DataCategory(set(imap.keys()), {})} # If only category column given, return column-wise SampleIDs if cat_ids and not conditions: for sid, row in imap.items(): cat_name = "_".join([row[cid] for cid in cat_ids]) if cat_name not in table: table[cat_name] = DataCategory(set(), {}) table[cat_name].sids.add(sid) return table # Collect all condition names cond_ids = set() for k in conditions: try: cond_ids.add(header.index(k)) except ValueError: continue idx_to_test = set(cat_ids).union(cond_ids) # If column name and condition given, return overlapping SampleIDs of column and # condition combinations for sid, row in imap.items(): if all([row[header.index(c)] in conditions[c] for c in conditions]): key = "_".join([row[idx] for idx in idx_to_test]) try: assert key in table.keys() except AssertionError: table[key] = DataCategory(set(), {}) table[key].sids.add(sid) try: assert len(table) > 0 except AssertionError: return {"default": DataCategory(set(imap.keys()), {})} else: return table
[ "def", "gather_categories", "(", "imap", ",", "header", ",", "categories", "=", "None", ")", ":", "if", "categories", "is", "None", ":", "return", "{", "\"default\"", ":", "DataCategory", "(", "set", "(", "imap", ".", "keys", "(", ")", ")", ",", "{", "}", ")", "}", "cat_ids", "=", "[", "header", ".", "index", "(", "cat", ")", "for", "cat", "in", "categories", "if", "cat", "in", "header", "and", "\"=\"", "not", "in", "cat", "]", "table", "=", "OrderedDict", "(", ")", "conditions", "=", "defaultdict", "(", "set", ")", "for", "i", ",", "cat", "in", "enumerate", "(", "categories", ")", ":", "if", "\"=\"", "in", "cat", "and", "cat", ".", "split", "(", "\"=\"", ")", "[", "0", "]", "in", "header", ":", "cat_name", "=", "header", "[", "header", ".", "index", "(", "cat", ".", "split", "(", "\"=\"", ")", "[", "0", "]", ")", "]", "conditions", "[", "cat_name", "]", ".", "add", "(", "cat", ".", "split", "(", "\"=\"", ")", "[", "1", "]", ")", "if", "not", "cat_ids", "and", "not", "conditions", ":", "return", "{", "\"default\"", ":", "DataCategory", "(", "set", "(", "imap", ".", "keys", "(", ")", ")", ",", "{", "}", ")", "}", "if", "cat_ids", "and", "not", "conditions", ":", "for", "sid", ",", "row", "in", "imap", ".", "items", "(", ")", ":", "cat_name", "=", "\"_\"", ".", "join", "(", "[", "row", "[", "cid", "]", "for", "cid", "in", "cat_ids", "]", ")", "if", "cat_name", "not", "in", "table", ":", "table", "[", "cat_name", "]", "=", "DataCategory", "(", "set", "(", ")", ",", "{", "}", ")", "table", "[", "cat_name", "]", ".", "sids", ".", "add", "(", "sid", ")", "return", "table", "cond_ids", "=", "set", "(", ")", "for", "k", "in", "conditions", ":", "try", ":", "cond_ids", ".", "add", "(", "header", ".", "index", "(", "k", ")", ")", "except", "ValueError", ":", "continue", "idx_to_test", "=", "set", "(", "cat_ids", ")", ".", "union", "(", "cond_ids", ")", "for", "sid", ",", "row", "in", "imap", ".", "items", "(", ")", ":", "if", "all", "(", "[", "row", "[", "header", ".", "index", "(", "c", ")", "]", "in", "conditions", "[", "c", "]", "for", "c", "in", "conditions", "]", ")", ":", "key", "=", "\"_\"", ".", "join", "(", "[", "row", "[", "idx", "]", "for", "idx", "in", "idx_to_test", "]", ")", "try", ":", "assert", "key", "in", "table", ".", "keys", "(", ")", "except", "AssertionError", ":", "table", "[", "key", "]", "=", "DataCategory", "(", "set", "(", ")", ",", "{", "}", ")", "table", "[", "key", "]", ".", "sids", ".", "add", "(", "sid", ")", "try", ":", "assert", "len", "(", "table", ")", ">", "0", "except", "AssertionError", ":", "return", "{", "\"default\"", ":", "DataCategory", "(", "set", "(", "imap", ".", "keys", "(", ")", ")", ",", "{", "}", ")", "}", "else", ":", "return", "table" ]
Find the user specified categories in the map and create a dictionary to contain the relevant data for each type within the categories. Multiple categories will have their types combined such that each possible combination will have its own entry in the dictionary. :type imap: dict :param imap: The input mapping file data keyed by SampleID :type header: list :param header: The header line from the input mapping file. This will be searched for the user-specified categories :type categories: list :param categories: The list of user-specified category column names from the mapping file :rtype: dict :return: An ordered dictionary keyed on the combinations of all the types found within the user-specified categories. Each entry will contain an empty DataCategory namedtuple. If no categories are specified, a single entry with the key 'default' will be returned
[ "Find", "the", "user", "specified", "categories", "in", "the", "map", "and", "create", "a", "dictionary", "to", "contain", "the", "relevant", "data", "for", "each", "type", "within", "the", "categories", ".", "Multiple", "categories", "will", "have", "their", "types", "combined", "such", "that", "each", "possible", "combination", "will", "have", "its", "own", "entry", "in", "the", "dictionary", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L238-L309
train
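A sketch of gather_categories on an invented mapping table; DataCategory is the namedtuple defined in phylotoast/util.py, and all IDs and values below are hypothetical.

header = ["SampleID", "Treatment", "Site"]
imap = {"S1": ["S1", "Control", "Gut"],
        "S2": ["S2", "Fasting", "Gut"]}
cats = gather_categories(imap, header, ["Treatment"])
# cats["Control"].sids -> {"S1"}; cats["Fasting"].sids -> {"S2"}
# Passing ["Treatment=Fasting"] instead would filter samples by that condition.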
smdabdoub/phylotoast
phylotoast/util.py
parse_unifrac
def parse_unifrac(unifracFN): """ Parses the unifrac results file into a dictionary :type unifracFN: str :param unifracFN: The path to the unifrac results file :rtype: dict :return: A dictionary with keys: 'pcd' (principal coordinates data) which is a dictionary of the data keyed by sample ID, 'eigvals' (eigenvalues), and 'varexp' (variation explained) """ with open(unifracFN, "rU") as uF: first = uF.next().split("\t") lines = [line.strip() for line in uF] unifrac = {"pcd": OrderedDict(), "eigvals": [], "varexp": []} if first[0] == "pc vector number": return parse_unifrac_v1_8(unifrac, lines) elif first[0] == "Eigvals": return parse_unifrac_v1_9(unifrac, lines) else: raise ValueError("File format not supported/recognized. Please check input " "unifrac file.")
python
def parse_unifrac(unifracFN): """ Parses the unifrac results file into a dictionary :type unifracFN: str :param unifracFN: The path to the unifrac results file :rtype: dict :return: A dictionary with keys: 'pcd' (principal coordinates data) which is a dictionary of the data keyed by sample ID, 'eigvals' (eigenvalues), and 'varexp' (variation explained) """ with open(unifracFN, "rU") as uF: first = uF.next().split("\t") lines = [line.strip() for line in uF] unifrac = {"pcd": OrderedDict(), "eigvals": [], "varexp": []} if first[0] == "pc vector number": return parse_unifrac_v1_8(unifrac, lines) elif first[0] == "Eigvals": return parse_unifrac_v1_9(unifrac, lines) else: raise ValueError("File format not supported/recognized. Please check input " "unifrac file.")
[ "def", "parse_unifrac", "(", "unifracFN", ")", ":", "with", "open", "(", "unifracFN", ",", "\"rU\"", ")", "as", "uF", ":", "first", "=", "uF", ".", "next", "(", ")", ".", "split", "(", "\"\\t\"", ")", "lines", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "uF", "]", "unifrac", "=", "{", "\"pcd\"", ":", "OrderedDict", "(", ")", ",", "\"eigvals\"", ":", "[", "]", ",", "\"varexp\"", ":", "[", "]", "}", "if", "first", "[", "0", "]", "==", "\"pc vector number\"", ":", "return", "parse_unifrac_v1_8", "(", "unifrac", ",", "lines", ")", "elif", "first", "[", "0", "]", "==", "\"Eigvals\"", ":", "return", "parse_unifrac_v1_9", "(", "unifrac", ",", "lines", ")", "else", ":", "raise", "ValueError", "(", "\"File format not supported/recognized. Please check input \"", "\"unifrac file.\"", ")" ]
Parses the unifrac results file into a dictionary :type unifracFN: str :param unifracFN: The path to the unifrac results file :rtype: dict :return: A dictionary with keys: 'pcd' (principal coordinates data) which is a dictionary of the data keyed by sample ID, 'eigvals' (eigenvalues), and 'varexp' (variation explained)
[ "Parses", "the", "unifrac", "results", "file", "into", "a", "dictionary" ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L311-L334
train
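Note that uF.next() is Python 2 iterator syntax; under Python 3 the same line would read first = next(uF).split("\t"). A hypothetical call:

pcoa = parse_unifrac("unifrac_pc.txt")  # invented file name
print(pcoa["varexp"][:2])  # variation explained by the first two principal coordinates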
smdabdoub/phylotoast
phylotoast/util.py
parse_unifrac_v1_8
def parse_unifrac_v1_8(unifrac, file_data): """ Function to parse data from older version of unifrac file obtained from Qiime version 1.8 and earlier. :type unifrac: dict :param unifrac: The output dictionary initialized in parse_unifrac :type file_data: list :param file_data: Unifrac data lines after stripping whitespace characters. """ for line in file_data: if line == "": break line = line.split("\t") unifrac["pcd"][line[0]] = [float(e) for e in line[1:]] unifrac["eigvals"] = [float(entry) for entry in file_data[-2].split("\t")[1:]] unifrac["varexp"] = [float(entry) for entry in file_data[-1].split("\t")[1:]] return unifrac
python
def parse_unifrac_v1_8(unifrac, file_data): """ Function to parse data from older version of unifrac file obtained from Qiime version 1.8 and earlier. :type unifrac: dict :param unifrac: The output dictionary initialized in parse_unifrac :type file_data: list :param file_data: Unifrac data lines after stripping whitespace characters. """ for line in file_data: if line == "": break line = line.split("\t") unifrac["pcd"][line[0]] = [float(e) for e in line[1:]] unifrac["eigvals"] = [float(entry) for entry in file_data[-2].split("\t")[1:]] unifrac["varexp"] = [float(entry) for entry in file_data[-1].split("\t")[1:]] return unifrac
[ "def", "parse_unifrac_v1_8", "(", "unifrac", ",", "file_data", ")", ":", "for", "line", "in", "file_data", ":", "if", "line", "==", "\"\"", ":", "break", "line", "=", "line", ".", "split", "(", "\"\\t\"", ")", "unifrac", "[", "\"pcd\"", "]", "[", "line", "[", "0", "]", "]", "=", "[", "float", "(", "e", ")", "for", "e", "in", "line", "[", "1", ":", "]", "]", "unifrac", "[", "\"eigvals\"", "]", "=", "[", "float", "(", "entry", ")", "for", "entry", "in", "file_data", "[", "-", "2", "]", ".", "split", "(", "\"\\t\"", ")", "[", "1", ":", "]", "]", "unifrac", "[", "\"varexp\"", "]", "=", "[", "float", "(", "entry", ")", "for", "entry", "in", "file_data", "[", "-", "1", "]", ".", "split", "(", "\"\\t\"", ")", "[", "1", ":", "]", "]", "return", "unifrac" ]
Function to parse data from older version of unifrac file obtained from Qiime version 1.8 and earlier. :type unifrac: dict :param unifrac: The output dictionary initialized in parse_unifrac :type file_data: list :param file_data: Unifrac data lines after stripping whitespace characters.
[ "Function", "to", "parse", "data", "from", "older", "version", "of", "unifrac", "file", "obtained", "from", "Qiime", "version", "1", ".", "8", "and", "earlier", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L337-L356
train
smdabdoub/phylotoast
phylotoast/util.py
parse_unifrac_v1_9
def parse_unifrac_v1_9(unifrac, file_data): """ Function to parse data from newer version of unifrac file obtained from Qiime version 1.9 and later. :type unifrac: dict :param unifrac: The output dictionary initialized in parse_unifrac :type file_data: list :param file_data: Unifrac data lines after stripping whitespace characters. """ unifrac["eigvals"] = [float(entry) for entry in file_data[0].split("\t")] unifrac["varexp"] = [float(entry)*100 for entry in file_data[3].split("\t")] for line in file_data[8:]: if line == "": break line = line.split("\t") unifrac["pcd"][line[0]] = [float(e) for e in line[1:]] return unifrac
python
def parse_unifrac_v1_9(unifrac, file_data): """ Function to parse data from newer version of unifrac file obtained from Qiime version 1.9 and later. :type unifrac: dict :param unifrac: The output dictionary initialized in parse_unifrac :type file_data: list :param file_data: Unifrac data lines after stripping whitespace characters. """ unifrac["eigvals"] = [float(entry) for entry in file_data[0].split("\t")] unifrac["varexp"] = [float(entry)*100 for entry in file_data[3].split("\t")] for line in file_data[8:]: if line == "": break line = line.split("\t") unifrac["pcd"][line[0]] = [float(e) for e in line[1:]] return unifrac
[ "def", "parse_unifrac_v1_9", "(", "unifrac", ",", "file_data", ")", ":", "unifrac", "[", "\"eigvals\"", "]", "=", "[", "float", "(", "entry", ")", "for", "entry", "in", "file_data", "[", "0", "]", ".", "split", "(", "\"\\t\"", ")", "]", "unifrac", "[", "\"varexp\"", "]", "=", "[", "float", "(", "entry", ")", "*", "100", "for", "entry", "in", "file_data", "[", "3", "]", ".", "split", "(", "\"\\t\"", ")", "]", "for", "line", "in", "file_data", "[", "8", ":", "]", ":", "if", "line", "==", "\"\"", ":", "break", "line", "=", "line", ".", "split", "(", "\"\\t\"", ")", "unifrac", "[", "\"pcd\"", "]", "[", "line", "[", "0", "]", "]", "=", "[", "float", "(", "e", ")", "for", "e", "in", "line", "[", "1", ":", "]", "]", "return", "unifrac" ]
Function to parse data from newer version of unifrac file obtained from Qiime version 1.9 and later. :type unifrac: dict :param unifrac: The output dictionary initialized in parse_unifrac :type file_data: list :param file_data: Unifrac data lines after stripping whitespace characters.
[ "Function", "to", "parse", "data", "from", "newer", "version", "of", "unifrac", "file", "obtained", "from", "Qiime", "version", "1", ".", "9", "and", "later", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L359-L378
train
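The two helpers expect different layouts: QIIME 1.8 output starts with a "pc vector number" header and carries the eigenvalues and variation explained on its last two lines, while QIIME 1.9 output leads with an "Eigvals" block and holds sample coordinates from the ninth line onward (with varexp rescaled by 100). Either way, the returned structure looks like the sketch below (all values invented):

from collections import OrderedDict
result = {
    "pcd": OrderedDict([("S1", [0.12, -0.08]), ("S2", [-0.05, 0.11])]),  # sample -> coordinates
    "eigvals": [4.21, 1.77],  # one eigenvalue per axis
    "varexp": [38.6, 16.2],   # percent variation explained per axis
}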
smdabdoub/phylotoast
phylotoast/util.py
color_mapping
def color_mapping(sample_map, header, group_column, color_column=None): """ Determine color-category mapping. If color_column was specified, then map the category names to color values. Otherwise, use the palettable colors to automatically generate a set of colors for the group values. :type sample_map: dict :param sample_map: Map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID) :type header: tuple :param header: A tuple of the header line for the mapping file :type group_column: str :param group_column: String denoting the column name for sample groups. :type color_column: str :param color_column: String denoting the column name for sample colors. :rtype: dict :return: {GroupName: Color} """ group_colors = OrderedDict() group_gather = gather_categories(sample_map, header, [group_column]) if color_column is not None: color_gather = gather_categories(sample_map, header, [color_column]) # match sample IDs between color_gather and group_gather for group in group_gather: for color in color_gather: # allow incomplete assignment of colors, if group sids overlap at # all with the color sids, consider it a match if group_gather[group].sids.intersection(color_gather[color].sids): group_colors[group] = color else: bcolors = itertools.cycle(Set3_12.hex_colors) for group in group_gather: group_colors[group] = bcolors.next() return group_colors
python
def color_mapping(sample_map, header, group_column, color_column=None): """ Determine color-category mapping. If color_column was specified, then map the category names to color values. Otherwise, use the palettable colors to automatically generate a set of colors for the group values. :type sample_map: dict :param sample_map: Map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID) :type header: tuple :param header: A tuple of the header line for the mapping file :type group_column: str :param group_column: String denoting the column name for sample groups. :type color_column: str :param color_column: String denoting the column name for sample colors. :rtype: dict :return: {GroupName: Color} """ group_colors = OrderedDict() group_gather = gather_categories(sample_map, header, [group_column]) if color_column is not None: color_gather = gather_categories(sample_map, header, [color_column]) # match sample IDs between color_gather and group_gather for group in group_gather: for color in color_gather: # allow incomplete assignment of colors, if group sids overlap at # all with the color sids, consider it a match if group_gather[group].sids.intersection(color_gather[color].sids): group_colors[group] = color else: bcolors = itertools.cycle(Set3_12.hex_colors) for group in group_gather: group_colors[group] = bcolors.next() return group_colors
[ "def", "color_mapping", "(", "sample_map", ",", "header", ",", "group_column", ",", "color_column", "=", "None", ")", ":", "group_colors", "=", "OrderedDict", "(", ")", "group_gather", "=", "gather_categories", "(", "sample_map", ",", "header", ",", "[", "group_column", "]", ")", "if", "color_column", "is", "not", "None", ":", "color_gather", "=", "gather_categories", "(", "sample_map", ",", "header", ",", "[", "color_column", "]", ")", "for", "group", "in", "group_gather", ":", "for", "color", "in", "color_gather", ":", "if", "group_gather", "[", "group", "]", ".", "sids", ".", "intersection", "(", "color_gather", "[", "color", "]", ".", "sids", ")", ":", "group_colors", "[", "group", "]", "=", "color", "else", ":", "bcolors", "=", "itertools", ".", "cycle", "(", "Set3_12", ".", "hex_colors", ")", "for", "group", "in", "group_gather", ":", "group_colors", "[", "group", "]", "=", "bcolors", ".", "next", "(", ")", "return", "group_colors" ]
Determine color-category mapping. If color_column was specified, then map the category names to color values. Otherwise, use the palettable colors to automatically generate a set of colors for the group values. :type sample_map: dict :param sample_map: Map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID) :type header: tuple :param header: A tuple of the header line for the mapping file :type group_column: str :param group_column: String denoting the column name for sample groups. :type color_column: str :param color_column: String denoting the column name for sample colors. :rtype: dict :return: {GroupName: Color}
[ "Determine", "color", "-", "category", "mapping", ".", "If", "color_column", "was", "specified", "then", "map", "the", "category", "names", "to", "color", "values", ".", "Otherwise", "use", "the", "palettable", "colors", "to", "automatically", "generate", "a", "set", "of", "colors", "for", "the", "group", "values", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L380-L419
train
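bcolors.next() is the Python 2 iterator protocol; Python 3 spells it next(bcolors). A hypothetical call, reusing the invented mapping table from the gather_categories example above:

colors = color_mapping(imap, header, "Treatment")
# e.g. {"Control": "#8DD3C7", "Fasting": "#FFFFB3"} drawn from palettable's Set3_12 palette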
christophertbrown/bioscripts
ctbBio/shuffle_genome.py
rev_c
def rev_c(read): """ return reverse complement of read """ rc = [] rc_nucs = {'A':'T', 'T':'A', 'G':'C', 'C':'G', 'N':'N'} for base in read: rc.extend(rc_nucs[base.upper()]) return rc[::-1]
python
def rev_c(read): """ return reverse complement of read """ rc = [] rc_nucs = {'A':'T', 'T':'A', 'G':'C', 'C':'G', 'N':'N'} for base in read: rc.extend(rc_nucs[base.upper()]) return rc[::-1]
[ "def", "rev_c", "(", "read", ")", ":", "rc", "=", "[", "]", "rc_nucs", "=", "{", "'A'", ":", "'T'", ",", "'T'", ":", "'A'", ",", "'G'", ":", "'C'", ",", "'C'", ":", "'G'", ",", "'N'", ":", "'N'", "}", "for", "base", "in", "read", ":", "rc", ".", "extend", "(", "rc_nucs", "[", "base", ".", "upper", "(", ")", "]", ")", "return", "rc", "[", ":", ":", "-", "1", "]" ]
return reverse complement of read
[ "return", "reverse", "completment", "of", "read" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/shuffle_genome.py#L27-L35
train
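A quick check; note rev_c returns a list of single-character strings (rc.extend adds one base at a time), so join it to get a string back:

print("".join(rev_c("ACGTn")))  # -> NACGT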
christophertbrown/bioscripts
ctbBio/shuffle_genome.py
shuffle_genome
def shuffle_genome(genome, cat, fraction = float(100), plot = True, \ alpha = 0.1, beta = 100000, \ min_length = 1000, max_length = 200000): """ randomly shuffle genome """ header = '>randomized_%s' % (genome.name) sequence = list(''.join([i[1] for i in parse_fasta(genome)])) length = len(sequence) shuffled = [] # break genome into pieces while sequence is not False: s = int(random.gammavariate(alpha, beta)) if s <= min_length or s >= max_length: continue if len(sequence) < s: seq = sequence[0:] else: seq = sequence[0:s] sequence = sequence[s:] # if bool(random.getrandbits(1)) is True: # seq = rev_c(seq) # print('fragment length: %s reverse complement: True' % ('{:,}'.format(s)), file=sys.stderr) # else: # print('fragment length: %s reverse complement: False' % ('{:,}'.format(s)), file=sys.stderr) shuffled.append(''.join(seq)) if sequence == []: break # shuffle pieces random.shuffle(shuffled) # subset fragments if fraction == float(100): subset = shuffled else: max_pieces = int(length * fraction/100) subset, total = [], 0 for fragment in shuffled: length = len(fragment) if total + length <= max_pieces: subset.append(fragment) total += length else: diff = max_pieces - total subset.append(fragment[0:diff]) break # combine sequences, if requested if cat is True: yield [header, ''.join(subset)] else: for i, seq in enumerate(subset): yield ['%s fragment:%s' % (header, i), seq]
python
def shuffle_genome(genome, cat, fraction = float(100), plot = True, \ alpha = 0.1, beta = 100000, \ min_length = 1000, max_length = 200000): """ randomly shuffle genome """ header = '>randomized_%s' % (genome.name) sequence = list(''.join([i[1] for i in parse_fasta(genome)])) length = len(sequence) shuffled = [] # break genome into pieces while sequence is not False: s = int(random.gammavariate(alpha, beta)) if s <= min_length or s >= max_length: continue if len(sequence) < s: seq = sequence[0:] else: seq = sequence[0:s] sequence = sequence[s:] # if bool(random.getrandbits(1)) is True: # seq = rev_c(seq) # print('fragment length: %s reverse complement: True' % ('{:,}'.format(s)), file=sys.stderr) # else: # print('fragment length: %s reverse complement: False' % ('{:,}'.format(s)), file=sys.stderr) shuffled.append(''.join(seq)) if sequence == []: break # shuffle pieces random.shuffle(shuffled) # subset fragments if fraction == float(100): subset = shuffled else: max_pieces = int(length * fraction/100) subset, total = [], 0 for fragment in shuffled: length = len(fragment) if total + length <= max_pieces: subset.append(fragment) total += length else: diff = max_pieces - total subset.append(fragment[0:diff]) break # combine sequences, if requested if cat is True: yield [header, ''.join(subset)] else: for i, seq in enumerate(subset): yield ['%s fragment:%s' % (header, i), seq]
[ "def", "shuffle_genome", "(", "genome", ",", "cat", ",", "fraction", "=", "float", "(", "100", ")", ",", "plot", "=", "True", ",", "alpha", "=", "0.1", ",", "beta", "=", "100000", ",", "min_length", "=", "1000", ",", "max_length", "=", "200000", ")", ":", "header", "=", "'>randomized_%s'", "%", "(", "genome", ".", "name", ")", "sequence", "=", "list", "(", "''", ".", "join", "(", "[", "i", "[", "1", "]", "for", "i", "in", "parse_fasta", "(", "genome", ")", "]", ")", ")", "length", "=", "len", "(", "sequence", ")", "shuffled", "=", "[", "]", "while", "sequence", "is", "not", "False", ":", "s", "=", "int", "(", "random", ".", "gammavariate", "(", "alpha", ",", "beta", ")", ")", "if", "s", "<=", "min_length", "or", "s", ">=", "max_length", ":", "continue", "if", "len", "(", "sequence", ")", "<", "s", ":", "seq", "=", "sequence", "[", "0", ":", "]", "else", ":", "seq", "=", "sequence", "[", "0", ":", "s", "]", "sequence", "=", "sequence", "[", "s", ":", "]", "shuffled", ".", "append", "(", "''", ".", "join", "(", "seq", ")", ")", "if", "sequence", "==", "[", "]", ":", "break", "random", ".", "shuffle", "(", "shuffled", ")", "if", "fraction", "==", "float", "(", "100", ")", ":", "subset", "=", "shuffled", "else", ":", "max_pieces", "=", "int", "(", "length", "*", "fraction", "/", "100", ")", "subset", ",", "total", "=", "[", "]", ",", "0", "for", "fragment", "in", "shuffled", ":", "length", "=", "len", "(", "fragment", ")", "if", "total", "+", "length", "<=", "max_pieces", ":", "subset", ".", "append", "(", "fragment", ")", "total", "+=", "length", "else", ":", "diff", "=", "max_pieces", "-", "total", "subset", ".", "append", "(", "fragment", "[", "0", ":", "diff", "]", ")", "break", "if", "cat", "is", "True", ":", "yield", "[", "header", ",", "''", ".", "join", "(", "subset", ")", "]", "else", ":", "for", "i", ",", "seq", "in", "enumerate", "(", "subset", ")", ":", "yield", "[", "'%s fragment:%s'", "%", "(", "header", ",", "i", ")", ",", "seq", "]" ]
randomly shuffle genome
[ "randomly", "shuffle", "genome" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/shuffle_genome.py#L37-L87
train
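Two reading notes: the loop condition while sequence is not False is always true (sequence is a list), so the loop actually exits through the break once the sequence is exhausted; and fragment sizes are drawn from a gamma distribution and rejected until they fall between min_length and max_length. A hypothetical invocation with an open FASTA handle:

with open("genome.fasta") as genome:  # invented file name
    for header, seq in shuffle_genome(genome, cat=True):
        print(header)
        print(seq)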
opengridcc/opengrid
opengrid/library/regression.py
MultiVarLinReg._prune
def _prune(self, fit, p_max): """ If the fit contains statistically insignificant parameters, remove them. Returns a pruned fit where all parameters have p-values of the t-statistic below p_max Parameters ---------- fit: fm.ols fit object Can contain insignificant parameters p_max : float Maximum allowed probability of the t-statistic Returns ------- fit: fm.ols fit object Won't contain any insignificant parameters """ def remove_from_model_desc(x, model_desc): """ Return a model_desc without x """ rhs_termlist = [] for t in model_desc.rhs_termlist: if not t.factors: # intercept, add anyway rhs_termlist.append(t) elif not x == t.factors[0]._varname: # this is not the term with x rhs_termlist.append(t) md = ModelDesc(model_desc.lhs_termlist, rhs_termlist) return md corrected_model_desc = ModelDesc(fit.model.formula.lhs_termlist[:], fit.model.formula.rhs_termlist[:]) pars_to_prune = fit.pvalues.where(fit.pvalues > p_max).dropna().index.tolist() try: pars_to_prune.remove('Intercept') except ValueError: pass while pars_to_prune: corrected_model_desc = remove_from_model_desc(pars_to_prune[0], corrected_model_desc) fit = fm.ols(corrected_model_desc, data=self.df).fit() pars_to_prune = fit.pvalues.where(fit.pvalues > p_max).dropna().index.tolist() try: pars_to_prune.remove('Intercept') except ValueError: pass return fit
python
def _prune(self, fit, p_max): """ If the fit contains statistically insignificant parameters, remove them. Returns a pruned fit where all parameters have p-values of the t-statistic below p_max Parameters ---------- fit: fm.ols fit object Can contain insignificant parameters p_max : float Maximum allowed probability of the t-statistic Returns ------- fit: fm.ols fit object Won't contain any insignificant parameters """ def remove_from_model_desc(x, model_desc): """ Return a model_desc without x """ rhs_termlist = [] for t in model_desc.rhs_termlist: if not t.factors: # intercept, add anyway rhs_termlist.append(t) elif not x == t.factors[0]._varname: # this is not the term with x rhs_termlist.append(t) md = ModelDesc(model_desc.lhs_termlist, rhs_termlist) return md corrected_model_desc = ModelDesc(fit.model.formula.lhs_termlist[:], fit.model.formula.rhs_termlist[:]) pars_to_prune = fit.pvalues.where(fit.pvalues > p_max).dropna().index.tolist() try: pars_to_prune.remove('Intercept') except ValueError: pass while pars_to_prune: corrected_model_desc = remove_from_model_desc(pars_to_prune[0], corrected_model_desc) fit = fm.ols(corrected_model_desc, data=self.df).fit() pars_to_prune = fit.pvalues.where(fit.pvalues > p_max).dropna().index.tolist() try: pars_to_prune.remove('Intercept') except ValueError: pass return fit
[ "def", "_prune", "(", "self", ",", "fit", ",", "p_max", ")", ":", "def", "remove_from_model_desc", "(", "x", ",", "model_desc", ")", ":", "rhs_termlist", "=", "[", "]", "for", "t", "in", "model_desc", ".", "rhs_termlist", ":", "if", "not", "t", ".", "factors", ":", "rhs_termlist", ".", "append", "(", "t", ")", "elif", "not", "x", "==", "t", ".", "factors", "[", "0", "]", ".", "_varname", ":", "rhs_termlist", ".", "append", "(", "t", ")", "md", "=", "ModelDesc", "(", "model_desc", ".", "lhs_termlist", ",", "rhs_termlist", ")", "return", "md", "corrected_model_desc", "=", "ModelDesc", "(", "fit", ".", "model", ".", "formula", ".", "lhs_termlist", "[", ":", "]", ",", "fit", ".", "model", ".", "formula", ".", "rhs_termlist", "[", ":", "]", ")", "pars_to_prune", "=", "fit", ".", "pvalues", ".", "where", "(", "fit", ".", "pvalues", ">", "p_max", ")", ".", "dropna", "(", ")", ".", "index", ".", "tolist", "(", ")", "try", ":", "pars_to_prune", ".", "remove", "(", "'Intercept'", ")", "except", ":", "pass", "while", "pars_to_prune", ":", "corrected_model_desc", "=", "remove_from_model_desc", "(", "pars_to_prune", "[", "0", "]", ",", "corrected_model_desc", ")", "fit", "=", "fm", ".", "ols", "(", "corrected_model_desc", ",", "data", "=", "self", ".", "df", ")", ".", "fit", "(", ")", "pars_to_prune", "=", "fit", ".", "pvalues", ".", "where", "(", "fit", ".", "pvalues", ">", "p_max", ")", ".", "dropna", "(", ")", ".", "index", ".", "tolist", "(", ")", "try", ":", "pars_to_prune", ".", "remove", "(", "'Intercept'", ")", "except", ":", "pass", "return", "fit" ]
If the fit contains statistically insignificant parameters, remove them. Returns a pruned fit where all parameters have p-values of the t-statistic below p_max Parameters ---------- fit: fm.ols fit object Can contain insignificant parameters p_max : float Maximum allowed probability of the t-statistic Returns ------- fit: fm.ols fit object Won't contain any insignificant parameters
[ "If", "the", "fit", "contains", "statistically", "insignificant", "parameters", "remove", "them", ".", "Returns", "a", "pruned", "fit", "where", "all", "parameters", "have", "p", "-", "values", "of", "the", "t", "-", "statistic", "below", "p_max" ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/regression.py#L222-L272
train
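A hedged sketch of the backward-elimination behavior; mvlr and fit are hypothetical stand-ins for a MultiVarLinReg instance and a fit built from a patsy ModelDesc the way the class does internally. (The bare except clauses in the original have been narrowed to except ValueError, the only exception list.remove raises.)

# Start from a full model, then drop insignificant terms one at a time.
pruned = mvlr._prune(fit, p_max=0.05)
print(pruned.model.exog_names)  # only the intercept and terms with p <= 0.05 remain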
opengridcc/opengrid
opengrid/library/regression.py
MultiVarLinReg.find_best_rsquared
def find_best_rsquared(list_of_fits): """Return the best fit, based on rsquared""" res = sorted(list_of_fits, key=lambda x: x.rsquared) return res[-1]
python
def find_best_rsquared(list_of_fits): """Return the best fit, based on rsquared""" res = sorted(list_of_fits, key=lambda x: x.rsquared) return res[-1]
[ "def", "find_best_rsquared", "(", "list_of_fits", ")", ":", "res", "=", "sorted", "(", "list_of_fits", ",", "key", "=", "lambda", "x", ":", "x", ".", "rsquared", ")", "return", "res", "[", "-", "1", "]" ]
Return the best fit, based on rsquared
[ "Return", "the", "best", "fit", "based", "on", "rsquared" ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/regression.py#L275-L278
train
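Sorting the whole list is O(n log n) for a single selection; for reference, the built-in max performs the same lookup in O(n):

best = max(list_of_fits, key=lambda x: x.rsquared)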
opengridcc/opengrid
opengrid/library/regression.py
MultiVarLinReg._predict
def _predict(self, fit, df): """ Return a df with predictions and confidence interval Notes ----- The df will contain the following columns: - 'predicted': the model output - 'interval_u', 'interval_l': upper and lower confidence bounds. The result will depend on the following attributes of self: confint : float (default=0.95) Confidence level for two-sided hypothesis allow_negative_predictions : bool (default=True) If False, correct negative predictions to zero (typically for energy consumption predictions) Parameters ---------- fit : Statsmodels fit df : pandas DataFrame or None (default) If None, use self.df Returns ------- df_res : pandas DataFrame Copy of df with additional columns 'predicted', 'interval_u' and 'interval_l' """ # Add model results to data as column 'predictions' df_res = df.copy() if 'Intercept' in fit.model.exog_names: df_res['Intercept'] = 1.0 df_res['predicted'] = fit.predict(df_res) if not self.allow_negative_predictions: df_res.loc[df_res['predicted'] < 0, 'predicted'] = 0 prstd, interval_l, interval_u = wls_prediction_std(fit, df_res[fit.model.exog_names], alpha=1 - self.confint) df_res['interval_l'] = interval_l df_res['interval_u'] = interval_u if 'Intercept' in df_res: df_res.drop(labels=['Intercept'], axis=1, inplace=True) return df_res
python
def _predict(self, fit, df): """ Return a df with predictions and confidence interval Notes ----- The df will contain the following columns: - 'predicted': the model output - 'interval_u', 'interval_l': upper and lower confidence bounds. The result will depend on the following attributes of self: confint : float (default=0.95) Confidence level for two-sided hypothesis allow_negative_predictions : bool (default=True) If False, correct negative predictions to zero (typically for energy consumption predictions) Parameters ---------- fit : Statsmodels fit df : pandas DataFrame or None (default) If None, use self.df Returns ------- df_res : pandas DataFrame Copy of df with additional columns 'predicted', 'interval_u' and 'interval_l' """ # Add model results to data as column 'predictions' df_res = df.copy() if 'Intercept' in fit.model.exog_names: df_res['Intercept'] = 1.0 df_res['predicted'] = fit.predict(df_res) if not self.allow_negative_predictions: df_res.loc[df_res['predicted'] < 0, 'predicted'] = 0 prstd, interval_l, interval_u = wls_prediction_std(fit, df_res[fit.model.exog_names], alpha=1 - self.confint) df_res['interval_l'] = interval_l df_res['interval_u'] = interval_u if 'Intercept' in df_res: df_res.drop(labels=['Intercept'], axis=1, inplace=True) return df_res
[ "def", "_predict", "(", "self", ",", "fit", ",", "df", ")", ":", "df_res", "=", "df", ".", "copy", "(", ")", "if", "'Intercept'", "in", "fit", ".", "model", ".", "exog_names", ":", "df_res", "[", "'Intercept'", "]", "=", "1.0", "df_res", "[", "'predicted'", "]", "=", "fit", ".", "predict", "(", "df_res", ")", "if", "not", "self", ".", "allow_negative_predictions", ":", "df_res", ".", "loc", "[", "df_res", "[", "'predicted'", "]", "<", "0", ",", "'predicted'", "]", "=", "0", "prstd", ",", "interval_l", ",", "interval_u", "=", "wls_prediction_std", "(", "fit", ",", "df_res", "[", "fit", ".", "model", ".", "exog_names", "]", ",", "alpha", "=", "1", "-", "self", ".", "confint", ")", "df_res", "[", "'interval_l'", "]", "=", "interval_l", "df_res", "[", "'interval_u'", "]", "=", "interval_u", "if", "'Intercept'", "in", "df_res", ":", "df_res", ".", "drop", "(", "labels", "=", "[", "'Intercept'", "]", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "return", "df_res" ]
Return a df with predictions and confidence interval Notes ----- The df will contain the following columns: - 'predicted': the model output - 'interval_u', 'interval_l': upper and lower confidence bounds. The result will depend on the following attributes of self: confint : float (default=0.95) Confidence level for two-sided hypothesis allow_negative_predictions : bool (default=True) If False, correct negative predictions to zero (typically for energy consumption predictions) Parameters ---------- fit : Statsmodels fit df : pandas DataFrame or None (default) If None, use self.df Returns ------- df_res : pandas DataFrame Copy of df with additional columns 'predicted', 'interval_u' and 'interval_l'
[ "Return", "a", "df", "with", "predictions", "and", "confidence", "interval" ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/regression.py#L292-L338
train
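A hedged sketch of consuming the result; mvlr and fit again stand in for a fitted MultiVarLinReg instance and one of its statsmodels fits:

df_res = mvlr._predict(fit, mvlr.df)
# Each row now carries the model output plus a two-sided confidence band.
print(df_res[["predicted", "interval_l", "interval_u"]].head())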
smdabdoub/phylotoast
phylotoast/biom_calc.py
relative_abundance
def relative_abundance(biomf, sampleIDs=None): """ Calculate the relative abundance of each OTUID in a Sample. :type biomf: A BIOM file. :param biomf: OTU table format. :type sampleIDs: list :param sampleIDs: A list of sample id's from BIOM format OTU table. :rtype: dict :return: Returns a dictionary keyed on SampleIDs, and the values are dictionaries keyed on OTUID's and their values represent the relative abundance of that OTUID in that SampleID. """ if sampleIDs is None: sampleIDs = biomf.ids() else: try: for sid in sampleIDs: assert sid in biomf.ids() except AssertionError: raise ValueError( "\nError while calculating relative abundances: The sampleIDs provided do" " not match the sampleIDs in biom file. Please double check the sampleIDs" " provided.\n") otuIDs = biomf.ids(axis="observation") norm_biomf = biomf.norm(inplace=False) return {sample: {otuID: norm_biomf.get_value_by_ids(otuID, sample) for otuID in otuIDs} for sample in sampleIDs}
python
def relative_abundance(biomf, sampleIDs=None): """ Calculate the relative abundance of each OTUID in a Sample. :type biomf: A BIOM file. :param biomf: OTU table format. :type sampleIDs: list :param sampleIDs: A list of sample id's from BIOM format OTU table. :rtype: dict :return: Returns a dictionary keyed on SampleIDs, and the values are dictionaries keyed on OTUID's and their values represent the relative abundance of that OTUID in that SampleID. """ if sampleIDs is None: sampleIDs = biomf.ids() else: try: for sid in sampleIDs: assert sid in biomf.ids() except AssertionError: raise ValueError( "\nError while calculating relative abundances: The sampleIDs provided do" " not match the sampleIDs in biom file. Please double check the sampleIDs" " provided.\n") otuIDs = biomf.ids(axis="observation") norm_biomf = biomf.norm(inplace=False) return {sample: {otuID: norm_biomf.get_value_by_ids(otuID, sample) for otuID in otuIDs} for sample in sampleIDs}
[ "def", "relative_abundance", "(", "biomf", ",", "sampleIDs", "=", "None", ")", ":", "if", "sampleIDs", "is", "None", ":", "sampleIDs", "=", "biomf", ".", "ids", "(", ")", "else", ":", "try", ":", "for", "sid", "in", "sampleIDs", ":", "assert", "sid", "in", "biomf", ".", "ids", "(", ")", "except", "AssertionError", ":", "raise", "ValueError", "(", "\"\\nError while calculating relative abundances: The sampleIDs provided do\"", "\" not match the sampleIDs in biom file. Please double check the sampleIDs\"", "\" provided.\\n\"", ")", "otuIDs", "=", "biomf", ".", "ids", "(", "axis", "=", "\"observation\"", ")", "norm_biomf", "=", "biomf", ".", "norm", "(", "inplace", "=", "False", ")", "return", "{", "sample", ":", "{", "otuID", ":", "norm_biomf", ".", "get_value_by_ids", "(", "otuID", ",", "sample", ")", "for", "otuID", "in", "otuIDs", "}", "for", "sample", "in", "sampleIDs", "}" ]
Calculate the relative abundance of each OTUID in a Sample. :type biomf: A BIOM file. :param biomf: OTU table format. :type sampleIDs: list :param sampleIDs: A list of sample id's from BIOM format OTU table. :rtype: dict :return: Returns a dictionary keyed on SampleIDs, and the values are dictionaries keyed on OTUID's and their values represent the relative abundance of that OTUID in that SampleID.
[ "Calculate", "the", "relative", "abundance", "of", "each", "OTUID", "in", "a", "Sample", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L11-L41
train
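A hypothetical call, assuming an OTU table loaded with the biom-format package (file name and sample ID invented):

import biom
biomf = biom.load_table("otu_table.biom")
ra = relative_abundance(biomf)
print(sum(ra["S1"].values()))  # ~1.0: relative abundances within a sample sum to one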
smdabdoub/phylotoast
phylotoast/biom_calc.py
mean_otu_pct_abundance
def mean_otu_pct_abundance(ra, otuIDs): """ Calculate the mean OTU abundance percentage. :type ra: Dict :param ra: 'ra' refers to a dictionary keyed on SampleIDs, and the values are dictionaries keyed on OTUID's and their values represent the relative abundance of that OTUID in that SampleID. 'ra' is the output of relative_abundance() function. :type otuIDs: List :param otuIDs: A list of OTUID's for which the percentage abundance needs to be measured. :rtype: dict :return: A dictionary of OTUID and their percent relative abundance as key/value pair. """ sids = ra.keys() otumeans = defaultdict(int) for oid in otuIDs: otumeans[oid] = sum([ra[sid][oid] for sid in sids if oid in ra[sid]]) / len(sids) * 100 return otumeans
python
def mean_otu_pct_abundance(ra, otuIDs): """ Calculate the mean OTU abundance percentage. :type ra: Dict :param ra: 'ra' refers to a dictionary keyed on SampleIDs, and the values are dictionaries keyed on OTUID's and their values represent the relative abundance of that OTUID in that SampleID. 'ra' is the output of relative_abundance() function. :type otuIDs: List :param otuIDs: A list of OTUID's for which the percentage abundance needs to be measured. :rtype: dict :return: A dictionary of OTUID and their percent relative abundance as key/value pair. """ sids = ra.keys() otumeans = defaultdict(int) for oid in otuIDs: otumeans[oid] = sum([ra[sid][oid] for sid in sids if oid in ra[sid]]) / len(sids) * 100 return otumeans
[ "def", "mean_otu_pct_abundance", "(", "ra", ",", "otuIDs", ")", ":", "sids", "=", "ra", ".", "keys", "(", ")", "otumeans", "=", "defaultdict", "(", "int", ")", "for", "oid", "in", "otuIDs", ":", "otumeans", "[", "oid", "]", "=", "sum", "(", "[", "ra", "[", "sid", "]", "[", "oid", "]", "for", "sid", "in", "sids", "if", "oid", "in", "ra", "[", "sid", "]", "]", ")", "/", "len", "(", "sids", ")", "*", "100", "return", "otumeans" ]
Calculate the mean OTU abundance percentage. :type ra: Dict :param ra: 'ra' refers to a dictionary keyed on SampleIDs, and the values are dictionaries keyed on OTUID's and their values represent the relative abundance of that OTUID in that SampleID. 'ra' is the output of relative_abundance() function. :type otuIDs: List :param otuIDs: A list of OTUID's for which the percentage abundance needs to be measured. :rtype: dict :return: A dictionary of OTUID and their percent relative abundance as key/value pair.
[ "Calculate", "the", "mean", "OTU", "abundance", "percentage", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L44-L67
train
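Chained with relative_abundance (the OTU IDs are invented):

ra = relative_abundance(biomf)
means = mean_otu_pct_abundance(ra, ["OTU_1", "OTU_17"])
print(means["OTU_1"])  # mean percent relative abundance of OTU_1 across all samples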
smdabdoub/phylotoast
phylotoast/biom_calc.py
MRA
def MRA(biomf, sampleIDs=None, transform=None): """ Calculate the mean relative abundance percentage. :type biomf: A BIOM file. :param biomf: OTU table format. :type sampleIDs: list :param sampleIDs: A list of sample id's from BIOM format OTU table. :param transform: Mathematical function which is used to transform the relative abundance values to another format. By default, the function has been set to None. :rtype: dict :return: A dictionary keyed on OTUID's and their mean relative abundance for a given number of sampleIDs. """ ra = relative_abundance(biomf, sampleIDs) if transform is not None: ra = {sample: {otuID: transform(abd) for otuID, abd in ra[sample].items()} for sample in ra.keys()} otuIDs = biomf.ids(axis="observation") return mean_otu_pct_abundance(ra, otuIDs)
python
def MRA(biomf, sampleIDs=None, transform=None): """ Calculate the mean relative abundance percentage. :type biomf: A BIOM file. :param biomf: OTU table format. :type sampleIDs: list :param sampleIDs: A list of sample id's from BIOM format OTU table. :param transform: Mathematical function which is used to transform the relative abundance values to another format. By default, the function has been set to None. :rtype: dict :return: A dictionary keyed on OTUID's and their mean relative abundance for a given number of sampleIDs. """ ra = relative_abundance(biomf, sampleIDs) if transform is not None: ra = {sample: {otuID: transform(abd) for otuID, abd in ra[sample].items()} for sample in ra.keys()} otuIDs = biomf.ids(axis="observation") return mean_otu_pct_abundance(ra, otuIDs)
[ "def", "MRA", "(", "biomf", ",", "sampleIDs", "=", "None", ",", "transform", "=", "None", ")", ":", "ra", "=", "relative_abundance", "(", "biomf", ",", "sampleIDs", ")", "if", "transform", "is", "not", "None", ":", "ra", "=", "{", "sample", ":", "{", "otuID", ":", "transform", "(", "abd", ")", "for", "otuID", ",", "abd", "in", "ra", "[", "sample", "]", ".", "items", "(", ")", "}", "for", "sample", "in", "ra", ".", "keys", "(", ")", "}", "otuIDs", "=", "biomf", ".", "ids", "(", "axis", "=", "\"observation\"", ")", "return", "mean_otu_pct_abundance", "(", "ra", ",", "otuIDs", ")" ]
Calculate the mean relative abundance percentage. :type biomf: A BIOM file. :param biomf: OTU table format. :type sampleIDs: list :param sampleIDs: A list of sample id's from BIOM format OTU table. :param transform: Mathematical function which is used to transform the relative abundance values to another format. By default, the function has been set to None. :rtype: dict :return: A dictionary keyed on OTUID's and their mean relative abundance for a given number of sampleIDs.
[ "Calculate", "the", "mean", "relative", "abundance", "percentage", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L70-L92
train
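For example, a log10 transform applied before averaging; math.log10 is undefined at zero, so this hedged sketch guards against zero abundances:

import math
mra = MRA(biomf, transform=lambda x: math.log10(x) if x > 0 else 0)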
smdabdoub/phylotoast
phylotoast/biom_calc.py
raw_abundance
def raw_abundance(biomf, sampleIDs=None, sample_abd=True): """ Calculate the total number of sequences in each OTU or SampleID. :type biomf: A BIOM file. :param biomf: OTU table format. :type sampleIDs: List :param sampleIDs: A list of column id's from BIOM format OTU table. By default, the list has been set to None. :type sample_abd: Boolean :param sample_abd: A boolean flag that selects whether totals are reported per SampleID (True) or per OTUID (False). By default, the output will be provided for SampleID's. :rtype: dict :return: Returns a dictionary keyed on either OTUID's or SampleIDs and their respective abundance as values. """ results = defaultdict(int) if sampleIDs is None: sampleIDs = biomf.ids() else: try: for sid in sampleIDs: assert sid in biomf.ids() except AssertionError: raise ValueError( "\nError while calculating raw total abundances: The sampleIDs provided " "do not match the sampleIDs in biom file. Please double check the " "sampleIDs provided.\n") otuIDs = biomf.ids(axis="observation") for sampleID in sampleIDs: for otuID in otuIDs: abd = biomf.get_value_by_ids(otuID, sampleID) if sample_abd: results[sampleID] += abd else: results[otuID] += abd return results
python
def raw_abundance(biomf, sampleIDs=None, sample_abd=True): """ Calculate the total number of sequences in each OTU or SampleID. :type biomf: A BIOM file. :param biomf: OTU table format. :type sampleIDs: List :param sampleIDs: A list of column id's from BIOM format OTU table. By default, the list has been set to None. :type sample_abd: Boolean :param sample_abd: A boolean operator to provide output for OTUID's or SampleID's. By default, the output will be provided for SampleID's. :rtype: dict :return: Returns a dictionary keyed on either OTUID's or SampleIDs and their respective abundance as values. """ results = defaultdict(int) if sampleIDs is None: sampleIDs = biomf.ids() else: try: for sid in sampleIDs: assert sid in biomf.ids() except AssertionError: raise ValueError( "\nError while calculating raw total abundances: The sampleIDs provided " "do not match the sampleIDs in biom file. Please double check the " "sampleIDs provided.\n") otuIDs = biomf.ids(axis="observation") for sampleID in sampleIDs: for otuID in otuIDs: abd = biomf.get_value_by_ids(otuID, sampleID) if sample_abd: results[sampleID] += abd else: results[otuID] += abd return results
[ "def", "raw_abundance", "(", "biomf", ",", "sampleIDs", "=", "None", ",", "sample_abd", "=", "True", ")", ":", "results", "=", "defaultdict", "(", "int", ")", "if", "sampleIDs", "is", "None", ":", "sampleIDs", "=", "biomf", ".", "ids", "(", ")", "else", ":", "try", ":", "for", "sid", "in", "sampleIDs", ":", "assert", "sid", "in", "biomf", ".", "ids", "(", ")", "except", "AssertionError", ":", "raise", "ValueError", "(", "\"\\nError while calculating raw total abundances: The sampleIDs provided \"", "\"do not match the sampleIDs in biom file. Please double check the \"", "\"sampleIDs provided.\\n\"", ")", "otuIDs", "=", "biomf", ".", "ids", "(", "axis", "=", "\"observation\"", ")", "for", "sampleID", "in", "sampleIDs", ":", "for", "otuID", "in", "otuIDs", ":", "abd", "=", "biomf", ".", "get_value_by_ids", "(", "otuID", ",", "sampleID", ")", "if", "sample_abd", ":", "results", "[", "sampleID", "]", "+=", "abd", "else", ":", "results", "[", "otuID", "]", "+=", "abd", "return", "results" ]
Calculate the total number of sequences in each OTU or SampleID.

:type biomf: A BIOM file.
:param biomf: OTU table format.

:type sampleIDs: List
:param sampleIDs: A list of column id's from BIOM format OTU table. By
    default, the list has been set to None.

:type sample_abd: Boolean
:param sample_abd: A boolean operator to provide output for OTUID's or
    SampleID's. By default, the output will be provided for SampleID's.

:rtype: dict
:return: Returns a dictionary keyed on either OTUID's or SampleIDs and their
    respective abundance as values.
[ "Calculate", "the", "total", "number", "of", "sequences", "in", "each", "OTU", "or", "SampleID", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L95-L135
train
smdabdoub/phylotoast
phylotoast/biom_calc.py
transform_raw_abundance
def transform_raw_abundance(biomf, fn=math.log10, sampleIDs=None, sample_abd=True):
    """
    Function to transform the total abundance calculation for each sample ID to
    another format based on user given transformation function.

    :type biomf: A BIOM file.
    :param biomf: OTU table format.

    :param fn: Mathematical function which is used to transform smax to another
        format. By default, the function has been given as base 10 logarithm.

    :rtype: dict
    :return: Returns a dictionary similar to output of raw_abundance function but
        with the abundance values modified by the mathematical operation. By
        default, the operation performed on the abundances is base 10 logarithm.
    """
    totals = raw_abundance(biomf, sampleIDs, sample_abd)
    return {sid: fn(abd) for sid, abd in totals.items()}
python
def transform_raw_abundance(biomf, fn=math.log10, sampleIDs=None, sample_abd=True):
    """
    Function to transform the total abundance calculation for each sample ID to
    another format based on user given transformation function.

    :type biomf: A BIOM file.
    :param biomf: OTU table format.

    :param fn: Mathematical function which is used to transform smax to another
        format. By default, the function has been given as base 10 logarithm.

    :rtype: dict
    :return: Returns a dictionary similar to output of raw_abundance function but
        with the abundance values modified by the mathematical operation. By
        default, the operation performed on the abundances is base 10 logarithm.
    """
    totals = raw_abundance(biomf, sampleIDs, sample_abd)
    return {sid: fn(abd) for sid, abd in totals.items()}
[ "def", "transform_raw_abundance", "(", "biomf", ",", "fn", "=", "math", ".", "log10", ",", "sampleIDs", "=", "None", ",", "sample_abd", "=", "True", ")", ":", "totals", "=", "raw_abundance", "(", "biomf", ",", "sampleIDs", ",", "sample_abd", ")", "return", "{", "sid", ":", "fn", "(", "abd", ")", "for", "sid", ",", "abd", "in", "totals", ".", "items", "(", ")", "}" ]
Function to transform the total abundance calculation for each sample ID to
another format based on user given transformation function.

:type biomf: A BIOM file.
:param biomf: OTU table format.

:param fn: Mathematical function which is used to transform smax to another
    format. By default, the function has been given as base 10 logarithm.

:rtype: dict
:return: Returns a dictionary similar to output of raw_abundance function but
    with the abundance values modified by the mathematical operation. By
    default, the operation performed on the abundances is base 10 logarithm.
[ "Function", "to", "transform", "the", "total", "abundance", "calculation", "for", "each", "sample", "ID", "to", "another", "format", "based", "on", "user", "given", "transformation", "function", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L138-L155
train
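A short sketch of the point of the fn parameter above: any unary numeric function can be swapped in for the default base-10 logarithm. The biomf table is assumed loaded as in the earlier sketch.

# Sketch only; biomf is a previously loaded BIOM table (see sketch above).
import math
log10_totals = transform_raw_abundance(biomf)                # default: log10
ln_totals = transform_raw_abundance(biomf, fn=math.log)      # natural log
identity = transform_raw_abundance(biomf, fn=lambda x: x)    # no transform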
smdabdoub/phylotoast
bin/diversity.py
print_MannWhitneyU
def print_MannWhitneyU(div_calc):
    """
    Compute the Mann-Whitney U test for unequal group sample sizes.
    """
    try:
        x = div_calc.values()[0].values()
        y = div_calc.values()[1].values()
    except:
        return "Error setting up input arrays for Mann-Whitney U Test. Skipping "\
               "significance testing."
    T, p = stats.mannwhitneyu(x, y)
    print "\nMann-Whitney U test statistic:", T
    print "Two-tailed p-value: {}".format(2 * p)
python
def print_MannWhitneyU(div_calc):
    """
    Compute the Mann-Whitney U test for unequal group sample sizes.
    """
    try:
        x = div_calc.values()[0].values()
        y = div_calc.values()[1].values()
    except:
        return "Error setting up input arrays for Mann-Whitney U Test. Skipping "\
               "significance testing."
    T, p = stats.mannwhitneyu(x, y)
    print "\nMann-Whitney U test statistic:", T
    print "Two-tailed p-value: {}".format(2 * p)
[ "def", "print_MannWhitneyU", "(", "div_calc", ")", ":", "try", ":", "x", "=", "div_calc", ".", "values", "(", ")", "[", "0", "]", ".", "values", "(", ")", "y", "=", "div_calc", ".", "values", "(", ")", "[", "1", "]", ".", "values", "(", ")", "except", ":", "return", "\"Error setting up input arrays for Mann-Whitney U Test. Skipping \"", "\"significance testing.\"", "T", ",", "p", "=", "stats", ".", "mannwhitneyu", "(", "x", ",", "y", ")", "print", "\"\\nMann-Whitney U test statistic:\"", ",", "T", "print", "\"Two-tailed p-value: {}\"", ".", "format", "(", "2", "*", "p", ")" ]
Compute the Mann-Whitney U test for unequal group sample sizes.
[ "Compute", "the", "Mann", "-", "Whitney", "U", "test", "for", "unequal", "group", "sample", "sizes", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/diversity.py#L54-L66
train
smdabdoub/phylotoast
bin/diversity.py
print_KruskalWallisH
def print_KruskalWallisH(div_calc):
    """
    Compute the Kruskal-Wallis H-test for independent samples. A typical rule is
    that each group must have at least 5 measurements.
    """
    calc = defaultdict(list)
    try:
        for k1, v1 in div_calc.iteritems():
            for k2, v2 in v1.iteritems():
                calc[k1].append(v2)
    except:
        return "Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "\
               "significance testing."
    h, p = stats.kruskal(*calc.values())
    print "\nKruskal-Wallis H-test statistic for {} groups: {}".format(str(len(div_calc)), h)
    print "p-value: {}".format(p)
python
def print_KruskalWallisH(div_calc):
    """
    Compute the Kruskal-Wallis H-test for independent samples. A typical rule is
    that each group must have at least 5 measurements.
    """
    calc = defaultdict(list)
    try:
        for k1, v1 in div_calc.iteritems():
            for k2, v2 in v1.iteritems():
                calc[k1].append(v2)
    except:
        return "Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "\
               "significance testing."
    h, p = stats.kruskal(*calc.values())
    print "\nKruskal-Wallis H-test statistic for {} groups: {}".format(str(len(div_calc)), h)
    print "p-value: {}".format(p)
[ "def", "print_KruskalWallisH", "(", "div_calc", ")", ":", "calc", "=", "defaultdict", "(", "list", ")", "try", ":", "for", "k1", ",", "v1", "in", "div_calc", ".", "iteritems", "(", ")", ":", "for", "k2", ",", "v2", "in", "v1", ".", "iteritems", "(", ")", ":", "calc", "[", "k1", "]", ".", "append", "(", "v2", ")", "except", ":", "return", "\"Error setting up input arrays for Kruskal-Wallis H-Test. Skipping \"", "\"significance testing.\"", "h", ",", "p", "=", "stats", ".", "kruskal", "(", "*", "calc", ".", "values", "(", ")", ")", "print", "\"\\nKruskal-Wallis H-test statistic for {} groups: {}\"", ".", "format", "(", "str", "(", "len", "(", "div_calc", ")", ")", ",", "h", ")", "print", "\"p-value: {}\"", ".", "format", "(", "p", ")" ]
Compute the Kruskal-Wallis H-test for independent samples. A typical rule is that each group must have at least 5 measurements.
[ "Compute", "the", "Kruskal", "-", "Wallis", "H", "-", "test", "for", "independent", "samples", ".", "A", "typical", "rule", "is", "that", "each", "group", "must", "have", "at", "least", "5", "measurements", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/diversity.py#L69-L84
train
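The two helpers above are Python 2 code (print statements, indexing into dict.values(), iteritems). A rough Python 3 equivalent of the Mann-Whitney branch, assuming div_calc maps group name to a {sample: diversity value} dict as in the records:

from scipy import stats

def mann_whitney_u(div_calc):
    # list() makes dict views indexable, which Python 2's .values() was.
    groups = list(div_calc.values())
    x = list(groups[0].values())
    y = list(groups[1].values())
    T, p = stats.mannwhitneyu(x, y)
    # The doubling mirrors the original snippet; note mannwhitneyu's default
    # alternative hypothesis has changed across scipy versions.
    print("\nMann-Whitney U test statistic:", T)
    print("Two-tailed p-value: {}".format(2 * p))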
smdabdoub/phylotoast
bin/diversity.py
handle_program_options
def handle_program_options():
    """Parses the given options passed in at the command line."""
    parser = argparse.ArgumentParser(description="Calculate the alpha diversity\
                                     of a set of samples using one or more \
                                     metrics and output a kernal density \
                                     estimator-smoothed histogram of the \
                                     results.")
    parser.add_argument("-m", "--map_file",
                        help="QIIME mapping file.")
    parser.add_argument("-i", "--biom_fp",
                        help="Path to the BIOM table")
    parser.add_argument("-c", "--category",
                        help="Specific category from the mapping file.")
    parser.add_argument("-d", "--diversity", default=["shannon"], nargs="+",
                        help="The alpha diversity metric. Default \
                        value is 'shannon', which will calculate the Shannon\
                        entropy. Multiple metrics can be specified (space separated).\
                        The full list of metrics is available at:\
                        http://scikit-bio.org/docs/latest/generated/skbio.diversity.alpha.html.\
                        Beta diversity metrics will be supported in the future.")
    parser.add_argument("--x_label", default=[None], nargs="+",
                        help="The name of the diversity metric to be displayed on the\
                        plot as the X-axis label. If multiple metrics are specified,\
                        then multiple entries for the X-axis label should be given.")
    parser.add_argument("--color_by",
                        help="A column name in the mapping file containing\
                        hexadecimal (#FF0000) color values that will\
                        be used to color the groups. Each sample ID must\
                        have a color entry.")
    parser.add_argument("--plot_title", default="",
                        help="A descriptive title that will appear at the top \
                        of the output plot. Surround with quotes if there are\
                        spaces in the title.")
    parser.add_argument("-o", "--output_dir", default=".",
                        help="The directory plots will be saved to.")
    parser.add_argument("--image_type", default="png",
                        help="The type of image to save: png, svg, pdf, eps, etc...")
    parser.add_argument("--save_calculations",
                        help="Path and name of text file to store the calculated "
                             "diversity metrics.")
    parser.add_argument("--suppress_stats", action="store_true",
                        help="Do not display "
                             "significance testing results which are shown by default.")
    parser.add_argument("--show_available_metrics", action="store_true",
                        help="Supply this parameter to see which alpha diversity metrics "
                             " are available for usage. No calculations will be performed"
                             " if this parameter is provided.")
    return parser.parse_args()
python
def handle_program_options():
    """Parses the given options passed in at the command line."""
    parser = argparse.ArgumentParser(description="Calculate the alpha diversity\
                                     of a set of samples using one or more \
                                     metrics and output a kernal density \
                                     estimator-smoothed histogram of the \
                                     results.")
    parser.add_argument("-m", "--map_file",
                        help="QIIME mapping file.")
    parser.add_argument("-i", "--biom_fp",
                        help="Path to the BIOM table")
    parser.add_argument("-c", "--category",
                        help="Specific category from the mapping file.")
    parser.add_argument("-d", "--diversity", default=["shannon"], nargs="+",
                        help="The alpha diversity metric. Default \
                        value is 'shannon', which will calculate the Shannon\
                        entropy. Multiple metrics can be specified (space separated).\
                        The full list of metrics is available at:\
                        http://scikit-bio.org/docs/latest/generated/skbio.diversity.alpha.html.\
                        Beta diversity metrics will be supported in the future.")
    parser.add_argument("--x_label", default=[None], nargs="+",
                        help="The name of the diversity metric to be displayed on the\
                        plot as the X-axis label. If multiple metrics are specified,\
                        then multiple entries for the X-axis label should be given.")
    parser.add_argument("--color_by",
                        help="A column name in the mapping file containing\
                        hexadecimal (#FF0000) color values that will\
                        be used to color the groups. Each sample ID must\
                        have a color entry.")
    parser.add_argument("--plot_title", default="",
                        help="A descriptive title that will appear at the top \
                        of the output plot. Surround with quotes if there are\
                        spaces in the title.")
    parser.add_argument("-o", "--output_dir", default=".",
                        help="The directory plots will be saved to.")
    parser.add_argument("--image_type", default="png",
                        help="The type of image to save: png, svg, pdf, eps, etc...")
    parser.add_argument("--save_calculations",
                        help="Path and name of text file to store the calculated "
                             "diversity metrics.")
    parser.add_argument("--suppress_stats", action="store_true",
                        help="Do not display "
                             "significance testing results which are shown by default.")
    parser.add_argument("--show_available_metrics", action="store_true",
                        help="Supply this parameter to see which alpha diversity metrics "
                             " are available for usage. No calculations will be performed"
                             " if this parameter is provided.")
    return parser.parse_args()
[ "def", "handle_program_options", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Calculate the alpha diversity\\ of a set of samples using one or more \\ metrics and output a kernal density \\ estimator-smoothed histogram of the \\ results.\"", ")", "parser", ".", "add_argument", "(", "\"-m\"", ",", "\"--map_file\"", ",", "help", "=", "\"QIIME mapping file.\"", ")", "parser", ".", "add_argument", "(", "\"-i\"", ",", "\"--biom_fp\"", ",", "help", "=", "\"Path to the BIOM table\"", ")", "parser", ".", "add_argument", "(", "\"-c\"", ",", "\"--category\"", ",", "help", "=", "\"Specific category from the mapping file.\"", ")", "parser", ".", "add_argument", "(", "\"-d\"", ",", "\"--diversity\"", ",", "default", "=", "[", "\"shannon\"", "]", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"The alpha diversity metric. Default \\ value is 'shannon', which will calculate the Shannon\\ entropy. Multiple metrics can be specified (space separated).\\ The full list of metrics is available at:\\ http://scikit-bio.org/docs/latest/generated/skbio.diversity.alpha.html.\\ Beta diversity metrics will be supported in the future.\"", ")", "parser", ".", "add_argument", "(", "\"--x_label\"", ",", "default", "=", "[", "None", "]", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"The name of the diversity metric to be displayed on the\\ plot as the X-axis label. If multiple metrics are specified,\\ then multiple entries for the X-axis label should be given.\"", ")", "parser", ".", "add_argument", "(", "\"--color_by\"", ",", "help", "=", "\"A column name in the mapping file containing\\ hexadecimal (#FF0000) color values that will\\ be used to color the groups. Each sample ID must\\ have a color entry.\"", ")", "parser", ".", "add_argument", "(", "\"--plot_title\"", ",", "default", "=", "\"\"", ",", "help", "=", "\"A descriptive title that will appear at the top \\ of the output plot. Surround with quotes if there are\\ spaces in the title.\"", ")", "parser", ".", "add_argument", "(", "\"-o\"", ",", "\"--output_dir\"", ",", "default", "=", "\".\"", ",", "help", "=", "\"The directory plots will be saved to.\"", ")", "parser", ".", "add_argument", "(", "\"--image_type\"", ",", "default", "=", "\"png\"", ",", "help", "=", "\"The type of image to save: png, svg, pdf, eps, etc...\"", ")", "parser", ".", "add_argument", "(", "\"--save_calculations\"", ",", "help", "=", "\"Path and name of text file to store the calculated \"", "\"diversity metrics.\"", ")", "parser", ".", "add_argument", "(", "\"--suppress_stats\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Do not display \"", "\"significance testing results which are shown by default.\"", ")", "parser", ".", "add_argument", "(", "\"--show_available_metrics\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Supply this parameter to see which alpha diversity metrics \"", "\" are available for usage. No calculations will be performed\"", "\" if this parameter is provided.\"", ")", "return", "parser", ".", "parse_args", "(", ")" ]
Parses the given options passed in at the command line.
[ "Parses", "the", "given", "options", "passed", "in", "at", "the", "command", "line", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/diversity.py#L122-L168
train
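A hedged sketch of how the parser above is typically driven; the command line and file names are placeholders, not repository examples.

# Hypothetical invocation of the script above:
#   diversity.py -m map.txt -i otu_table.biom -c Treatment -d shannon simpson
# Because -d uses nargs="+", the parsed namespace carries a list of metrics.
args = handle_program_options()
assert isinstance(args.diversity, list)   # ["shannon", "simpson"] for the call above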
christophertbrown/bioscripts
ctbBio/search.py
blastdb
def blastdb(fasta, maxfile = 10000000):
    """
    make blast db
    """
    db = fasta.rsplit('.', 1)[0]
    type = check_type(fasta)
    if type == 'nucl':
        type = ['nhr', type]
    else:
        type = ['phr', type]
    if os.path.exists('%s.%s' % (db, type[0])) is False \
            and os.path.exists('%s.00.%s' % (db, type[0])) is False:
        print('# ... making blastdb for: %s' % (fasta), file=sys.stderr)
        os.system('makeblastdb \
            -in %s -out %s -dbtype %s -max_file_sz %s >> log.txt' \
            % (fasta, db, type[1], maxfile))
    else:
        print('# ... database found for: %s' % (fasta), file=sys.stderr)
    return db
python
def blastdb(fasta, maxfile = 10000000):
    """
    make blast db
    """
    db = fasta.rsplit('.', 1)[0]
    type = check_type(fasta)
    if type == 'nucl':
        type = ['nhr', type]
    else:
        type = ['phr', type]
    if os.path.exists('%s.%s' % (db, type[0])) is False \
            and os.path.exists('%s.00.%s' % (db, type[0])) is False:
        print('# ... making blastdb for: %s' % (fasta), file=sys.stderr)
        os.system('makeblastdb \
            -in %s -out %s -dbtype %s -max_file_sz %s >> log.txt' \
            % (fasta, db, type[1], maxfile))
    else:
        print('# ... database found for: %s' % (fasta), file=sys.stderr)
    return db
[ "def", "blastdb", "(", "fasta", ",", "maxfile", "=", "10000000", ")", ":", "db", "=", "fasta", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "type", "=", "check_type", "(", "fasta", ")", "if", "type", "==", "'nucl'", ":", "type", "=", "[", "'nhr'", ",", "type", "]", "else", ":", "type", "=", "[", "'phr'", ",", "type", "]", "if", "os", ".", "path", ".", "exists", "(", "'%s.%s'", "%", "(", "db", ",", "type", "[", "0", "]", ")", ")", "is", "False", "and", "os", ".", "path", ".", "exists", "(", "'%s.00.%s'", "%", "(", "db", ",", "type", "[", "0", "]", ")", ")", "is", "False", ":", "print", "(", "'# ... making blastdb for: %s'", "%", "(", "fasta", ")", ",", "file", "=", "sys", ".", "stderr", ")", "os", ".", "system", "(", "'makeblastdb \\ -in %s -out %s -dbtype %s -max_file_sz %s >> log.txt'", "%", "(", "fasta", ",", "db", ",", "type", "[", "1", "]", ",", "maxfile", ")", ")", "else", ":", "print", "(", "'# ... database found for: %s'", "%", "(", "fasta", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "db" ]
make blast db
[ "make", "blast", "db" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/search.py#L28-L46
train
christophertbrown/bioscripts
ctbBio/search.py
usearchdb
def usearchdb(fasta, alignment = 'local', usearch_loc = 'usearch'):
    """
    make usearch db
    """
    if '.udb' in fasta:
        print('# ... database found: %s' % (fasta), file=sys.stderr)
        return fasta
    type = check_type(fasta)
    db = '%s.%s.udb' % (fasta.rsplit('.', 1)[0], type)
    if os.path.exists(db) is False:
        print('# ... making usearch db for: %s' % (fasta), file=sys.stderr)
        if alignment == 'local':
            os.system('%s -makeudb_ublast %s -output %s >> log.txt' % (usearch_loc, fasta, db))
        elif alignment == 'global':
            os.system('%s -makeudb_usearch %s -output %s >> log.txt' % (usearch_loc, fasta, db))
    else:
        print('# ... database found for: %s' % (fasta), file=sys.stderr)
    return db
python
def usearchdb(fasta, alignment = 'local', usearch_loc = 'usearch'):
    """
    make usearch db
    """
    if '.udb' in fasta:
        print('# ... database found: %s' % (fasta), file=sys.stderr)
        return fasta
    type = check_type(fasta)
    db = '%s.%s.udb' % (fasta.rsplit('.', 1)[0], type)
    if os.path.exists(db) is False:
        print('# ... making usearch db for: %s' % (fasta), file=sys.stderr)
        if alignment == 'local':
            os.system('%s -makeudb_ublast %s -output %s >> log.txt' % (usearch_loc, fasta, db))
        elif alignment == 'global':
            os.system('%s -makeudb_usearch %s -output %s >> log.txt' % (usearch_loc, fasta, db))
    else:
        print('# ... database found for: %s' % (fasta), file=sys.stderr)
    return db
[ "def", "usearchdb", "(", "fasta", ",", "alignment", "=", "'local'", ",", "usearch_loc", "=", "'usearch'", ")", ":", "if", "'.udb'", "in", "fasta", ":", "print", "(", "'# ... database found: %s'", "%", "(", "fasta", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "fasta", "type", "=", "check_type", "(", "fasta", ")", "db", "=", "'%s.%s.udb'", "%", "(", "fasta", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", ",", "type", ")", "if", "os", ".", "path", ".", "exists", "(", "db", ")", "is", "False", ":", "print", "(", "'# ... making usearch db for: %s'", "%", "(", "fasta", ")", ",", "file", "=", "sys", ".", "stderr", ")", "if", "alignment", "==", "'local'", ":", "os", ".", "system", "(", "'%s -makeudb_ublast %s -output %s >> log.txt'", "%", "(", "usearch_loc", ",", "fasta", ",", "db", ")", ")", "elif", "alignment", "==", "'global'", ":", "os", ".", "system", "(", "'%s -makeudb_usearch %s -output %s >> log.txt'", "%", "(", "usearch_loc", ",", "fasta", ",", "db", ")", ")", "else", ":", "print", "(", "'# ... database found for: %s'", "%", "(", "fasta", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "db" ]
make usearch db
[ "make", "usearch", "db" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/search.py#L68-L85
train
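Both helpers above shell out via os.system with interpolated paths. A sketch of the same makeblastdb call through subprocess, which avoids shell-quoting problems; the flags mirror the os.system string in the record, and the function name is hypothetical:

import subprocess

def blastdb_subprocess(fasta, db, dbtype, maxfile=10000000):
    # Same flags as the os.system call above, passed as an argv list so
    # spaces in paths cannot break or inject into the command.
    cmd = ["makeblastdb", "-in", fasta, "-out", db,
           "-dbtype", dbtype, "-max_file_sz", str(maxfile)]
    with open("log.txt", "a") as log:
        return subprocess.call(cmd, stdout=log)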
mkouhei/bootstrap-py
bootstrap_py/control.py
_pp
def _pp(dict_data):
    """Pretty print."""
    for key, val in dict_data.items():
        # pylint: disable=superfluous-parens
        print('{0:<11}: {1}'.format(key, val))
python
def _pp(dict_data):
    """Pretty print."""
    for key, val in dict_data.items():
        # pylint: disable=superfluous-parens
        print('{0:<11}: {1}'.format(key, val))
[ "def", "_pp", "(", "dict_data", ")", ":", "for", "key", ",", "val", "in", "dict_data", ".", "items", "(", ")", ":", "print", "(", "'{0:<11}: {1}'", ".", "format", "(", "key", ",", "val", ")", ")" ]
Pretty print.
[ "Pretty", "print", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L11-L15
train
mkouhei/bootstrap-py
bootstrap_py/control.py
print_licences
def print_licences(params, metadata):
    """Print licenses.

    :param argparse.Namespace params: parameter
    :param bootstrap_py.classifier.Classifiers metadata: package metadata
    """
    if hasattr(params, 'licenses'):
        if params.licenses:
            _pp(metadata.licenses_desc())
            sys.exit(0)
python
def print_licences(params, metadata):
    """Print licenses.

    :param argparse.Namespace params: parameter
    :param bootstrap_py.classifier.Classifiers metadata: package metadata
    """
    if hasattr(params, 'licenses'):
        if params.licenses:
            _pp(metadata.licenses_desc())
            sys.exit(0)
[ "def", "print_licences", "(", "params", ",", "metadata", ")", ":", "if", "hasattr", "(", "params", ",", "'licenses'", ")", ":", "if", "params", ".", "licenses", ":", "_pp", "(", "metadata", ".", "licenses_desc", "(", ")", ")", "sys", ".", "exit", "(", "0", ")" ]
Print licenses.

:param argparse.Namespace params: parameter
:param bootstrap_py.classifier.Classifiers metadata: package metadata
[ "Print", "licenses", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L27-L36
train
mkouhei/bootstrap-py
bootstrap_py/control.py
check_repository_existence
def check_repository_existence(params):
    """Check repository existence.

    :param argparse.Namespace params: parameters
    """
    repodir = os.path.join(params.outdir, params.name)
    if os.path.isdir(repodir):
        raise Conflict(
            'Package repository "{0}" has already exists.'.format(repodir))
python
def check_repository_existence(params):
    """Check repository existence.

    :param argparse.Namespace params: parameters
    """
    repodir = os.path.join(params.outdir, params.name)
    if os.path.isdir(repodir):
        raise Conflict(
            'Package repository "{0}" has already exists.'.format(repodir))
[ "def", "check_repository_existence", "(", "params", ")", ":", "repodir", "=", "os", ".", "path", ".", "join", "(", "params", ".", "outdir", ",", "params", ".", "name", ")", "if", "os", ".", "path", ".", "isdir", "(", "repodir", ")", ":", "raise", "Conflict", "(", "'Package repository \"{0}\" has already exists.'", ".", "format", "(", "repodir", ")", ")" ]
Check repository existence.

:param argparse.Namespace params: parameters
[ "Check", "repository", "existence", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L39-L47
train
mkouhei/bootstrap-py
bootstrap_py/control.py
generate_package
def generate_package(params):
    """Generate package repository.

    :param argparse.Namespace params: parameters
    """
    pkg_data = package.PackageData(params)
    pkg_tree = package.PackageTree(pkg_data)
    pkg_tree.generate()
    pkg_tree.move()
    VCS(os.path.join(pkg_tree.outdir, pkg_tree.name), pkg_tree.pkg_data)
python
def generate_package(params):
    """Generate package repository.

    :param argparse.Namespace params: parameters
    """
    pkg_data = package.PackageData(params)
    pkg_tree = package.PackageTree(pkg_data)
    pkg_tree.generate()
    pkg_tree.move()
    VCS(os.path.join(pkg_tree.outdir, pkg_tree.name), pkg_tree.pkg_data)
[ "def", "generate_package", "(", "params", ")", ":", "pkg_data", "=", "package", ".", "PackageData", "(", "params", ")", "pkg_tree", "=", "package", ".", "PackageTree", "(", "pkg_data", ")", "pkg_tree", ".", "generate", "(", ")", "pkg_tree", ".", "move", "(", ")", "VCS", "(", "os", ".", "path", ".", "join", "(", "pkg_tree", ".", "outdir", ",", "pkg_tree", ".", "name", ")", ",", "pkg_tree", ".", "pkg_data", ")" ]
Generate package repository.

:param argparse.Namespace params: parameters
[ "Generate", "package", "repository", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L59-L68
train
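A hedged sketch wiring the bootstrap-py helpers above together; a real params comes from argparse, and generate_package needs the full set of PackageData fields, so only the existence check is exercised here. The Namespace attribute names are taken from the records.

import argparse
params = argparse.Namespace(name="mypkg", outdir=".", licenses=False)
check_repository_existence(params)   # raises Conflict if ./mypkg already exists
# generate_package(params) would follow once all PackageData fields are set.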
christophertbrown/bioscripts
ctbBio/sam2fastq.py
print_single
def print_single(line, rev):
    """
    print single reads to stderr
    """
    if rev is True:
        seq = rc(['', line[9]])[1]
        qual = line[10][::-1]
    else:
        seq = line[9]
        qual = line[10]
    fq = ['@%s' % line[0], seq, '+%s' % line[0], qual]
    print('\n'.join(fq), file = sys.stderr)
python
def print_single(line, rev):
    """
    print single reads to stderr
    """
    if rev is True:
        seq = rc(['', line[9]])[1]
        qual = line[10][::-1]
    else:
        seq = line[9]
        qual = line[10]
    fq = ['@%s' % line[0], seq, '+%s' % line[0], qual]
    print('\n'.join(fq), file = sys.stderr)
[ "def", "print_single", "(", "line", ",", "rev", ")", ":", "if", "rev", "is", "True", ":", "seq", "=", "rc", "(", "[", "''", ",", "line", "[", "9", "]", "]", ")", "[", "1", "]", "qual", "=", "line", "[", "10", "]", "[", ":", ":", "-", "1", "]", "else", ":", "seq", "=", "line", "[", "9", "]", "qual", "=", "line", "[", "10", "]", "fq", "=", "[", "'@%s'", "%", "line", "[", "0", "]", ",", "seq", ",", "'+%s'", "%", "line", "[", "0", "]", ",", "qual", "]", "print", "(", "'\\n'", ".", "join", "(", "fq", ")", ",", "file", "=", "sys", ".", "stderr", ")" ]
print single reads to stderr
[ "print", "single", "reads", "to", "stderr" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/sam2fastq.py#L13-L24
train
christophertbrown/bioscripts
ctbBio/sam2fastq.py
sam2fastq
def sam2fastq(sam, singles = False, force = False):
    """
    convert sam to fastq
    """
    L, R = None, None
    for line in sam:
        if line.startswith('@') is True:
            continue
        line = line.strip().split()
        bit = [True if i == '1' else False \
                for i in bin(int(line[1])).split('b')[1][::-1]]
        while len(bit) < 8:
            bit.append(False)
        pair, proper, na, nap, rev, mrev, left, right = bit
        # make sure read is paired
        if pair is False:
            if singles is True:
                print_single(line, rev)
            continue
        # check if sequence is reverse-complemented
        if rev is True:
            seq = rc(['', line[9]])[1]
            qual = line[10][::-1]
        else:
            seq = line[9]
            qual = line[10]
        # check if read is forward or reverse, return when both have been found
        if left is True:
            if L is not None and force is False:
                print('sam file is not sorted', file = sys.stderr)
                print('\te.g.: %s' % (line[0]), file = sys.stderr)
                exit()
            if L is not None:
                L = None
                continue
            L = ['@%s' % line[0], seq, '+%s' % line[0], qual]
            if R is not None:
                yield L
                yield R
                L, R = None, None
        if right is True:
            if R is not None and force is False:
                print('sam file is not sorted', file = sys.stderr)
                print('\te.g.: %s' % (line[0]), file = sys.stderr)
                exit()
            if R is not None:
                R = None
                continue
            R = ['@%s' % line[0], seq, '+%s' % line[0], qual]
            if L is not None:
                yield L
                yield R
                L, R = None, None
python
def sam2fastq(sam, singles = False, force = False):
    """
    convert sam to fastq
    """
    L, R = None, None
    for line in sam:
        if line.startswith('@') is True:
            continue
        line = line.strip().split()
        bit = [True if i == '1' else False \
                for i in bin(int(line[1])).split('b')[1][::-1]]
        while len(bit) < 8:
            bit.append(False)
        pair, proper, na, nap, rev, mrev, left, right = bit
        # make sure read is paired
        if pair is False:
            if singles is True:
                print_single(line, rev)
            continue
        # check if sequence is reverse-complemented
        if rev is True:
            seq = rc(['', line[9]])[1]
            qual = line[10][::-1]
        else:
            seq = line[9]
            qual = line[10]
        # check if read is forward or reverse, return when both have been found
        if left is True:
            if L is not None and force is False:
                print('sam file is not sorted', file = sys.stderr)
                print('\te.g.: %s' % (line[0]), file = sys.stderr)
                exit()
            if L is not None:
                L = None
                continue
            L = ['@%s' % line[0], seq, '+%s' % line[0], qual]
            if R is not None:
                yield L
                yield R
                L, R = None, None
        if right is True:
            if R is not None and force is False:
                print('sam file is not sorted', file = sys.stderr)
                print('\te.g.: %s' % (line[0]), file = sys.stderr)
                exit()
            if R is not None:
                R = None
                continue
            R = ['@%s' % line[0], seq, '+%s' % line[0], qual]
            if L is not None:
                yield L
                yield R
                L, R = None, None
[ "def", "sam2fastq", "(", "sam", ",", "singles", "=", "False", ",", "force", "=", "False", ")", ":", "L", ",", "R", "=", "None", ",", "None", "for", "line", "in", "sam", ":", "if", "line", ".", "startswith", "(", "'@'", ")", "is", "True", ":", "continue", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "bit", "=", "[", "True", "if", "i", "==", "'1'", "else", "False", "for", "i", "in", "bin", "(", "int", "(", "line", "[", "1", "]", ")", ")", ".", "split", "(", "'b'", ")", "[", "1", "]", "[", ":", ":", "-", "1", "]", "]", "while", "len", "(", "bit", ")", "<", "8", ":", "bit", ".", "append", "(", "False", ")", "pair", ",", "proper", ",", "na", ",", "nap", ",", "rev", ",", "mrev", ",", "left", ",", "right", "=", "bit", "if", "pair", "is", "False", ":", "if", "singles", "is", "True", ":", "print_single", "(", "line", ",", "rev", ")", "continue", "if", "rev", "is", "True", ":", "seq", "=", "rc", "(", "[", "''", ",", "line", "[", "9", "]", "]", ")", "[", "1", "]", "qual", "=", "line", "[", "10", "]", "[", ":", ":", "-", "1", "]", "else", ":", "seq", "=", "line", "[", "9", "]", "qual", "=", "line", "[", "10", "]", "if", "left", "is", "True", ":", "if", "L", "is", "not", "None", "and", "force", "is", "False", ":", "print", "(", "'sam file is not sorted'", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "'\\te.g.: %s'", "%", "(", "line", "[", "0", "]", ")", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "if", "L", "is", "not", "None", ":", "L", "=", "None", "continue", "L", "=", "[", "'@%s'", "%", "line", "[", "0", "]", ",", "seq", ",", "'+%s'", "%", "line", "[", "0", "]", ",", "qual", "]", "if", "R", "is", "not", "None", ":", "yield", "L", "yield", "R", "L", ",", "R", "=", "None", ",", "None", "if", "right", "is", "True", ":", "if", "R", "is", "not", "None", "and", "force", "is", "False", ":", "print", "(", "'sam file is not sorted'", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "'\\te.g.: %s'", "%", "(", "line", "[", "0", "]", ")", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "if", "R", "is", "not", "None", ":", "R", "=", "None", "continue", "R", "=", "[", "'@%s'", "%", "line", "[", "0", "]", ",", "seq", ",", "'+%s'", "%", "line", "[", "0", "]", ",", "qual", "]", "if", "L", "is", "not", "None", ":", "yield", "L", "yield", "R", "L", ",", "R", "=", "None", ",", "None" ]
convert sam to fastq
[ "convert", "sam", "to", "fastq" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/sam2fastq.py#L26-L78
train
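The bit-decoding trick above, shown in isolation: the SAM FLAG field is turned into a reversed binary string so the list index matches the flag's bit position. SAM FLAG 99 is a paired, properly aligned, mate-reversed, first-in-pair read:

flag = 99
bit = [c == '1' for c in bin(flag).split('b')[1][::-1]]
bit += [False] * (8 - len(bit))   # pad to the 8 flags the function unpacks
pair, proper, na, nap, rev, mrev, left, right = bit
assert (pair, proper, left, right) == (True, True, True, False)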
christophertbrown/bioscripts
ctbBio/subset_sam.py
sort_sam
def sort_sam(sam, sort):
    """
    sort sam file
    """
    tempdir = '%s/' % (os.path.abspath(sam).rsplit('/', 1)[0])
    if sort is True:
        mapping = '%s.sorted.sam' % (sam.rsplit('.', 1)[0])
        if sam != '-':
            if os.path.exists(mapping) is False:
                os.system("\
                    sort -k1 --buffer-size=%sG -T %s -o %s %s\
                    " % (sbuffer, tempdir, mapping, sam))
        else:
            mapping = 'stdin-sam.sorted.sam'
            p = Popen("sort -k1 --buffer-size=%sG -T %s -o %s" \
                    % (sbuffer, tempdir, mapping), stdin = sys.stdin, shell = True)
            p.communicate()
        mapping = open(mapping)
    else:
        if sam == '-':
            mapping = sys.stdin
        else:
            mapping = open(sam)
    return mapping
python
def sort_sam(sam, sort):
    """
    sort sam file
    """
    tempdir = '%s/' % (os.path.abspath(sam).rsplit('/', 1)[0])
    if sort is True:
        mapping = '%s.sorted.sam' % (sam.rsplit('.', 1)[0])
        if sam != '-':
            if os.path.exists(mapping) is False:
                os.system("\
                    sort -k1 --buffer-size=%sG -T %s -o %s %s\
                    " % (sbuffer, tempdir, mapping, sam))
        else:
            mapping = 'stdin-sam.sorted.sam'
            p = Popen("sort -k1 --buffer-size=%sG -T %s -o %s" \
                    % (sbuffer, tempdir, mapping), stdin = sys.stdin, shell = True)
            p.communicate()
        mapping = open(mapping)
    else:
        if sam == '-':
            mapping = sys.stdin
        else:
            mapping = open(sam)
    return mapping
[ "def", "sort_sam", "(", "sam", ",", "sort", ")", ":", "tempdir", "=", "'%s/'", "%", "(", "os", ".", "path", ".", "abspath", "(", "sam", ")", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "0", "]", ")", "if", "sort", "is", "True", ":", "mapping", "=", "'%s.sorted.sam'", "%", "(", "sam", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", ")", "if", "sam", "!=", "'-'", ":", "if", "os", ".", "path", ".", "exists", "(", "mapping", ")", "is", "False", ":", "os", ".", "system", "(", "\"\\ sort -k1 --buffer-size=%sG -T %s -o %s %s\\ \"", "%", "(", "sbuffer", ",", "tempdir", ",", "mapping", ",", "sam", ")", ")", "else", ":", "mapping", "=", "'stdin-sam.sorted.sam'", "p", "=", "Popen", "(", "\"sort -k1 --buffer-size=%sG -T %s -o %s\"", "%", "(", "sbuffer", ",", "tempdir", ",", "mapping", ")", ",", "stdin", "=", "sys", ".", "stdin", ",", "shell", "=", "True", ")", "p", ".", "communicate", "(", ")", "mapping", "=", "open", "(", "mapping", ")", "else", ":", "if", "sam", "==", "'-'", ":", "mapping", "=", "sys", ".", "stdin", "else", ":", "mapping", "=", "open", "(", "sam", ")", "return", "mapping" ]
sort sam file
[ "sort", "sam", "file" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/subset_sam.py#L14-L37
train
christophertbrown/bioscripts
ctbBio/subset_sam.py
sub_sam
def sub_sam(sam, percent, sort = True, sbuffer = False):
    """
    randomly subset sam file
    """
    mapping = sort_sam(sam, sort)
    pool = [1 for i in range(0, percent)] + [0 for i in range(0, 100 - percent)]
    c = cycle([1, 2])
    for line in mapping:
        line = line.strip().split()
        if line[0].startswith('@'): # get the sam header
            yield line
            continue
        if int(line[1]) <= 20: # is this from a single read?
            if random.choice(pool) == 1:
                yield line
        else:
            n = next(c)
            if n == 1:
                prev = line
            if n == 2 and random.choice(pool) == 1:
                yield prev
                yield line
python
def sub_sam(sam, percent, sort = True, sbuffer = False):
    """
    randomly subset sam file
    """
    mapping = sort_sam(sam, sort)
    pool = [1 for i in range(0, percent)] + [0 for i in range(0, 100 - percent)]
    c = cycle([1, 2])
    for line in mapping:
        line = line.strip().split()
        if line[0].startswith('@'): # get the sam header
            yield line
            continue
        if int(line[1]) <= 20: # is this from a single read?
            if random.choice(pool) == 1:
                yield line
        else:
            n = next(c)
            if n == 1:
                prev = line
            if n == 2 and random.choice(pool) == 1:
                yield prev
                yield line
[ "def", "sub_sam", "(", "sam", ",", "percent", ",", "sort", "=", "True", ",", "sbuffer", "=", "False", ")", ":", "mapping", "=", "sort_sam", "(", "sam", ",", "sort", ")", "pool", "=", "[", "1", "for", "i", "in", "range", "(", "0", ",", "percent", ")", "]", "+", "[", "0", "for", "i", "in", "range", "(", "0", ",", "100", "-", "percent", ")", "]", "c", "=", "cycle", "(", "[", "1", ",", "2", "]", ")", "for", "line", "in", "mapping", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "line", "[", "0", "]", ".", "startswith", "(", "'@'", ")", ":", "yield", "line", "continue", "if", "int", "(", "line", "[", "1", "]", ")", "<=", "20", ":", "if", "random", ".", "choice", "(", "pool", ")", "==", "1", ":", "yield", "line", "else", ":", "n", "=", "next", "(", "c", ")", "if", "n", "==", "1", ":", "prev", "=", "line", "if", "n", "==", "2", "and", "random", ".", "choice", "(", "pool", ")", "==", "1", ":", "yield", "prev", "yield", "line" ]
randomly subset sam file
[ "randomly", "subset", "sam", "file" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/subset_sam.py#L39-L60
train
christophertbrown/bioscripts
ctbBio/fastq2fasta.py
fq2fa
def fq2fa(fq):
    """
    convert fq to fa
    """
    c = cycle([1, 2, 3, 4])
    for line in fq:
        n = next(c)
        if n == 1:
            seq = ['>%s' % (line.strip().split('@', 1)[1])]
        if n == 2:
            seq.append(line.strip())
            yield seq
python
def fq2fa(fq):
    """
    convert fq to fa
    """
    c = cycle([1, 2, 3, 4])
    for line in fq:
        n = next(c)
        if n == 1:
            seq = ['>%s' % (line.strip().split('@', 1)[1])]
        if n == 2:
            seq.append(line.strip())
            yield seq
[ "def", "fq2fa", "(", "fq", ")", ":", "c", "=", "cycle", "(", "[", "1", ",", "2", ",", "3", ",", "4", "]", ")", "for", "line", "in", "fq", ":", "n", "=", "next", "(", "c", ")", "if", "n", "==", "1", ":", "seq", "=", "[", "'>%s'", "%", "(", "line", ".", "strip", "(", ")", ".", "split", "(", "'@'", ",", "1", ")", "[", "1", "]", ")", "]", "if", "n", "==", "2", ":", "seq", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "yield", "seq" ]
convert fq to fa
[ "convert", "fq", "to", "fa" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/fastq2fasta.py#L11-L22
train
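A tiny demonstration of the generator above: four FASTQ lines (header, sequence, separator, qualities) become one two-line FASTA record. Running it standalone needs `from itertools import cycle`, which the source module imports at the top.

fq_lines = ["@read1\n", "ACGT\n", "+\n", "IIII\n"]
for record in fq2fa(fq_lines):
    print("\n".join(record))   # prints ">read1" then "ACGT"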
elbow-jason/Uno-deprecated
uno/decorators.py
change_return_type
def change_return_type(f):
    """
    Converts the returned value of wrapped function to the type of the
    first arg or to the type specified by a kwarg key return_type's value.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if kwargs.has_key('return_type'):
            return_type = kwargs['return_type']
            kwargs.pop('return_type')
            return return_type(f(*args, **kwargs))
        elif len(args) > 0:
            return_type = type(args[0])
            return return_type(f(*args, **kwargs))
        else:
            return f(*args, **kwargs)
    return wrapper
python
def change_return_type(f):
    """
    Converts the returned value of wrapped function to the type of the
    first arg or to the type specified by a kwarg key return_type's value.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if kwargs.has_key('return_type'):
            return_type = kwargs['return_type']
            kwargs.pop('return_type')
            return return_type(f(*args, **kwargs))
        elif len(args) > 0:
            return_type = type(args[0])
            return return_type(f(*args, **kwargs))
        else:
            return f(*args, **kwargs)
    return wrapper
[ "def", "change_return_type", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "if", "kwargs", ".", "has_key", "(", "'return_type'", ")", ":", "return_type", "=", "kwargs", "[", "'return_type'", "]", "kwargs", ".", "pop", "(", "'return_type'", ")", "return", "return_type", "(", "f", "(", "*", "args", ",", "**", "kwargs", ")", ")", "elif", "len", "(", "args", ")", ">", "0", ":", "return_type", "=", "type", "(", "args", "[", "0", "]", ")", "return", "return_type", "(", "f", "(", "*", "args", ",", "**", "kwargs", ")", ")", "else", ":", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")", "return", "wrapper" ]
Converts the returned value of wrapped function to the type of the first arg or to the type specified by a kwarg key return_type's value.
[ "Converts", "the", "returned", "value", "of", "wrapped", "function", "to", "the", "type", "of", "the", "first", "arg", "or", "to", "the", "type", "specified", "by", "a", "kwarg", "key", "return_type", "s", "value", "." ]
4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4
https://github.com/elbow-jason/Uno-deprecated/blob/4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4/uno/decorators.py#L11-L27
train
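A usage sketch for the decorator above; note that has_key() makes it Python 2 only (Python 3 would use `'return_type' in kwargs`).

@change_return_type
def doubled(seq):
    return [x * 2 for x in seq]

doubled((1, 2, 3))                     # -> (2, 4, 6): coerced to the first arg's type
doubled([1, 2, 3], return_type=set)    # -> {2, 4, 6}: forced by the kwarg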
elbow-jason/Uno-deprecated
uno/decorators.py
convert_args_to_sets
def convert_args_to_sets(f):
    """
    Converts all args to 'set' type via self.setify function.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        args = (setify(x) for x in args)
        return f(*args, **kwargs)
    return wrapper
python
def convert_args_to_sets(f):
    """
    Converts all args to 'set' type via self.setify function.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        args = (setify(x) for x in args)
        return f(*args, **kwargs)
    return wrapper
[ "def", "convert_args_to_sets", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "args", "=", "(", "setify", "(", "x", ")", "for", "x", "in", "args", ")", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")", "return", "wrapper" ]
Converts all args to 'set' type via self.setify function.
[ "Converts", "all", "args", "to", "set", "type", "via", "self", ".", "setify", "function", "." ]
4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4
https://github.com/elbow-jason/Uno-deprecated/blob/4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4/uno/decorators.py#L30-L38
train
laymonage/kbbi-python
kbbi/kbbi.py
KBBI._init_entri
def _init_entri(self, laman):
    """Membuat objek-objek entri dari laman yang diambil.

    :param laman: Laman respons yang dikembalikan oleh KBBI daring.
    :type laman: Response
    """
    sup = BeautifulSoup(laman.text, 'html.parser')
    estr = ''
    for label in sup.find('hr').next_siblings:
        if label.name == 'hr':
            self.entri.append(Entri(estr))
            break
        if label.name == 'h2':
            if estr:
                self.entri.append(Entri(estr))
            estr = ''
        estr += str(label).strip()
python
def _init_entri(self, laman):
    """Membuat objek-objek entri dari laman yang diambil.

    :param laman: Laman respons yang dikembalikan oleh KBBI daring.
    :type laman: Response
    """
    sup = BeautifulSoup(laman.text, 'html.parser')
    estr = ''
    for label in sup.find('hr').next_siblings:
        if label.name == 'hr':
            self.entri.append(Entri(estr))
            break
        if label.name == 'h2':
            if estr:
                self.entri.append(Entri(estr))
            estr = ''
        estr += str(label).strip()
[ "def", "_init_entri", "(", "self", ",", "laman", ")", ":", "sup", "=", "BeautifulSoup", "(", "laman", ".", "text", ",", "'html.parser'", ")", "estr", "=", "''", "for", "label", "in", "sup", ".", "find", "(", "'hr'", ")", ".", "next_siblings", ":", "if", "label", ".", "name", "==", "'hr'", ":", "self", ".", "entri", ".", "append", "(", "Entri", "(", "estr", ")", ")", "break", "if", "label", ".", "name", "==", "'h2'", ":", "if", "estr", ":", "self", ".", "entri", ".", "append", "(", "Entri", "(", "estr", ")", ")", "estr", "=", "''", "estr", "+=", "str", "(", "label", ")", ".", "strip", "(", ")" ]
Membuat objek-objek entri dari laman yang diambil.

:param laman: Laman respons yang dikembalikan oleh KBBI daring.
:type laman: Response
[ "Membuat", "objek", "-", "objek", "entri", "dari", "laman", "yang", "diambil", "." ]
1a52ba8bcc6dc4c5c1215f9e00207aca264287d6
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L46-L63
train
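A sketch of the page shape _init_entri expects: entry blocks are <h2>-delimited siblings between two <hr> tags, accumulated as strings and flushed into Entri objects. The HTML below is a made-up minimal example, not KBBI markup.

from bs4 import BeautifulSoup
html = "<hr/><h2>entri satu</h2><p>isi</p><h2>entri dua</h2><p>isi</p><hr/>"
sup = BeautifulSoup(html, "html.parser")
# The parser walks these siblings, starting a new entry at each <h2>.
chunks = [str(label).strip() for label in sup.find("hr").next_siblings]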
laymonage/kbbi-python
kbbi/kbbi.py
Entri._init_kata_dasar
def _init_kata_dasar(self, dasar):
    """Memproses kata dasar yang ada dalam nama entri.

    :param dasar: ResultSet untuk label HTML dengan class="rootword"
    :type dasar: ResultSet
    """
    for tiap in dasar:
        kata = tiap.find('a')
        dasar_no = kata.find('sup')
        kata = ambil_teks_dalam_label(kata)
        self.kata_dasar.append(
            kata + ' [{}]'.format(dasar_no.text.strip()) if dasar_no else kata
        )
python
def _init_kata_dasar(self, dasar):
    """Memproses kata dasar yang ada dalam nama entri.

    :param dasar: ResultSet untuk label HTML dengan class="rootword"
    :type dasar: ResultSet
    """
    for tiap in dasar:
        kata = tiap.find('a')
        dasar_no = kata.find('sup')
        kata = ambil_teks_dalam_label(kata)
        self.kata_dasar.append(
            kata + ' [{}]'.format(dasar_no.text.strip()) if dasar_no else kata
        )
[ "def", "_init_kata_dasar", "(", "self", ",", "dasar", ")", ":", "for", "tiap", "in", "dasar", ":", "kata", "=", "tiap", ".", "find", "(", "'a'", ")", "dasar_no", "=", "kata", ".", "find", "(", "'sup'", ")", "kata", "=", "ambil_teks_dalam_label", "(", "kata", ")", "self", ".", "kata_dasar", ".", "append", "(", "kata", "+", "' [{}]'", ".", "format", "(", "dasar_no", ".", "text", ".", "strip", "(", ")", ")", "if", "dasar_no", "else", "kata", ")" ]
Memproses kata dasar yang ada dalam nama entri.

:param dasar: ResultSet untuk label HTML dengan class="rootword"
:type dasar: ResultSet
[ "Memproses", "kata", "dasar", "yang", "ada", "dalam", "nama", "entri", "." ]
1a52ba8bcc6dc4c5c1215f9e00207aca264287d6
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L126-L139
train
laymonage/kbbi-python
kbbi/kbbi.py
Entri.serialisasi
def serialisasi(self):
    """Mengembalikan hasil serialisasi objek Entri ini.

    :returns: Dictionary hasil serialisasi
    :rtype: dict
    """
    return {
        "nama": self.nama,
        "nomor": self.nomor,
        "kata_dasar": self.kata_dasar,
        "pelafalan": self.pelafalan,
        "bentuk_tidak_baku": self.bentuk_tidak_baku,
        "varian": self.varian,
        "makna": [makna.serialisasi() for makna in self.makna]
    }
python
def serialisasi(self):
    """Mengembalikan hasil serialisasi objek Entri ini.

    :returns: Dictionary hasil serialisasi
    :rtype: dict
    """
    return {
        "nama": self.nama,
        "nomor": self.nomor,
        "kata_dasar": self.kata_dasar,
        "pelafalan": self.pelafalan,
        "bentuk_tidak_baku": self.bentuk_tidak_baku,
        "varian": self.varian,
        "makna": [makna.serialisasi() for makna in self.makna]
    }
[ "def", "serialisasi", "(", "self", ")", ":", "return", "{", "\"nama\"", ":", "self", ".", "nama", ",", "\"nomor\"", ":", "self", ".", "nomor", ",", "\"kata_dasar\"", ":", "self", ".", "kata_dasar", ",", "\"pelafalan\"", ":", "self", ".", "pelafalan", ",", "\"bentuk_tidak_baku\"", ":", "self", ".", "bentuk_tidak_baku", ",", "\"varian\"", ":", "self", ".", "varian", ",", "\"makna\"", ":", "[", "makna", ".", "serialisasi", "(", ")", "for", "makna", "in", "self", ".", "makna", "]", "}" ]
Mengembalikan hasil serialisasi objek Entri ini.

:returns: Dictionary hasil serialisasi
:rtype: dict
[ "Mengembalikan", "hasil", "serialisasi", "objek", "Entri", "ini", "." ]
1a52ba8bcc6dc4c5c1215f9e00207aca264287d6
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L141-L156
train
laymonage/kbbi-python
kbbi/kbbi.py
Entri._makna
def _makna(self):
    """Mengembalikan representasi string untuk semua makna entri ini.

    :returns: String representasi makna-makna
    :rtype: str
    """
    if len(self.makna) > 1:
        return '\n'.join(
            str(i) + ". " + str(makna)
            for i, makna in enumerate(self.makna, 1)
        )
    return str(self.makna[0])
python
def _makna(self):
    """Mengembalikan representasi string untuk semua makna entri ini.

    :returns: String representasi makna-makna
    :rtype: str
    """
    if len(self.makna) > 1:
        return '\n'.join(
            str(i) + ". " + str(makna)
            for i, makna in enumerate(self.makna, 1)
        )
    return str(self.makna[0])
[ "def", "_makna", "(", "self", ")", ":", "if", "len", "(", "self", ".", "makna", ")", ">", "1", ":", "return", "'\\n'", ".", "join", "(", "str", "(", "i", ")", "+", "\". \"", "+", "str", "(", "makna", ")", "for", "i", ",", "makna", "in", "enumerate", "(", "self", ".", "makna", ",", "1", ")", ")", "return", "str", "(", "self", ".", "makna", "[", "0", "]", ")" ]
Mengembalikan representasi string untuk semua makna entri ini.

:returns: String representasi makna-makna
:rtype: str
[ "Mengembalikan", "representasi", "string", "untuk", "semua", "makna", "entri", "ini", "." ]
1a52ba8bcc6dc4c5c1215f9e00207aca264287d6
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L158-L170
train
laymonage/kbbi-python
kbbi/kbbi.py
Entri._nama
def _nama(self):
    """Mengembalikan representasi string untuk nama entri ini.

    :returns: String representasi nama entri
    :rtype: str
    """
    hasil = self.nama
    if self.nomor:
        hasil += " [{}]".format(self.nomor)
    if self.kata_dasar:
        hasil = " » ".join(self.kata_dasar) + " » " + hasil
    return hasil
python
def _nama(self):
    """Mengembalikan representasi string untuk nama entri ini.

    :returns: String representasi nama entri
    :rtype: str
    """
    hasil = self.nama
    if self.nomor:
        hasil += " [{}]".format(self.nomor)
    if self.kata_dasar:
        hasil = " » ".join(self.kata_dasar) + " » " + hasil
    return hasil
[ "def", "_nama", "(", "self", ")", ":", "hasil", "=", "self", ".", "nama", "if", "self", ".", "nomor", ":", "hasil", "+=", "\" [{}]\"", ".", "format", "(", "self", ".", "nomor", ")", "if", "self", ".", "kata_dasar", ":", "hasil", "=", "\" » \".", "j", "oin(", "s", "elf.", "k", "ata_dasar)", " ", " ", " » \" +", "h", "sil", "return", "hasil" ]
Mengembalikan representasi string untuk nama entri ini.

:returns: String representasi nama entri
:rtype: str
[ "Mengembalikan", "representasi", "string", "untuk", "nama", "entri", "ini", "." ]
1a52ba8bcc6dc4c5c1215f9e00207aca264287d6
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L172-L184
train
laymonage/kbbi-python
kbbi/kbbi.py
Entri._varian
def _varian(self, varian):
    """Mengembalikan representasi string untuk varian entri ini.

    Dapat digunakan untuk "Varian" maupun "Bentuk tidak baku".

    :param varian: List bentuk tidak baku atau varian
    :type varian: list
    :returns: String representasi varian atau bentuk tidak baku
    :rtype: str
    """
    if varian == self.bentuk_tidak_baku:
        nama = "Bentuk tidak baku"
    elif varian == self.varian:
        nama = "Varian"
    else:
        return ''
    return nama + ': ' + ', '.join(varian)
python
def _varian(self, varian):
    """Mengembalikan representasi string untuk varian entri ini.

    Dapat digunakan untuk "Varian" maupun "Bentuk tidak baku".

    :param varian: List bentuk tidak baku atau varian
    :type varian: list
    :returns: String representasi varian atau bentuk tidak baku
    :rtype: str
    """
    if varian == self.bentuk_tidak_baku:
        nama = "Bentuk tidak baku"
    elif varian == self.varian:
        nama = "Varian"
    else:
        return ''
    return nama + ': ' + ', '.join(varian)
[ "def", "_varian", "(", "self", ",", "varian", ")", ":", "if", "varian", "==", "self", ".", "bentuk_tidak_baku", ":", "nama", "=", "\"Bentuk tidak baku\"", "elif", "varian", "==", "self", ".", "varian", ":", "nama", "=", "\"Varian\"", "else", ":", "return", "''", "return", "nama", "+", "': '", "+", "', '", ".", "join", "(", "varian", ")" ]
Mengembalikan representasi string untuk varian entri ini.
Dapat digunakan untuk "Varian" maupun "Bentuk tidak baku".

:param varian: List bentuk tidak baku atau varian
:type varian: list
:returns: String representasi varian atau bentuk tidak baku
:rtype: str
[ "Mengembalikan", "representasi", "string", "untuk", "varian", "entri", "ini", ".", "Dapat", "digunakan", "untuk", "Varian", "maupun", "Bentuk", "tidak", "baku", "." ]
1a52ba8bcc6dc4c5c1215f9e00207aca264287d6
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L186-L202
train
laymonage/kbbi-python
kbbi/kbbi.py
Makna._init_kelas
def _init_kelas(self, makna_label):
    """Memproses kelas kata yang ada dalam makna.

    :param makna_label: BeautifulSoup untuk makna yang ingin diproses.
    :type makna_label: BeautifulSoup
    """
    kelas = makna_label.find(color='red')
    lain = makna_label.find(color='darkgreen')
    info = makna_label.find(color='green')
    if kelas:
        kelas = kelas.find_all('span')
    if lain:
        self.kelas = {lain.text.strip(): lain['title'].strip()}
        self.submakna = lain.next_sibling.strip()
        self.submakna += ' ' + makna_label.find(color='grey').text.strip()
    else:
        self.kelas = {
            k.text.strip(): k['title'].strip() for k in kelas
        } if kelas else {}
    self.info = info.text.strip() if info else ''
python
def _init_kelas(self, makna_label):
    """Memproses kelas kata yang ada dalam makna.

    :param makna_label: BeautifulSoup untuk makna yang ingin diproses.
    :type makna_label: BeautifulSoup
    """
    kelas = makna_label.find(color='red')
    lain = makna_label.find(color='darkgreen')
    info = makna_label.find(color='green')
    if kelas:
        kelas = kelas.find_all('span')
    if lain:
        self.kelas = {lain.text.strip(): lain['title'].strip()}
        self.submakna = lain.next_sibling.strip()
        self.submakna += ' ' + makna_label.find(color='grey').text.strip()
    else:
        self.kelas = {
            k.text.strip(): k['title'].strip() for k in kelas
        } if kelas else {}
    self.info = info.text.strip() if info else ''
[ "def", "_init_kelas", "(", "self", ",", "makna_label", ")", ":", "kelas", "=", "makna_label", ".", "find", "(", "color", "=", "'red'", ")", "lain", "=", "makna_label", ".", "find", "(", "color", "=", "'darkgreen'", ")", "info", "=", "makna_label", ".", "find", "(", "color", "=", "'green'", ")", "if", "kelas", ":", "kelas", "=", "kelas", ".", "find_all", "(", "'span'", ")", "if", "lain", ":", "self", ".", "kelas", "=", "{", "lain", ".", "text", ".", "strip", "(", ")", ":", "lain", "[", "'title'", "]", ".", "strip", "(", ")", "}", "self", ".", "submakna", "=", "lain", ".", "next_sibling", ".", "strip", "(", ")", "self", ".", "submakna", "+=", "' '", "+", "makna_label", ".", "find", "(", "color", "=", "'grey'", ")", ".", "text", ".", "strip", "(", ")", "else", ":", "self", ".", "kelas", "=", "{", "k", ".", "text", ".", "strip", "(", ")", ":", "k", "[", "'title'", "]", ".", "strip", "(", ")", "for", "k", "in", "kelas", "}", "if", "kelas", "else", "{", "}", "self", ".", "info", "=", "info", ".", "text", ".", "strip", "(", ")", "if", "info", "else", "''" ]
Memproses kelas kata yang ada dalam makna.

:param makna_label: BeautifulSoup untuk makna yang ingin diproses.
:type makna_label: BeautifulSoup
[ "Memproses", "kelas", "kata", "yang", "ada", "dalam", "makna", "." ]
1a52ba8bcc6dc4c5c1215f9e00207aca264287d6
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L239-L259
train
laymonage/kbbi-python
kbbi/kbbi.py
Makna._init_contoh
def _init_contoh(self, makna_label):
    """Memproses contoh yang ada dalam makna.

    :param makna_label: BeautifulSoup untuk makna yang ingin diproses.
    :type makna_label: BeautifulSoup
    """
    indeks = makna_label.text.find(': ')
    if indeks != -1:
        contoh = makna_label.text[indeks + 2:].strip()
        self.contoh = contoh.split('; ')
    else:
        self.contoh = []
python
def _init_contoh(self, makna_label):
    """Memproses contoh yang ada dalam makna.

    :param makna_label: BeautifulSoup untuk makna yang ingin diproses.
    :type makna_label: BeautifulSoup
    """
    indeks = makna_label.text.find(': ')
    if indeks != -1:
        contoh = makna_label.text[indeks + 2:].strip()
        self.contoh = contoh.split('; ')
    else:
        self.contoh = []
[ "def", "_init_contoh", "(", "self", ",", "makna_label", ")", ":", "indeks", "=", "makna_label", ".", "text", ".", "find", "(", "': '", ")", "if", "indeks", "!=", "-", "1", ":", "contoh", "=", "makna_label", ".", "text", "[", "indeks", "+", "2", ":", "]", ".", "strip", "(", ")", "self", ".", "contoh", "=", "contoh", ".", "split", "(", "'; '", ")", "else", ":", "self", ".", "contoh", "=", "[", "]" ]
Processes the usage examples present in a meaning. :param makna_label: BeautifulSoup object for the meaning to be processed. :type makna_label: BeautifulSoup
[ "Memproses", "contoh", "yang", "ada", "dalam", "makna", "." ]
1a52ba8bcc6dc4c5c1215f9e00207aca264287d6
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L261-L273
train
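The example-extraction rule in `_init_contoh` is plain string work, so it can be illustrated without BeautifulSoup; the sentence below is made up for the demonstration:

```python
# Everything after the first ': ' is treated as a '; '-separated list of examples.
text = 'orang yang berjalan: dia berjalan ke pasar; kami berjalan bersama'
indeks = text.find(': ')
contoh = text[indeks + 2:].strip().split('; ') if indeks != -1 else []
print(contoh)  # ['dia berjalan ke pasar', 'kami berjalan bersama']
```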
laymonage/kbbi-python
kbbi/kbbi.py
Makna.serialisasi
def serialisasi(self): """Mengembalikan hasil serialisasi objek Makna ini. :returns: Dictionary hasil serialisasi :rtype: dict """ return { "kelas": self.kelas, "submakna": self.submakna, "info": self.info, "contoh": self.contoh }
python
def serialisasi(self): """Mengembalikan hasil serialisasi objek Makna ini. :returns: Dictionary hasil serialisasi :rtype: dict """ return { "kelas": self.kelas, "submakna": self.submakna, "info": self.info, "contoh": self.contoh }
[ "def", "serialisasi", "(", "self", ")", ":", "return", "{", "\"kelas\"", ":", "self", ".", "kelas", ",", "\"submakna\"", ":", "self", ".", "submakna", ",", "\"info\"", ":", "self", ".", "info", ",", "\"contoh\"", ":", "self", ".", "contoh", "}" ]
Returns the serialization result of this Makna object. :returns: Dictionary containing the serialization result :rtype: dict
[ "Mengembalikan", "hasil", "serialisasi", "objek", "Makna", "ini", "." ]
1a52ba8bcc6dc4c5c1215f9e00207aca264287d6
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L275-L287
train
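Since `serialisasi()` returns a plain dict, its output drops straight into `json.dumps`; the attribute values below are placeholders showing the shape, not real KBBI data:

```python
import json

# Shape of a serialized Makna, with invented values.
makna_dict = {"kelas": {"n": "nomina"}, "submakna": "orang yang berjalan",
              "info": "", "contoh": ["dia berjalan ke pasar"]}
print(json.dumps(makna_dict, indent=2, ensure_ascii=False))
```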
mkouhei/bootstrap-py
bootstrap_py/docs.py
build_sphinx
def build_sphinx(pkg_data, projectdir): """Build sphinx documentation. :rtype: int :return: subprocess.call return code :param `bootstrap_py.control.PackageData` pkg_data: package meta data :param str projectdir: project root directory """ try: version, _minor_version = pkg_data.version.rsplit('.', 1) except ValueError: version = pkg_data.version args = ' '.join(('sphinx-quickstart', '--sep', '-q', '-p "{name}"', '-a "{author}"', '-v "{version}"', '-r "{release}"', '-l en', '--suffix=.rst', '--master=index', '--ext-autodoc', '--ext-viewcode', '--makefile', '{projectdir}')).format(name=pkg_data.name, author=pkg_data.author, version=version, release=pkg_data.version, projectdir=projectdir) if subprocess.call(shlex.split(args)) == 0: _touch_gitkeep(projectdir)
python
def build_sphinx(pkg_data, projectdir): """Build sphinx documentation. :rtype: int :return: subprocess.call return code :param `bootstrap_py.control.PackageData` pkg_data: package meta data :param str projectdir: project root directory """ try: version, _minor_version = pkg_data.version.rsplit('.', 1) except ValueError: version = pkg_data.version args = ' '.join(('sphinx-quickstart', '--sep', '-q', '-p "{name}"', '-a "{author}"', '-v "{version}"', '-r "{release}"', '-l en', '--suffix=.rst', '--master=index', '--ext-autodoc', '--ext-viewcode', '--makefile', '{projectdir}')).format(name=pkg_data.name, author=pkg_data.author, version=version, release=pkg_data.version, projectdir=projectdir) if subprocess.call(shlex.split(args)) == 0: _touch_gitkeep(projectdir)
[ "def", "build_sphinx", "(", "pkg_data", ",", "projectdir", ")", ":", "try", ":", "version", ",", "_minor_version", "=", "pkg_data", ".", "version", ".", "rsplit", "(", "'.'", ",", "1", ")", "except", "ValueError", ":", "version", "=", "pkg_data", ".", "version", "args", "=", "' '", ".", "join", "(", "(", "'sphinx-quickstart'", ",", "'--sep'", ",", "'-q'", ",", "'-p \"{name}\"'", ",", "'-a \"{author}\"'", ",", "'-v \"{version}\"'", ",", "'-r \"{release}\"'", ",", "'-l en'", ",", "'--suffix=.rst'", ",", "'--master=index'", ",", "'--ext-autodoc'", ",", "'--ext-viewcode'", ",", "'--makefile'", ",", "'{projectdir}'", ")", ")", ".", "format", "(", "name", "=", "pkg_data", ".", "name", ",", "author", "=", "pkg_data", ".", "author", ",", "version", "=", "version", ",", "release", "=", "pkg_data", ".", "version", ",", "projectdir", "=", "projectdir", ")", "if", "subprocess", ".", "call", "(", "shlex", ".", "split", "(", "args", ")", ")", "==", "0", ":", "_touch_gitkeep", "(", "projectdir", ")" ]
Build sphinx documentation. :rtype: None :return: None (the subprocess.call return code is only checked internally) :param `bootstrap_py.control.PackageData` pkg_data: package meta data :param str projectdir: project root directory
[ "Build", "sphinx", "documentation", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/docs.py#L8-L40
train
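A small sketch of the two mechanics `build_sphinx` relies on: `rsplit('.', 1)` derives the short version from the release string, and `shlex.split` tokenizes the quoted command line before it is handed to `subprocess.call` (the package name and author here are placeholders):

```python
import shlex

release = '0.1.2'
version, _minor = release.rsplit('.', 1)   # '0.1' and '2'

args = 'sphinx-quickstart --sep -q -p "{name}" -a "{author}"'.format(
    name='mypkg', author='Jane Doe')
print(version)            # 0.1
print(shlex.split(args))  # ['sphinx-quickstart', '--sep', '-q', '-p', 'mypkg', '-a', 'Jane Doe']
```

Note that `shlex.split` strips the quoting, so a two-word author name stays a single argument.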
christophertbrown/bioscripts
ctbBio/crossmap.py
bowtiedb
def bowtiedb(fa, keepDB): """ make bowtie db """ btdir = '%s/bt2' % (os.getcwd()) # make directory for bowtie2 database files if not os.path.exists(btdir): os.mkdir(btdir) btdb = '%s/%s' % (btdir, fa.rsplit('/', 1)[-1]) if keepDB is True: if os.path.exists('%s.1.bt2' % (btdb)): return btdb p = subprocess.Popen('bowtie2-build -q %s %s' \ % (fa, btdb), shell = True) p.communicate() return btdb
python
def bowtiedb(fa, keepDB): """ make bowtie db """ btdir = '%s/bt2' % (os.getcwd()) # make directory for bowtie2 database files if not os.path.exists(btdir): os.mkdir(btdir) btdb = '%s/%s' % (btdir, fa.rsplit('/', 1)[-1]) if keepDB is True: if os.path.exists('%s.1.bt2' % (btdb)): return btdb p = subprocess.Popen('bowtie2-build -q %s %s' \ % (fa, btdb), shell = True) p.communicate() return btdb
[ "def", "bowtiedb", "(", "fa", ",", "keepDB", ")", ":", "btdir", "=", "'%s/bt2'", "%", "(", "os", ".", "getcwd", "(", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "btdir", ")", ":", "os", ".", "mkdir", "(", "btdir", ")", "btdb", "=", "'%s/%s'", "%", "(", "btdir", ",", "fa", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ")", "if", "keepDB", "is", "True", ":", "if", "os", ".", "path", ".", "exists", "(", "'%s.1.bt2'", "%", "(", "btdb", ")", ")", ":", "return", "btdb", "p", "=", "subprocess", ".", "Popen", "(", "'bowtie2-build -q %s %s'", "%", "(", "fa", ",", "btdb", ")", ",", "shell", "=", "True", ")", "p", ".", "communicate", "(", ")", "return", "btdb" ]
make bowtie db
[ "make", "bowtie", "db" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/crossmap.py#L16-L31
train
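A hedged usage sketch, assuming `bowtiedb` is importable from `ctbBio.crossmap` and that `bowtie2-build` is on `PATH` (the FASTA path is a placeholder):

```python
from ctbBio.crossmap import bowtiedb  # assumed import path

# Builds ./bt2/ref.fa.*.bt2 on first call; with keepDB=True an existing
# index under ./bt2 is reused instead of rebuilt.
btdb = bowtiedb('genomes/ref.fa', keepDB=True)
print(btdb)  # <cwd>/bt2/ref.fa
```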
christophertbrown/bioscripts
ctbBio/crossmap.py
bowtie
def bowtie(sam, btd, f, r, u, opt, no_shrink, threads): """ generate bowtie2 command """ bt2 = 'bowtie2 -x %s -p %s ' % (btd, threads) if f is not False: bt2 += '-1 %s -2 %s ' % (f, r) if u is not False: bt2 += '-U %s ' % (u) bt2 += opt if no_shrink is False: if f is False: bt2 += ' | shrinksam -u -k %s-shrunk.sam ' % (sam) else: bt2 += ' | shrinksam -k %s-shrunk.sam ' % (sam) else: bt2 += ' > %s.sam' % (sam) return bt2
python
def bowtie(sam, btd, f, r, u, opt, no_shrink, threads): """ generate bowtie2 command """ bt2 = 'bowtie2 -x %s -p %s ' % (btd, threads) if f is not False: bt2 += '-1 %s -2 %s ' % (f, r) if u is not False: bt2 += '-U %s ' % (u) bt2 += opt if no_shrink is False: if f is False: bt2 += ' | shrinksam -u -k %s-shrunk.sam ' % (sam) else: bt2 += ' | shrinksam -k %s-shrunk.sam ' % (sam) else: bt2 += ' > %s.sam' % (sam) return bt2
[ "def", "bowtie", "(", "sam", ",", "btd", ",", "f", ",", "r", ",", "u", ",", "opt", ",", "no_shrink", ",", "threads", ")", ":", "bt2", "=", "'bowtie2 -x %s -p %s '", "%", "(", "btd", ",", "threads", ")", "if", "f", "is", "not", "False", ":", "bt2", "+=", "'-1 %s -2 %s '", "%", "(", "f", ",", "r", ")", "if", "u", "is", "not", "False", ":", "bt2", "+=", "'-U %s '", "%", "(", "u", ")", "bt2", "+=", "opt", "if", "no_shrink", "is", "False", ":", "if", "f", "is", "False", ":", "bt2", "+=", "' | shrinksam -u -k %s-shrunk.sam '", "%", "(", "sam", ")", "else", ":", "bt2", "+=", "' | shrinksam -k %s-shrunk.sam '", "%", "(", "sam", ")", "else", ":", "bt2", "+=", "' > %s.sam'", "%", "(", "sam", ")", "return", "bt2" ]
generate bowtie2 command
[ "generate", "bowtie2", "command" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/crossmap.py#L33-L50
train
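Because `bowtie` only assembles a shell command string, it can be inspected without running bowtie2; the file names below are placeholders and the import path is an assumption:

```python
from ctbBio.crossmap import bowtie  # assumed import path

cmd = bowtie('sample-vs-ref', 'bt2/ref.fa', 'r1.fq', 'r2.fq', False,
             '--very-sensitive', no_shrink=True, threads='4')
print(cmd)
# bowtie2 -x bt2/ref.fa -p 4 -1 r1.fq -2 r2.fq --very-sensitive > sample-vs-ref.sam
```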
christophertbrown/bioscripts
ctbBio/crossmap.py
crossmap
def crossmap(fas, reads, options, no_shrink, keepDB, threads, cluster, nodes): """ map all read sets against all fasta files """ if cluster is True: threads = '48' btc = [] for fa in fas: btd = bowtiedb(fa, keepDB) F, R, U = reads if F is not False: if U is False: u = False for i, f in enumerate(F): r = R[i] if U is not False: u = U[i] sam = '%s/%s-vs-%s' % (os.getcwd(), \ fa.rsplit('/', 1)[-1], f.rsplit('/', 1)[-1].rsplit('.', 3)[0]) btc.append(bowtie(sam, btd, f, r, u, options, no_shrink, threads)) else: f = False r = False for u in U: sam = '%s/%s-vs-%s' % (os.getcwd(), \ fa.rsplit('/', 1)[-1], u.rsplit('/', 1)[-1].rsplit('.', 3)[0]) btc.append(bowtie(sam, btd, f, r, u, options, no_shrink, threads)) if cluster is False: for i in btc: p = subprocess.Popen(i, shell = True) p.communicate() else: ID = ''.join(random.choice([str(i) for i in range(0, 9)]) for _ in range(5)) for node, commands in enumerate(chunks(btc, nodes), 1): bs = open('%s/crossmap-qsub.%s.%s.sh' % (os.getcwd(), ID, node), 'w') print('\n'.join(commands), file=bs) bs.close() p = subprocess.Popen(\ 'qsub -V -N crossmap %s' \ % (bs.name), \ shell = True) p.communicate()
python
def crossmap(fas, reads, options, no_shrink, keepDB, threads, cluster, nodes): """ map all read sets against all fasta files """ if cluster is True: threads = '48' btc = [] for fa in fas: btd = bowtiedb(fa, keepDB) F, R, U = reads if F is not False: if U is False: u = False for i, f in enumerate(F): r = R[i] if U is not False: u = U[i] sam = '%s/%s-vs-%s' % (os.getcwd(), \ fa.rsplit('/', 1)[-1], f.rsplit('/', 1)[-1].rsplit('.', 3)[0]) btc.append(bowtie(sam, btd, f, r, u, options, no_shrink, threads)) else: f = False r = False for u in U: sam = '%s/%s-vs-%s' % (os.getcwd(), \ fa.rsplit('/', 1)[-1], u.rsplit('/', 1)[-1].rsplit('.', 3)[0]) btc.append(bowtie(sam, btd, f, r, u, options, no_shrink, threads)) if cluster is False: for i in btc: p = subprocess.Popen(i, shell = True) p.communicate() else: ID = ''.join(random.choice([str(i) for i in range(0, 9)]) for _ in range(5)) for node, commands in enumerate(chunks(btc, nodes), 1): bs = open('%s/crossmap-qsub.%s.%s.sh' % (os.getcwd(), ID, node), 'w') print('\n'.join(commands), file=bs) bs.close() p = subprocess.Popen(\ 'qsub -V -N crossmap %s' \ % (bs.name), \ shell = True) p.communicate()
[ "def", "crossmap", "(", "fas", ",", "reads", ",", "options", ",", "no_shrink", ",", "keepDB", ",", "threads", ",", "cluster", ",", "nodes", ")", ":", "if", "cluster", "is", "True", ":", "threads", "=", "'48'", "btc", "=", "[", "]", "for", "fa", "in", "fas", ":", "btd", "=", "bowtiedb", "(", "fa", ",", "keepDB", ")", "F", ",", "R", ",", "U", "=", "reads", "if", "F", "is", "not", "False", ":", "if", "U", "is", "False", ":", "u", "=", "False", "for", "i", ",", "f", "in", "enumerate", "(", "F", ")", ":", "r", "=", "R", "[", "i", "]", "if", "U", "is", "not", "False", ":", "u", "=", "U", "[", "i", "]", "sam", "=", "'%s/%s-vs-%s'", "%", "(", "os", ".", "getcwd", "(", ")", ",", "fa", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ",", "f", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ".", "rsplit", "(", "'.'", ",", "3", ")", "[", "0", "]", ")", "btc", ".", "append", "(", "bowtie", "(", "sam", ",", "btd", ",", "f", ",", "r", ",", "u", ",", "options", ",", "no_shrink", ",", "threads", ")", ")", "else", ":", "f", "=", "False", "r", "=", "False", "for", "u", "in", "U", ":", "sam", "=", "'%s/%s-vs-%s'", "%", "(", "os", ".", "getcwd", "(", ")", ",", "fa", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ",", "u", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ".", "rsplit", "(", "'.'", ",", "3", ")", "[", "0", "]", ")", "btc", ".", "append", "(", "bowtie", "(", "sam", ",", "btd", ",", "f", ",", "r", ",", "u", ",", "options", ",", "no_shrink", ",", "threads", ")", ")", "if", "cluster", "is", "False", ":", "for", "i", "in", "btc", ":", "p", "=", "subprocess", ".", "Popen", "(", "i", ",", "shell", "=", "True", ")", "p", ".", "communicate", "(", ")", "else", ":", "ID", "=", "''", ".", "join", "(", "random", ".", "choice", "(", "[", "str", "(", "i", ")", "for", "i", "in", "range", "(", "0", ",", "9", ")", "]", ")", "for", "_", "in", "range", "(", "5", ")", ")", "for", "node", ",", "commands", "in", "enumerate", "(", "chunks", "(", "btc", ",", "nodes", ")", ",", "1", ")", ":", "bs", "=", "open", "(", "'%s/crossmap-qsub.%s.%s.sh'", "%", "(", "os", ".", "getcwd", "(", ")", ",", "ID", ",", "node", ")", ",", "'w'", ")", "print", "(", "'\\n'", ".", "join", "(", "commands", ")", ",", "file", "=", "bs", ")", "bs", ".", "close", "(", ")", "p", "=", "subprocess", ".", "Popen", "(", "'qsub -V -N crossmap %s'", "%", "(", "bs", ".", "name", ")", ",", "shell", "=", "True", ")", "p", ".", "communicate", "(", ")" ]
map all read sets against all fasta files
[ "map", "all", "read", "sets", "against", "all", "fasta", "files" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/crossmap.py#L55-L96
train
disqus/nydus
nydus/db/base.py
BaseCluster.get_conn
def get_conn(self, *args, **kwargs): """ Returns a connection object from the router given ``args``. Useful in cases where a connection cannot be automatically determined during all steps of the process. An example of this would be Redis pipelines. """ connections = self.__connections_for('get_conn', args=args, kwargs=kwargs) if len(connections) == 1: return connections[0] else: return connections
python
def get_conn(self, *args, **kwargs): """ Returns a connection object from the router given ``args``. Useful in cases where a connection cannot be automatically determined during all steps of the process. An example of this would be Redis pipelines. """ connections = self.__connections_for('get_conn', args=args, kwargs=kwargs) if len(connections) == 1: return connections[0] else: return connections
[ "def", "get_conn", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "connections", "=", "self", ".", "__connections_for", "(", "'get_conn'", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "if", "len", "(", "connections", ")", "is", "1", ":", "return", "connections", "[", "0", "]", "else", ":", "return", "connections" ]
Returns a connection object from the router given ``args``. Useful in cases where a connection cannot be automatically determined during all steps of the process. An example of this would be Redis pipelines.
[ "Returns", "a", "connection", "object", "from", "the", "router", "given", "args", "." ]
9b505840da47a34f758a830c3992fa5dcb7bb7ad
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/base.py#L100-L113
train
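A note on the `len(connections) == 1` check in `get_conn`: for integers, equality (`==`) is the correct comparison; identity (`is`) only coincidentally holds for CPython's cached small ints and draws a SyntaxWarning on Python 3.8+:

```python
a = int("257")   # built at runtime, so not constant-folded into a shared object
print(a == 257)  # True  -> value equality, what the code means
print(a is 257)  # False in CPython -> object identity, an implementation detail
```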
scottrice/pysteam
pysteam/_crc_algorithms.py
Crc.__get_nondirect_init
def __get_nondirect_init(self, init): """ return the non-direct init if the direct algorithm has been selected. """ crc = init for i in range(self.Width): bit = crc & 0x01 if bit: crc^= self.Poly crc >>= 1 if bit: crc |= self.MSB_Mask return crc & self.Mask
python
def __get_nondirect_init(self, init): """ return the non-direct init if the direct algorithm has been selected. """ crc = init for i in range(self.Width): bit = crc & 0x01 if bit: crc^= self.Poly crc >>= 1 if bit: crc |= self.MSB_Mask return crc & self.Mask
[ "def", "__get_nondirect_init", "(", "self", ",", "init", ")", ":", "crc", "=", "init", "for", "i", "in", "range", "(", "self", ".", "Width", ")", ":", "bit", "=", "crc", "&", "0x01", "if", "bit", ":", "crc", "^=", "self", ".", "Poly", "crc", ">>=", "1", "if", "bit", ":", "crc", "|=", "self", ".", "MSB_Mask", "return", "crc", "&", "self", ".", "Mask" ]
return the non-direct init if the direct algorithm has been selected.
[ "return", "the", "non", "-", "direct", "init", "if", "the", "direct", "algorithm", "has", "been", "selected", "." ]
1eb2254b5235a053a953e596fa7602d0b110245d
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L98-L110
train
scottrice/pysteam
pysteam/_crc_algorithms.py
Crc.reflect
def reflect(self, data, width): """ reflect a data word, i.e. reverts the bit order. """ x = data & 0x01 for i in range(width - 1): data >>= 1 x = (x << 1) | (data & 0x01) return x
python
def reflect(self, data, width): """ reflect a data word, i.e. reverts the bit order. """ x = data & 0x01 for i in range(width - 1): data >>= 1 x = (x << 1) | (data & 0x01) return x
[ "def", "reflect", "(", "self", ",", "data", ",", "width", ")", ":", "x", "=", "data", "&", "0x01", "for", "i", "in", "range", "(", "width", "-", "1", ")", ":", "data", ">>=", "1", "x", "=", "(", "x", "<<", "1", ")", "|", "(", "data", "&", "0x01", ")", "return", "x" ]
reflect a data word, i.e. reverses the bit order.
[ "reflect", "a", "data", "word", "i", ".", "e", ".", "reverts", "the", "bit", "order", "." ]
1eb2254b5235a053a953e596fa7602d0b110245d
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L115-L123
train
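A standalone restatement of the bit-reflection loop (the same logic as the method above, minus `self`), to make the behavior concrete:

```python
def reflect(data, width):
    """Reverse the lowest `width` bits of `data`."""
    x = data & 0x01
    for _ in range(width - 1):
        data >>= 1
        x = (x << 1) | (data & 0x01)
    return x

print(bin(reflect(0b1101, 4)))  # 0b1011 -> bit order reversed
```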
scottrice/pysteam
pysteam/_crc_algorithms.py
Crc.bit_by_bit
def bit_by_bit(self, in_data): """ Classic simple and slow CRC implementation. This function iterates bit by bit over the augmented input message and returns the calculated CRC value at the end. """ # If the input data is a string, convert to bytes. if isinstance(in_data, str): in_data = [ord(c) for c in in_data] register = self.NonDirectInit for octet in in_data: if self.ReflectIn: octet = self.reflect(octet, 8) for i in range(8): topbit = register & self.MSB_Mask register = ((register << 1) & self.Mask) | ((octet >> (7 - i)) & 0x01) if topbit: register ^= self.Poly for i in range(self.Width): topbit = register & self.MSB_Mask register = ((register << 1) & self.Mask) if topbit: register ^= self.Poly if self.ReflectOut: register = self.reflect(register, self.Width) return register ^ self.XorOut
python
def bit_by_bit(self, in_data): """ Classic simple and slow CRC implementation. This function iterates bit by bit over the augmented input message and returns the calculated CRC value at the end. """ # If the input data is a string, convert to bytes. if isinstance(in_data, str): in_data = [ord(c) for c in in_data] register = self.NonDirectInit for octet in in_data: if self.ReflectIn: octet = self.reflect(octet, 8) for i in range(8): topbit = register & self.MSB_Mask register = ((register << 1) & self.Mask) | ((octet >> (7 - i)) & 0x01) if topbit: register ^= self.Poly for i in range(self.Width): topbit = register & self.MSB_Mask register = ((register << 1) & self.Mask) if topbit: register ^= self.Poly if self.ReflectOut: register = self.reflect(register, self.Width) return register ^ self.XorOut
[ "def", "bit_by_bit", "(", "self", ",", "in_data", ")", ":", "if", "isinstance", "(", "in_data", ",", "str", ")", ":", "in_data", "=", "[", "ord", "(", "c", ")", "for", "c", "in", "in_data", "]", "register", "=", "self", ".", "NonDirectInit", "for", "octet", "in", "in_data", ":", "if", "self", ".", "ReflectIn", ":", "octet", "=", "self", ".", "reflect", "(", "octet", ",", "8", ")", "for", "i", "in", "range", "(", "8", ")", ":", "topbit", "=", "register", "&", "self", ".", "MSB_Mask", "register", "=", "(", "(", "register", "<<", "1", ")", "&", "self", ".", "Mask", ")", "|", "(", "(", "octet", ">>", "(", "7", "-", "i", ")", ")", "&", "0x01", ")", "if", "topbit", ":", "register", "^=", "self", ".", "Poly", "for", "i", "in", "range", "(", "self", ".", "Width", ")", ":", "topbit", "=", "register", "&", "self", ".", "MSB_Mask", "register", "=", "(", "(", "register", "<<", "1", ")", "&", "self", ".", "Mask", ")", "if", "topbit", ":", "register", "^=", "self", ".", "Poly", "if", "self", ".", "ReflectOut", ":", "register", "=", "self", ".", "reflect", "(", "register", ",", "self", ".", "Width", ")", "return", "register", "^", "self", ".", "XorOut" ]
Classic simple and slow CRC implementation. This function iterates bit by bit over the augmented input message and returns the calculated CRC value at the end.
[ "Classic", "simple", "and", "slow", "CRC", "implementation", ".", "This", "function", "iterates", "bit", "by", "bit", "over", "the", "augmented", "input", "message", "and", "returns", "the", "calculated", "CRC", "value", "at", "the", "end", "." ]
1eb2254b5235a053a953e596fa7602d0b110245d
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L128-L156
train
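A hedged usage sketch: assuming the `Crc` constructor accepts the usual pycrc-style keyword parameters (width, poly, reflect_in, xor_in, reflect_out, xor_out; the constructor itself is not shown in this excerpt), a standard CRC-32 of the string `"123456789"` should produce the well-known check value `0xCBF43926`:

```python
# Constructor signature is an assumption; the parameters are the standard CRC-32 model.
crc32 = Crc(width=32, poly=0x04C11DB7,
            reflect_in=True, xor_in=0xFFFFFFFF,
            reflect_out=True, xor_out=0xFFFFFFFF)
print(hex(crc32.bit_by_bit("123456789")))  # expected: 0xcbf43926
```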
scottrice/pysteam
pysteam/_crc_algorithms.py
Crc.gen_table
def gen_table(self): """ This function generates the CRC table used for the table_driven CRC algorithm. The Python version cannot handle tables of an index width other than 8. See the generated C code for tables with different sizes instead. """ table_length = 1 << self.TableIdxWidth tbl = [0] * table_length for i in range(table_length): register = i if self.ReflectIn: register = self.reflect(register, self.TableIdxWidth) register = register << (self.Width - self.TableIdxWidth + self.CrcShift) for j in range(self.TableIdxWidth): if register & (self.MSB_Mask << self.CrcShift) != 0: register = (register << 1) ^ (self.Poly << self.CrcShift) else: register = (register << 1) if self.ReflectIn: register = self.reflect(register >> self.CrcShift, self.Width) << self.CrcShift tbl[i] = register & (self.Mask << self.CrcShift) return tbl
python
def gen_table(self): """ This function generates the CRC table used for the table_driven CRC algorithm. The Python version cannot handle tables of an index width other than 8. See the generated C code for tables with different sizes instead. """ table_length = 1 << self.TableIdxWidth tbl = [0] * table_length for i in range(table_length): register = i if self.ReflectIn: register = self.reflect(register, self.TableIdxWidth) register = register << (self.Width - self.TableIdxWidth + self.CrcShift) for j in range(self.TableIdxWidth): if register & (self.MSB_Mask << self.CrcShift) != 0: register = (register << 1) ^ (self.Poly << self.CrcShift) else: register = (register << 1) if self.ReflectIn: register = self.reflect(register >> self.CrcShift, self.Width) << self.CrcShift tbl[i] = register & (self.Mask << self.CrcShift) return tbl
[ "def", "gen_table", "(", "self", ")", ":", "table_length", "=", "1", "<<", "self", ".", "TableIdxWidth", "tbl", "=", "[", "0", "]", "*", "table_length", "for", "i", "in", "range", "(", "table_length", ")", ":", "register", "=", "i", "if", "self", ".", "ReflectIn", ":", "register", "=", "self", ".", "reflect", "(", "register", ",", "self", ".", "TableIdxWidth", ")", "register", "=", "register", "<<", "(", "self", ".", "Width", "-", "self", ".", "TableIdxWidth", "+", "self", ".", "CrcShift", ")", "for", "j", "in", "range", "(", "self", ".", "TableIdxWidth", ")", ":", "if", "register", "&", "(", "self", ".", "MSB_Mask", "<<", "self", ".", "CrcShift", ")", "!=", "0", ":", "register", "=", "(", "register", "<<", "1", ")", "^", "(", "self", ".", "Poly", "<<", "self", ".", "CrcShift", ")", "else", ":", "register", "=", "(", "register", "<<", "1", ")", "if", "self", ".", "ReflectIn", ":", "register", "=", "self", ".", "reflect", "(", "register", ">>", "self", ".", "CrcShift", ",", "self", ".", "Width", ")", "<<", "self", ".", "CrcShift", "tbl", "[", "i", "]", "=", "register", "&", "(", "self", ".", "Mask", "<<", "self", ".", "CrcShift", ")", "return", "tbl" ]
This function generates the CRC table used for the table_driven CRC algorithm. The Python version cannot handle tables of an index width other than 8. See the generated C code for tables with different sizes instead.
[ "This", "function", "generates", "the", "CRC", "table", "used", "for", "the", "table_driven", "CRC", "algorithm", ".", "The", "Python", "version", "cannot", "handle", "tables", "of", "an", "index", "width", "other", "than", "8", ".", "See", "the", "generated", "C", "code", "for", "tables", "with", "different", "sizes", "instead", "." ]
1eb2254b5235a053a953e596fa7602d0b110245d
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L190-L212
train
scottrice/pysteam
pysteam/_crc_algorithms.py
Crc.table_driven
def table_driven(self, in_data): """ The Standard table_driven CRC algorithm. """ # If the input data is a string, convert to bytes. if isinstance(in_data, str): in_data = [ord(c) for c in in_data] tbl = self.gen_table() register = self.DirectInit << self.CrcShift if not self.ReflectIn: for octet in in_data: tblidx = ((register >> (self.Width - self.TableIdxWidth + self.CrcShift)) ^ octet) & 0xff register = ((register << (self.TableIdxWidth - self.CrcShift)) ^ tbl[tblidx]) & (self.Mask << self.CrcShift) register = register >> self.CrcShift else: register = self.reflect(register, self.Width + self.CrcShift) << self.CrcShift for octet in in_data: tblidx = ((register >> self.CrcShift) ^ octet) & 0xff register = ((register >> self.TableIdxWidth) ^ tbl[tblidx]) & (self.Mask << self.CrcShift) register = self.reflect(register, self.Width + self.CrcShift) & self.Mask if self.ReflectOut: register = self.reflect(register, self.Width) return register ^ self.XorOut
python
def table_driven(self, in_data): """ The Standard table_driven CRC algorithm. """ # If the input data is a string, convert to bytes. if isinstance(in_data, str): in_data = [ord(c) for c in in_data] tbl = self.gen_table() register = self.DirectInit << self.CrcShift if not self.ReflectIn: for octet in in_data: tblidx = ((register >> (self.Width - self.TableIdxWidth + self.CrcShift)) ^ octet) & 0xff register = ((register << (self.TableIdxWidth - self.CrcShift)) ^ tbl[tblidx]) & (self.Mask << self.CrcShift) register = register >> self.CrcShift else: register = self.reflect(register, self.Width + self.CrcShift) << self.CrcShift for octet in in_data: tblidx = ((register >> self.CrcShift) ^ octet) & 0xff register = ((register >> self.TableIdxWidth) ^ tbl[tblidx]) & (self.Mask << self.CrcShift) register = self.reflect(register, self.Width + self.CrcShift) & self.Mask if self.ReflectOut: register = self.reflect(register, self.Width) return register ^ self.XorOut
[ "def", "table_driven", "(", "self", ",", "in_data", ")", ":", "if", "isinstance", "(", "in_data", ",", "str", ")", ":", "in_data", "=", "[", "ord", "(", "c", ")", "for", "c", "in", "in_data", "]", "tbl", "=", "self", ".", "gen_table", "(", ")", "register", "=", "self", ".", "DirectInit", "<<", "self", ".", "CrcShift", "if", "not", "self", ".", "ReflectIn", ":", "for", "octet", "in", "in_data", ":", "tblidx", "=", "(", "(", "register", ">>", "(", "self", ".", "Width", "-", "self", ".", "TableIdxWidth", "+", "self", ".", "CrcShift", ")", ")", "^", "octet", ")", "&", "0xff", "register", "=", "(", "(", "register", "<<", "(", "self", ".", "TableIdxWidth", "-", "self", ".", "CrcShift", ")", ")", "^", "tbl", "[", "tblidx", "]", ")", "&", "(", "self", ".", "Mask", "<<", "self", ".", "CrcShift", ")", "register", "=", "register", ">>", "self", ".", "CrcShift", "else", ":", "register", "=", "self", ".", "reflect", "(", "register", ",", "self", ".", "Width", "+", "self", ".", "CrcShift", ")", "<<", "self", ".", "CrcShift", "for", "octet", "in", "in_data", ":", "tblidx", "=", "(", "(", "register", ">>", "self", ".", "CrcShift", ")", "^", "octet", ")", "&", "0xff", "register", "=", "(", "(", "register", ">>", "self", ".", "TableIdxWidth", ")", "^", "tbl", "[", "tblidx", "]", ")", "&", "(", "self", ".", "Mask", "<<", "self", ".", "CrcShift", ")", "register", "=", "self", ".", "reflect", "(", "register", ",", "self", ".", "Width", "+", "self", ".", "CrcShift", ")", "&", "self", ".", "Mask", "if", "self", ".", "ReflectOut", ":", "register", "=", "self", ".", "reflect", "(", "register", ",", "self", ".", "Width", ")", "return", "register", "^", "self", ".", "XorOut" ]
The Standard table_driven CRC algorithm.
[ "The", "Standard", "table_driven", "CRC", "algorithm", "." ]
1eb2254b5235a053a953e596fa7602d0b110245d
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L217-L242
train
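Continuing the sketch above (same constructor assumption), the table-driven and bit-by-bit implementations should agree on any input, which makes a cheap self-check:

```python
data = "hello, crc"
assert crc32.table_driven(data) == crc32.bit_by_bit(data)  # same model, same result
```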
christophertbrown/bioscripts
ctbBio/strip_masked.py
parse_masked
def parse_masked(seq, min_len): """ parse masked sequence into non-masked and masked regions """ nm, masked = [], [[]] prev = None for base in seq[1]: if base.isupper(): if masked != [[]] and len(masked[-1]) < min_len: nm.extend(masked[-1]) del masked[-1] nm.append(base) prev = False elif base.islower(): if prev is False: masked.append([]) masked[-1].append(base) prev = True return nm, masked
python
def parse_masked(seq, min_len): """ parse masked sequence into non-masked and masked regions """ nm, masked = [], [[]] prev = None for base in seq[1]: if base.isupper(): if masked != [[]] and len(masked[-1]) < min_len: nm.extend(masked[-1]) del masked[-1] nm.append(base) prev = False elif base.islower(): if prev is False: masked.append([]) masked[-1].append(base) prev = True return nm, masked
[ "def", "parse_masked", "(", "seq", ",", "min_len", ")", ":", "nm", ",", "masked", "=", "[", "]", ",", "[", "[", "]", "]", "prev", "=", "None", "for", "base", "in", "seq", "[", "1", "]", ":", "if", "base", ".", "isupper", "(", ")", ":", "nm", ".", "append", "(", "base", ")", "if", "masked", "!=", "[", "[", "]", "]", "and", "len", "(", "masked", "[", "-", "1", "]", ")", "<", "min_len", ":", "nm", ".", "extend", "(", "masked", "[", "-", "1", "]", ")", "del", "masked", "[", "-", "1", "]", "prev", "=", "False", "elif", "base", ".", "islower", "(", ")", ":", "if", "prev", "is", "False", ":", "masked", ".", "append", "(", "[", "]", ")", "masked", "[", "-", "1", "]", ".", "append", "(", "base", ")", "prev", "=", "True", "return", "nm", ",", "masked" ]
parse masked sequence into non-masked and masked regions
[ "parse", "masked", "sequence", "into", "non", "-", "masked", "and", "masked", "regions" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_masked.py#L13-L31
train
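A runnable sketch of `parse_masked` on a toy record (`seq` is a `(header, sequence)` pair): with `min_len=5`, the 8-base lowercase run is reported as masked, while the 2-base run is shorter than the threshold and is folded back into the unmasked sequence in order:

```python
seq = ('>contig_1', 'ACGTacgtacgtACGTacACGT')
nm, masked = parse_masked(seq, min_len=5)
print(''.join(nm))                        # ACGTACGTacACGT -> short 'ac' run kept in place
print([''.join(m) for m in masked if m])  # ['acgtacgt']   -> only the long masked run
```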
christophertbrown/bioscripts
ctbBio/strip_masked.py
strip_masked
def strip_masked(fasta, min_len, print_masked): """ remove masked regions from fasta file as long as they are longer than min_len """ for seq in parse_fasta(fasta): nm, masked = parse_masked(seq, min_len) nm = ['%s removed_masked >=%s' % (seq[0], min_len), ''.join(nm)] yield [0, nm] if print_masked is True: for i, m in enumerate([i for i in masked if i != []], 1): m = ['%s insertion:%s' % (seq[0], i), ''.join(m)] yield [1, m]
python
def strip_masked(fasta, min_len, print_masked): """ remove masked regions from fasta file as long as they are longer than min_len """ for seq in parse_fasta(fasta): nm, masked = parse_masked(seq, min_len) nm = ['%s removed_masked >=%s' % (seq[0], min_len), ''.join(nm)] yield [0, nm] if print_masked is True: for i, m in enumerate([i for i in masked if i != []], 1): m = ['%s insertion:%s' % (seq[0], i), ''.join(m)] yield [1, m]
[ "def", "strip_masked", "(", "fasta", ",", "min_len", ",", "print_masked", ")", ":", "for", "seq", "in", "parse_fasta", "(", "fasta", ")", ":", "nm", ",", "masked", "=", "parse_masked", "(", "seq", ",", "min_len", ")", "nm", "=", "[", "'%s removed_masked >=%s'", "%", "(", "seq", "[", "0", "]", ",", "min_len", ")", ",", "''", ".", "join", "(", "nm", ")", "]", "yield", "[", "0", ",", "nm", "]", "if", "print_masked", "is", "True", ":", "for", "i", ",", "m", "in", "enumerate", "(", "[", "i", "for", "i", "in", "masked", "if", "i", "!=", "[", "]", "]", ",", "1", ")", ":", "m", "=", "[", "'%s insertion:%s'", "%", "(", "seq", "[", "0", "]", ",", "i", ")", ",", "''", ".", "join", "(", "m", ")", "]", "yield", "[", "1", ",", "m", "]" ]
remove masked regions from fasta file as long as they are longer than min_len
[ "remove", "masked", "regions", "from", "fasta", "file", "as", "long", "as", "they", "are", "longer", "than", "min_len" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_masked.py#L33-L45
train
smdabdoub/phylotoast
bin/network_plots_gephi.py
get_relative_abundance
def get_relative_abundance(biomfile): """ Return arcsine transformed relative abundance from a BIOM format file. :type biomfile: BIOM format file :param biomfile: BIOM format file used to obtain relative abundances for each OTU in a SampleID, which are used as node sizes in network plots. :type return: Dictionary of dictionaries. :return: Dictionary keyed on SampleID whose value is a dictionarykeyed on OTU Name whose value is the arc sine tranfsormed relative abundance value for that SampleID-OTU Name pair. """ biomf = biom.load_table(biomfile) norm_biomf = biomf.norm(inplace=False) rel_abd = {} for sid in norm_biomf.ids(): rel_abd[sid] = {} for otuid in norm_biomf.ids("observation"): otuname = oc.otu_name(norm_biomf.metadata(otuid, axis="observation")["taxonomy"]) otuname = " ".join(otuname.split("_")) abd = norm_biomf.get_value_by_ids(otuid, sid) rel_abd[sid][otuname] = abd ast_rel_abd = bc.arcsine_sqrt_transform(rel_abd) return ast_rel_abd
python
def get_relative_abundance(biomfile): """ Return arcsine transformed relative abundance from a BIOM format file. :type biomfile: BIOM format file :param biomfile: BIOM format file used to obtain relative abundances for each OTU in a SampleID, which are used as node sizes in network plots. :type return: Dictionary of dictionaries. :return: Dictionary keyed on SampleID whose value is a dictionarykeyed on OTU Name whose value is the arc sine tranfsormed relative abundance value for that SampleID-OTU Name pair. """ biomf = biom.load_table(biomfile) norm_biomf = biomf.norm(inplace=False) rel_abd = {} for sid in norm_biomf.ids(): rel_abd[sid] = {} for otuid in norm_biomf.ids("observation"): otuname = oc.otu_name(norm_biomf.metadata(otuid, axis="observation")["taxonomy"]) otuname = " ".join(otuname.split("_")) abd = norm_biomf.get_value_by_ids(otuid, sid) rel_abd[sid][otuname] = abd ast_rel_abd = bc.arcsine_sqrt_transform(rel_abd) return ast_rel_abd
[ "def", "get_relative_abundance", "(", "biomfile", ")", ":", "biomf", "=", "biom", ".", "load_table", "(", "biomfile", ")", "norm_biomf", "=", "biomf", ".", "norm", "(", "inplace", "=", "False", ")", "rel_abd", "=", "{", "}", "for", "sid", "in", "norm_biomf", ".", "ids", "(", ")", ":", "rel_abd", "[", "sid", "]", "=", "{", "}", "for", "otuid", "in", "norm_biomf", ".", "ids", "(", "\"observation\"", ")", ":", "otuname", "=", "oc", ".", "otu_name", "(", "norm_biomf", ".", "metadata", "(", "otuid", ",", "axis", "=", "\"observation\"", ")", "[", "\"taxonomy\"", "]", ")", "otuname", "=", "\" \"", ".", "join", "(", "otuname", ".", "split", "(", "\"_\"", ")", ")", "abd", "=", "norm_biomf", ".", "get_value_by_ids", "(", "otuid", ",", "sid", ")", "rel_abd", "[", "sid", "]", "[", "otuname", "]", "=", "abd", "ast_rel_abd", "=", "bc", ".", "arcsine_sqrt_transform", "(", "rel_abd", ")", "return", "ast_rel_abd" ]
Return arcsine transformed relative abundance from a BIOM format file. :type biomfile: BIOM format file :param biomfile: BIOM format file used to obtain relative abundances for each OTU in a SampleID, which are used as node sizes in network plots. :type return: Dictionary of dictionaries. :return: Dictionary keyed on SampleID whose value is a dictionary keyed on OTU Name whose value is the arcsine transformed relative abundance value for that SampleID-OTU Name pair.
[ "Return", "arcsine", "transformed", "relative", "abundance", "from", "a", "BIOM", "format", "file", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/network_plots_gephi.py#L33-L57
train
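The transform applied at the end is the arcsine square-root (variance-stabilizing) transform; assuming `bc.arcsine_sqrt_transform` applies it element-wise, each relative abundance p becomes asin(sqrt(p)):

```python
import math

p = 0.25                        # a relative abundance in [0, 1]
print(math.asin(math.sqrt(p)))  # 0.5235987... (= pi/6)
```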
smdabdoub/phylotoast
bin/iTol.py
find_otu
def find_otu(otuid, tree): """ Find an OTU ID in a Newick-format tree. Return the starting position of the ID or None if not found. """ for m in re.finditer(otuid, tree): before, after = tree[m.start()-1], tree[m.start()+len(otuid)] if before in ["(", ",", ")"] and after in [":", ";"]: return m.start() return None
python
def find_otu(otuid, tree): """ Find an OTU ID in a Newick-format tree. Return the starting position of the ID or None if not found. """ for m in re.finditer(otuid, tree): before, after = tree[m.start()-1], tree[m.start()+len(otuid)] if before in ["(", ",", ")"] and after in [":", ";"]: return m.start() return None
[ "def", "find_otu", "(", "otuid", ",", "tree", ")", ":", "for", "m", "in", "re", ".", "finditer", "(", "otuid", ",", "tree", ")", ":", "before", ",", "after", "=", "tree", "[", "m", ".", "start", "(", ")", "-", "1", "]", ",", "tree", "[", "m", ".", "start", "(", ")", "+", "len", "(", "otuid", ")", "]", "if", "before", "in", "[", "\"(\"", ",", "\",\"", ",", "\")\"", "]", "and", "after", "in", "[", "\":\"", ",", "\";\"", "]", ":", "return", "m", ".", "start", "(", ")", "return", "None" ]
Find an OTU ID in a Newick-format tree. Return the starting position of the ID or None if not found.
[ "Find", "an", "OTU", "ID", "in", "a", "Newick", "-", "format", "tree", ".", "Return", "the", "starting", "position", "of", "the", "ID", "or", "None", "if", "not", "found", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/iTol.py#L17-L26
train
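A runnable sketch of `find_otu` on a toy Newick string: a hit only counts when it is delimited like a leaf label, so the `OTU1` prefix inside `OTU12` is not matched on its own:

```python
tree = '(OTU1:0.1,(OTU12:0.2,OTU7:0.3):0.05);'
print(find_otu('OTU1', tree))   # 1    -> the standalone leaf at position 1
print(find_otu('OTU9', tree))   # None -> not present in the tree
```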
smdabdoub/phylotoast
bin/iTol.py
newick_replace_otuids
def newick_replace_otuids(tree, biomf): """ Replace the OTU ids in the Newick phylogenetic tree format with truncated OTU names """ for val, id_, md in biomf.iter(axis="observation"): otu_loc = find_otu(id_, tree) if otu_loc is not None: tree = tree[:otu_loc] + \ oc.otu_name(md["taxonomy"]) + \ tree[otu_loc + len(id_):] return tree
python
def newick_replace_otuids(tree, biomf): """ Replace the OTU ids in the Newick phylogenetic tree format with truncated OTU names """ for val, id_, md in biomf.iter(axis="observation"): otu_loc = find_otu(id_, tree) if otu_loc is not None: tree = tree[:otu_loc] + \ oc.otu_name(md["taxonomy"]) + \ tree[otu_loc + len(id_):] return tree
[ "def", "newick_replace_otuids", "(", "tree", ",", "biomf", ")", ":", "for", "val", ",", "id_", ",", "md", "in", "biomf", ".", "iter", "(", "axis", "=", "\"observation\"", ")", ":", "otu_loc", "=", "find_otu", "(", "id_", ",", "tree", ")", "if", "otu_loc", "is", "not", "None", ":", "tree", "=", "tree", "[", ":", "otu_loc", "]", "+", "oc", ".", "otu_name", "(", "md", "[", "\"taxonomy\"", "]", ")", "+", "tree", "[", "otu_loc", "+", "len", "(", "id_", ")", ":", "]", "return", "tree" ]
Replace the OTU ids in the Newick phylogenetic tree format with truncated OTU names
[ "Replace", "the", "OTU", "ids", "in", "the", "Newick", "phylogenetic", "tree", "format", "with", "truncated", "OTU", "names" ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/iTol.py#L29-L40
train
christophertbrown/bioscripts
ctbBio/cluster_ani.py
genome_info
def genome_info(genome, info): """ return genome info for choosing representative if ggKbase table provided - choose rep based on SCGs and genome length - priority for most SCGs - extra SCGs, then largest genome otherwise, based on largest genome """ try: scg = info['#SCGs'] dups = info['#SCG duplicates'] length = info['genome size (bp)'] return [scg - dups, length, genome] except: return [False, False, info['genome size (bp)'], genome]
python
def genome_info(genome, info): """ return genome info for choosing representative if ggKbase table provided - choose rep based on SCGs and genome length - priority for most SCGs - extra SCGs, then largest genome otherwise, based on largest genome """ try: scg = info['#SCGs'] dups = info['#SCG duplicates'] length = info['genome size (bp)'] return [scg - dups, length, genome] except: return [False, False, info['genome size (bp)'], genome]
[ "def", "genome_info", "(", "genome", ",", "info", ")", ":", "try", ":", "scg", "=", "info", "[", "'#SCGs'", "]", "dups", "=", "info", "[", "'#SCG duplicates'", "]", "length", "=", "info", "[", "'genome size (bp)'", "]", "return", "[", "scg", "-", "dups", ",", "length", ",", "genome", "]", "except", ":", "return", "[", "False", ",", "False", ",", "info", "[", "'genome size (bp)'", "]", ",", "genome", "]" ]
return genome info for choosing representative if ggKbase table provided - choose rep based on SCGs and genome length - priority for most SCGs - extra SCGs, then largest genome otherwise, based on largest genome
[ "return", "genome", "info", "for", "choosing", "representative" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L97-L112
train
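A sketch of how `genome_info` keys drive representative selection (mirroring the reverse sort in `print_clusters` below): net SCG count is compared first, and a tie is broken by genome length. The stats here are invented:

```python
candidates = [
    genome_info('gA', {'#SCGs': 38, '#SCG duplicates': 2, 'genome size (bp)': 2100000}),
    genome_info('gB', {'#SCGs': 36, '#SCG duplicates': 0, 'genome size (bp)': 3000000}),
]
rep = sorted(candidates, reverse=True)[0][-1]
print(rep)  # 'gB' -> 36 net SCGs each, so the larger genome wins the tie
```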
christophertbrown/bioscripts
ctbBio/cluster_ani.py
print_clusters
def print_clusters(fastas, info, ANI): """ choose represenative genome and print cluster information *if ggKbase table is provided, use SCG info to choose best genome """ header = ['#cluster', 'num. genomes', 'rep.', 'genome', '#SCGs', '#SCG duplicates', \ 'genome size (bp)', 'fragments', 'list'] yield header in_cluster = [] for cluster_num, cluster in enumerate(connected_components(ANI)): cluster = sorted([genome_info(genome, info[genome]) \ for genome in cluster], \ key = lambda x: x[0:], reverse = True) rep = cluster[0][-1] cluster = [i[-1] for i in cluster] size = len(cluster) for genome in cluster: in_cluster.append(genome) try: stats = [size, rep, genome, \ info[genome]['#SCGs'], info[genome]['#SCG duplicates'], \ info[genome]['genome size (bp)'], info[genome]['# contigs'], cluster] except: stats = [size, rep, genome, \ 'n/a', 'n/a', \ info[genome]['genome size (bp)'], info[genome]['# contigs'], cluster] if rep == genome: stats = ['*%s' % (cluster_num)] + stats else: stats = [cluster_num] + stats yield stats # print singletons try: start = cluster_num + 1 except: start = 0 fastas = set([i.rsplit('.', 1)[0].rsplit('/', 1)[-1].rsplit('.contigs')[0] for i in fastas]) for cluster_num, genome in \ enumerate(fastas.difference(set(in_cluster)), start): try: stats = ['*%s' % (cluster_num), 1, genome, genome, \ info[genome]['#SCGs'], info[genome]['#SCG duplicates'], \ info[genome]['genome size (bp)'], info[genome]['# contigs'], [genome]] except: stats = ['*%s' % (cluster_num), 1, genome, genome, \ 'n/a', 'n/a', \ info[genome]['genome size (bp)'], info[genome]['# contigs'], [genome]] yield stats
python
def print_clusters(fastas, info, ANI): """ choose represenative genome and print cluster information *if ggKbase table is provided, use SCG info to choose best genome """ header = ['#cluster', 'num. genomes', 'rep.', 'genome', '#SCGs', '#SCG duplicates', \ 'genome size (bp)', 'fragments', 'list'] yield header in_cluster = [] for cluster_num, cluster in enumerate(connected_components(ANI)): cluster = sorted([genome_info(genome, info[genome]) \ for genome in cluster], \ key = lambda x: x[0:], reverse = True) rep = cluster[0][-1] cluster = [i[-1] for i in cluster] size = len(cluster) for genome in cluster: in_cluster.append(genome) try: stats = [size, rep, genome, \ info[genome]['#SCGs'], info[genome]['#SCG duplicates'], \ info[genome]['genome size (bp)'], info[genome]['# contigs'], cluster] except: stats = [size, rep, genome, \ 'n/a', 'n/a', \ info[genome]['genome size (bp)'], info[genome]['# contigs'], cluster] if rep == genome: stats = ['*%s' % (cluster_num)] + stats else: stats = [cluster_num] + stats yield stats # print singletons try: start = cluster_num + 1 except: start = 0 fastas = set([i.rsplit('.', 1)[0].rsplit('/', 1)[-1].rsplit('.contigs')[0] for i in fastas]) for cluster_num, genome in \ enumerate(fastas.difference(set(in_cluster)), start): try: stats = ['*%s' % (cluster_num), 1, genome, genome, \ info[genome]['#SCGs'], info[genome]['#SCG duplicates'], \ info[genome]['genome size (bp)'], info[genome]['# contigs'], [genome]] except: stats = ['*%s' % (cluster_num), 1, genome, genome, \ 'n/a', 'n/a', \ info[genome]['genome size (bp)'], info[genome]['# contigs'], [genome]] yield stats
[ "def", "print_clusters", "(", "fastas", ",", "info", ",", "ANI", ")", ":", "header", "=", "[", "'#cluster'", ",", "'num. genomes'", ",", "'rep.'", ",", "'genome'", ",", "'#SCGs'", ",", "'#SCG duplicates'", ",", "'genome size (bp)'", ",", "'fragments'", ",", "'list'", "]", "yield", "header", "in_cluster", "=", "[", "]", "for", "cluster_num", ",", "cluster", "in", "enumerate", "(", "connected_components", "(", "ANI", ")", ")", ":", "cluster", "=", "sorted", "(", "[", "genome_info", "(", "genome", ",", "info", "[", "genome", "]", ")", "for", "genome", "in", "cluster", "]", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", ":", "]", ",", "reverse", "=", "True", ")", "rep", "=", "cluster", "[", "0", "]", "[", "-", "1", "]", "cluster", "=", "[", "i", "[", "-", "1", "]", "for", "i", "in", "cluster", "]", "size", "=", "len", "(", "cluster", ")", "for", "genome", "in", "cluster", ":", "in_cluster", ".", "append", "(", "genome", ")", "try", ":", "stats", "=", "[", "size", ",", "rep", ",", "genome", ",", "info", "[", "genome", "]", "[", "'#SCGs'", "]", ",", "info", "[", "genome", "]", "[", "'#SCG duplicates'", "]", ",", "info", "[", "genome", "]", "[", "'genome size (bp)'", "]", ",", "info", "[", "genome", "]", "[", "'# contigs'", "]", ",", "cluster", "]", "except", ":", "stats", "=", "[", "size", ",", "rep", ",", "genome", ",", "'n/a'", ",", "'n/a'", ",", "info", "[", "genome", "]", "[", "'genome size (bp)'", "]", ",", "info", "[", "genome", "]", "[", "'# contigs'", "]", ",", "cluster", "]", "if", "rep", "==", "genome", ":", "stats", "=", "[", "'*%s'", "%", "(", "cluster_num", ")", "]", "+", "stats", "else", ":", "stats", "=", "[", "cluster_num", "]", "+", "stats", "yield", "stats", "try", ":", "start", "=", "cluster_num", "+", "1", "except", ":", "start", "=", "0", "fastas", "=", "set", "(", "[", "i", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ".", "rsplit", "(", "'.contigs'", ")", "[", "0", "]", "for", "i", "in", "fastas", "]", ")", "for", "cluster_num", ",", "genome", "in", "enumerate", "(", "fastas", ".", "difference", "(", "set", "(", "in_cluster", ")", ")", ",", "start", ")", ":", "try", ":", "stats", "=", "[", "'*%s'", "%", "(", "cluster_num", ")", ",", "1", ",", "genome", ",", "genome", ",", "info", "[", "genome", "]", "[", "'#SCGs'", "]", ",", "info", "[", "genome", "]", "[", "'#SCG duplicates'", "]", ",", "info", "[", "genome", "]", "[", "'genome size (bp)'", "]", ",", "info", "[", "genome", "]", "[", "'# contigs'", "]", ",", "[", "genome", "]", "]", "except", ":", "stats", "=", "[", "'*%s'", "%", "(", "cluster_num", ")", ",", "1", ",", "genome", ",", "genome", ",", "'n/a'", ",", "'n/a'", ",", "info", "[", "genome", "]", "[", "'genome size (bp)'", "]", ",", "info", "[", "genome", "]", "[", "'# contigs'", "]", ",", "[", "genome", "]", "]", "yield", "stats" ]
choose representative genome and print cluster information *if ggKbase table is provided, use SCG info to choose best genome
[ "choose", "represenative", "genome", "and", "print", "cluster", "information" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L114-L163
train
christophertbrown/bioscripts
ctbBio/cluster_ani.py
parse_ggKbase_tables
def parse_ggKbase_tables(tables, id_type): """ convert ggKbase genome info tables to dictionary """ g2info = {} for table in tables: for line in open(table): line = line.strip().split('\t') if line[0].startswith('name'): header = line header[4] = 'genome size (bp)' header[12] = '#SCGs' header[13] = '#SCG duplicates' continue name, code, info = line[0], line[1], line info = [to_int(i) for i in info] if id_type is False: # try to use name and code ID if 'UNK' in code or 'unknown' in code: code = name if (name != code) and (name and code in g2info): print('# duplicate name or code in table(s)', file=sys.stderr) print('# %s and/or %s' % (name, code), file=sys.stderr) exit() if name not in g2info: g2info[name] = {item:stat for item, stat in zip(header, info)} if code not in g2info: g2info[code] = {item:stat for item, stat in zip(header, info)} else: if id_type == 'name': ID = name elif id_type == 'code': ID = code else: print('# specify name or code column using -id', file=sys.stderr) exit() ID = ID.replace(' ', '') g2info[ID] = {item:stat for item, stat in zip(header, info)} if g2info[ID]['genome size (bp)'] == '': g2info[ID]['genome size (bp)'] = 0 return g2info
python
def parse_ggKbase_tables(tables, id_type): """ convert ggKbase genome info tables to dictionary """ g2info = {} for table in tables: for line in open(table): line = line.strip().split('\t') if line[0].startswith('name'): header = line header[4] = 'genome size (bp)' header[12] = '#SCGs' header[13] = '#SCG duplicates' continue name, code, info = line[0], line[1], line info = [to_int(i) for i in info] if id_type is False: # try to use name and code ID if 'UNK' in code or 'unknown' in code: code = name if (name != code) and (name and code in g2info): print('# duplicate name or code in table(s)', file=sys.stderr) print('# %s and/or %s' % (name, code), file=sys.stderr) exit() if name not in g2info: g2info[name] = {item:stat for item, stat in zip(header, info)} if code not in g2info: g2info[code] = {item:stat for item, stat in zip(header, info)} else: if id_type == 'name': ID = name elif id_type == 'code': ID = code else: print('# specify name or code column using -id', file=sys.stderr) exit() ID = ID.replace(' ', '') g2info[ID] = {item:stat for item, stat in zip(header, info)} if g2info[ID]['genome size (bp)'] == '': g2info[ID]['genome size (bp)'] = 0 return g2info
[ "def", "parse_ggKbase_tables", "(", "tables", ",", "id_type", ")", ":", "g2info", "=", "{", "}", "for", "table", "in", "tables", ":", "for", "line", "in", "open", "(", "table", ")", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "if", "line", "[", "0", "]", ".", "startswith", "(", "'name'", ")", ":", "header", "=", "line", "header", "[", "4", "]", "=", "'genome size (bp)'", "header", "[", "12", "]", "=", "'#SCGs'", "header", "[", "13", "]", "=", "'#SCG duplicates'", "continue", "name", ",", "code", ",", "info", "=", "line", "[", "0", "]", ",", "line", "[", "1", "]", ",", "line", "info", "=", "[", "to_int", "(", "i", ")", "for", "i", "in", "info", "]", "if", "id_type", "is", "False", ":", "if", "'UNK'", "in", "code", "or", "'unknown'", "in", "code", ":", "code", "=", "name", "if", "(", "name", "!=", "code", ")", "and", "(", "name", "and", "code", "in", "g2info", ")", ":", "print", "(", "'# duplicate name or code in table(s)'", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "'# %s and/or %s'", "%", "(", "name", ",", "code", ")", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "if", "name", "not", "in", "g2info", ":", "g2info", "[", "name", "]", "=", "{", "item", ":", "stat", "for", "item", ",", "stat", "in", "zip", "(", "header", ",", "info", ")", "}", "if", "code", "not", "in", "g2info", ":", "g2info", "[", "code", "]", "=", "{", "item", ":", "stat", "for", "item", ",", "stat", "in", "zip", "(", "header", ",", "info", ")", "}", "else", ":", "if", "id_type", "==", "'name'", ":", "ID", "=", "name", "elif", "id_type", "==", "'code'", ":", "ID", "=", "code", "else", ":", "print", "(", "'# specify name or code column using -id'", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "ID", "=", "ID", ".", "replace", "(", "' '", ",", "''", ")", "g2info", "[", "ID", "]", "=", "{", "item", ":", "stat", "for", "item", ",", "stat", "in", "zip", "(", "header", ",", "info", ")", "}", "if", "g2info", "[", "ID", "]", "[", "'genome size (bp)'", "]", "==", "''", ":", "g2info", "[", "ID", "]", "[", "'genome size (bp)'", "]", "=", "0", "return", "g2info" ]
convert ggKbase genome info tables to dictionary
[ "convert", "ggKbase", "genome", "info", "tables", "to", "dictionary" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L174-L213
train
christophertbrown/bioscripts
ctbBio/cluster_ani.py
parse_checkM_tables
def parse_checkM_tables(tables): """ convert checkM genome info tables to dictionary """ g2info = {} for table in tables: for line in open(table): line = line.strip().split('\t') if line[0].startswith('Bin Id'): header = line header[8] = 'genome size (bp)' header[5] = '#SCGs' header[6] = '#SCG duplicates' continue ID, info = line[0], line info = [to_int(i) for i in info] ID = ID.replace(' ', '') g2info[ID] = {item:stat for item, stat in zip(header, info)} if g2info[ID]['genome size (bp)'] == '': g2info[ID]['genome size (bp)'] = 0 return g2info
python
def parse_checkM_tables(tables): """ convert checkM genome info tables to dictionary """ g2info = {} for table in tables: for line in open(table): line = line.strip().split('\t') if line[0].startswith('Bin Id'): header = line header[8] = 'genome size (bp)' header[5] = '#SCGs' header[6] = '#SCG duplicates' continue ID, info = line[0], line info = [to_int(i) for i in info] ID = ID.replace(' ', '') g2info[ID] = {item:stat for item, stat in zip(header, info)} if g2info[ID]['genome size (bp)'] == '': g2info[ID]['genome size (bp)'] = 0 return g2info
[ "def", "parse_checkM_tables", "(", "tables", ")", ":", "g2info", "=", "{", "}", "for", "table", "in", "tables", ":", "for", "line", "in", "open", "(", "table", ")", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "if", "line", "[", "0", "]", ".", "startswith", "(", "'Bin Id'", ")", ":", "header", "=", "line", "header", "[", "8", "]", "=", "'genome size (bp)'", "header", "[", "5", "]", "=", "'#SCGs'", "header", "[", "6", "]", "=", "'#SCG duplicates'", "continue", "ID", ",", "info", "=", "line", "[", "0", "]", ",", "line", "info", "=", "[", "to_int", "(", "i", ")", "for", "i", "in", "info", "]", "ID", "=", "ID", ".", "replace", "(", "' '", ",", "''", ")", "g2info", "[", "ID", "]", "=", "{", "item", ":", "stat", "for", "item", ",", "stat", "in", "zip", "(", "header", ",", "info", ")", "}", "if", "g2info", "[", "ID", "]", "[", "'genome size (bp)'", "]", "==", "''", ":", "g2info", "[", "ID", "]", "[", "'genome size (bp)'", "]", "=", "0", "return", "g2info" ]
convert checkM genome info tables to dictionary
[ "convert", "checkM", "genome", "info", "tables", "to", "dictionary" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L215-L235
train
christophertbrown/bioscripts
ctbBio/cluster_ani.py
genome_lengths
def genome_lengths(fastas, info): """ get genome lengths """ if info is False: info = {} for genome in fastas: name = genome.rsplit('.', 1)[0].rsplit('/', 1)[-1].rsplit('.contigs')[0] if name in info: continue length = 0 fragments = 0 for seq in parse_fasta(genome): length += len(seq[1]) fragments += 1 info[name] = {'genome size (bp)':length, '# contigs':fragments} return info
python
def genome_lengths(fastas, info): """ get genome lengths """ if info is False: info = {} for genome in fastas: name = genome.rsplit('.', 1)[0].rsplit('/', 1)[-1].rsplit('.contigs')[0] if name in info: continue length = 0 fragments = 0 for seq in parse_fasta(genome): length += len(seq[1]) fragments += 1 info[name] = {'genome size (bp)':length, '# contigs':fragments} return info
[ "def", "genome_lengths", "(", "fastas", ",", "info", ")", ":", "if", "info", "is", "False", ":", "info", "=", "{", "}", "for", "genome", "in", "fastas", ":", "name", "=", "genome", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ".", "rsplit", "(", "'.contigs'", ")", "[", "0", "]", "if", "name", "in", "info", ":", "continue", "length", "=", "0", "fragments", "=", "0", "for", "seq", "in", "parse_fasta", "(", "genome", ")", ":", "length", "+=", "len", "(", "seq", "[", "1", "]", ")", "fragments", "+=", "1", "info", "[", "name", "]", "=", "{", "'genome size (bp)'", ":", "length", ",", "'# contigs'", ":", "fragments", "}", "return", "info" ]
get genome lengths
[ "get", "genome", "lengths" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L237-L253
train
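A usage sketch, assuming hypothetical FASTA paths and the parse_fasta helper from the same module yielding (header, sequence) pairs:

info = genome_lengths(['bin1.fa', 'bin2.fa'], False)  # False starts a fresh info dict
print(info['bin1']['genome size (bp)'], info['bin1']['# contigs'])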
disqus/nydus
nydus/db/routers/base.py
BaseRouter.get_dbs
def get_dbs(self, attr, args, kwargs, **fkwargs):
    """
    Returns a list of db keys to route the given call to.

    :param attr: Name of attribute being called on the connection.
    :param args: List of arguments being passed to ``attr``.
    :param kwargs: Dictionary of keyword arguments being passed to ``attr``.

    >>> redis = Cluster(router=BaseRouter)
    >>> router = redis.router
    >>> router.get_dbs('incr', args=('key name', 1))
    [0,1,2]
    """
    if not self._ready:
        if not self.setup_router(args=args, kwargs=kwargs, **fkwargs):
            raise self.UnableToSetupRouter()

    retval = self._pre_routing(attr=attr, args=args, kwargs=kwargs, **fkwargs)
    if retval is not None:
        args, kwargs = retval

    if not (args or kwargs):
        return self.cluster.hosts.keys()

    try:
        db_nums = self._route(attr=attr, args=args, kwargs=kwargs, **fkwargs)
    except Exception as e:
        self._handle_exception(e)
        db_nums = []

    return self._post_routing(attr=attr, db_nums=db_nums, args=args, kwargs=kwargs, **fkwargs)
python
def get_dbs(self, attr, args, kwargs, **fkwargs):
    """
    Returns a list of db keys to route the given call to.

    :param attr: Name of attribute being called on the connection.
    :param args: List of arguments being passed to ``attr``.
    :param kwargs: Dictionary of keyword arguments being passed to ``attr``.

    >>> redis = Cluster(router=BaseRouter)
    >>> router = redis.router
    >>> router.get_dbs('incr', args=('key name', 1))
    [0,1,2]
    """
    if not self._ready:
        if not self.setup_router(args=args, kwargs=kwargs, **fkwargs):
            raise self.UnableToSetupRouter()

    retval = self._pre_routing(attr=attr, args=args, kwargs=kwargs, **fkwargs)
    if retval is not None:
        args, kwargs = retval

    if not (args or kwargs):
        return self.cluster.hosts.keys()

    try:
        db_nums = self._route(attr=attr, args=args, kwargs=kwargs, **fkwargs)
    except Exception as e:
        self._handle_exception(e)
        db_nums = []

    return self._post_routing(attr=attr, db_nums=db_nums, args=args, kwargs=kwargs, **fkwargs)
[ "def", "get_dbs", "(", "self", ",", "attr", ",", "args", ",", "kwargs", ",", "**", "fkwargs", ")", ":", "if", "not", "self", ".", "_ready", ":", "if", "not", "self", ".", "setup_router", "(", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "**", "fkwargs", ")", ":", "raise", "self", ".", "UnableToSetupRouter", "(", ")", "retval", "=", "self", ".", "_pre_routing", "(", "attr", "=", "attr", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "**", "fkwargs", ")", "if", "retval", "is", "not", "None", ":", "args", ",", "kwargs", "=", "retval", "if", "not", "(", "args", "or", "kwargs", ")", ":", "return", "self", ".", "cluster", ".", "hosts", ".", "keys", "(", ")", "try", ":", "db_nums", "=", "self", ".", "_route", "(", "attr", "=", "attr", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "**", "fkwargs", ")", "except", "Exception", "as", "e", ":", "self", ".", "_handle_exception", "(", "e", ")", "db_nums", "=", "[", "]", "return", "self", ".", "_post_routing", "(", "attr", "=", "attr", ",", "db_nums", "=", "db_nums", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "**", "fkwargs", ")" ]
Returns a list of db keys to route the given call to.

:param attr: Name of attribute being called on the connection.
:param args: List of arguments being passed to ``attr``.
:param kwargs: Dictionary of keyword arguments being passed to ``attr``.

>>> redis = Cluster(router=BaseRouter)
>>> router = redis.router
>>> router.get_dbs('incr', args=('key name', 1))
[0,1,2]
[ "Returns", "a", "list", "of", "db", "keys", "to", "route", "the", "given", "call", "to", "." ]
9b505840da47a34f758a830c3992fa5dcb7bb7ad
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L50-L81
train
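Because the base class routes to every host by default (see the _route record below), routing is customized by subclassing. A minimal sketch; the hashing scheme is our assumption, only self.cluster.hosts comes from the source:

class HashRouter(BaseRouter):
    def _route(self, attr, args, kwargs, **fkwargs):
        # send key-based calls to a single db, everything else to all dbs
        if args:
            return [hash(args[0]) % len(self.cluster.hosts)]
        return self.cluster.hosts.keys()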
disqus/nydus
nydus/db/routers/base.py
BaseRouter.setup_router
def setup_router(self, args, kwargs, **fkwargs):
    """
    Call method to perform any setup
    """
    self._ready = self._setup_router(args=args, kwargs=kwargs, **fkwargs)
    return self._ready
python
def setup_router(self, args, kwargs, **fkwargs):
    """
    Call method to perform any setup
    """
    self._ready = self._setup_router(args=args, kwargs=kwargs, **fkwargs)
    return self._ready
[ "def", "setup_router", "(", "self", ",", "args", ",", "kwargs", ",", "**", "fkwargs", ")", ":", "self", ".", "_ready", "=", "self", ".", "_setup_router", "(", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "**", "fkwargs", ")", "return", "self", ".", "_ready" ]
Call method to perform any setup
[ "Call", "method", "to", "perform", "any", "setup" ]
9b505840da47a34f758a830c3992fa5dcb7bb7ad
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L87-L93
train
disqus/nydus
nydus/db/routers/base.py
BaseRouter._route
def _route(self, attr, args, kwargs, **fkwargs):
    """
    Perform routing and return db_nums
    """
    return self.cluster.hosts.keys()
python
def _route(self, attr, args, kwargs, **fkwargs):
    """
    Perform routing and return db_nums
    """
    return self.cluster.hosts.keys()
[ "def", "_route", "(", "self", ",", "attr", ",", "args", ",", "kwargs", ",", "**", "fkwargs", ")", ":", "return", "self", ".", "cluster", ".", "hosts", ".", "keys", "(", ")" ]
Perform routing and return db_nums
[ "Perform", "routing", "and", "return", "db_nums" ]
9b505840da47a34f758a830c3992fa5dcb7bb7ad
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L111-L115
train
disqus/nydus
nydus/db/routers/base.py
RoundRobinRouter.check_down_connections
def check_down_connections(self):
    """
    Iterates through all connections which were previously listed as unavailable
    and marks any that have expired their retry_timeout as being up.
    """
    now = time.time()
    for db_num, marked_down_at in self._down_connections.items():
        if marked_down_at + self.retry_timeout <= now:
            self.mark_connection_up(db_num)
python
def check_down_connections(self):
    """
    Iterates through all connections which were previously listed as unavailable
    and marks any that have expired their retry_timeout as being up.
    """
    now = time.time()
    for db_num, marked_down_at in self._down_connections.items():
        if marked_down_at + self.retry_timeout <= now:
            self.mark_connection_up(db_num)
[ "def", "check_down_connections", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "for", "db_num", ",", "marked_down_at", "in", "self", ".", "_down_connections", ".", "items", "(", ")", ":", "if", "marked_down_at", "+", "self", ".", "retry_timeout", "<=", "now", ":", "self", ".", "mark_connection_up", "(", "db_num", ")" ]
Iterates through all connections which were previously listed as unavailable and marks any that have expired their retry_timeout as being up.
[ "Iterates", "through", "all", "connections", "which", "were", "previously", "listed", "as", "unavailable", "and", "marks", "any", "that", "have", "expired", "their", "retry_timeout", "as", "being", "up", "." ]
9b505840da47a34f758a830c3992fa5dcb7bb7ad
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L175-L184
train
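A timing sketch of the retry window; router stands in for any instance carrying the attributes used above:

import time
router._down_connections[3] = time.time() - router.retry_timeout - 1  # already expired
router.check_down_connections()  # db 3 is marked up again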
disqus/nydus
nydus/db/routers/base.py
RoundRobinRouter.flush_down_connections
def flush_down_connections(self):
    """
    Marks all connections which were previously listed as unavailable as being up.
    """
    self._get_db_attempts = 0
    for db_num in self._down_connections.keys():
        self.mark_connection_up(db_num)
python
def flush_down_connections(self):
    """
    Marks all connections which were previously listed as unavailable as being up.
    """
    self._get_db_attempts = 0
    for db_num in self._down_connections.keys():
        self.mark_connection_up(db_num)
[ "def", "flush_down_connections", "(", "self", ")", ":", "self", ".", "_get_db_attempts", "=", "0", "for", "db_num", "in", "self", ".", "_down_connections", ".", "keys", "(", ")", ":", "self", ".", "mark_connection_up", "(", "db_num", ")" ]
Marks all connections which were previously listed as unavailable as being up.
[ "Marks", "all", "connections", "which", "were", "previously", "listed", "as", "unavailable", "as", "being", "up", "." ]
9b505840da47a34f758a830c3992fa5dcb7bb7ad
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L186-L192
train
opengridcc/opengrid
opengrid/library/analysis.py
standby
def standby(df, resolution='24h', time_window=None):
    """
    Compute standby power

    Parameters
    ----------
    df : pandas.DataFrame or pandas.Series
        Electricity Power
    resolution : str, default='24h'
        Resolution of the computation.  Data will be resampled to this resolution
        before computation of the minimum.
        String that can be parsed by the pandas resample function, example ='h', '15min', '6h'
    time_window : tuple with start-hour and end-hour, default=None
        Specify the start-time and end-time for the analysis.
        Only data within this time window will be considered.
        Both times have to be specified as string ('01:00', '06:30') or as datetime.time() objects

    Returns
    -------
    df : pandas.Series with DateTimeIndex in the given resolution
    """
    if df.empty:
        raise EmptyDataFrame()

    df = pd.DataFrame(df)  # if df was a pd.Series, convert to DataFrame

    def parse_time(t):
        if isinstance(t, numbers.Number):
            return pd.Timestamp.utcfromtimestamp(t).time()
        else:
            return pd.Timestamp(t).time()

    # first filter based on the time-window
    if time_window is not None:
        t_start = parse_time(time_window[0])
        t_end = parse_time(time_window[1])
        if t_start > t_end:
            # start before midnight
            df = df[(df.index.time >= t_start) | (df.index.time < t_end)]
        else:
            df = df[(df.index.time >= t_start) & (df.index.time < t_end)]

    return df.resample(resolution).min()
python
def standby(df, resolution='24h', time_window=None):
    """
    Compute standby power

    Parameters
    ----------
    df : pandas.DataFrame or pandas.Series
        Electricity Power
    resolution : str, default='24h'
        Resolution of the computation.  Data will be resampled to this resolution
        before computation of the minimum.
        String that can be parsed by the pandas resample function, example ='h', '15min', '6h'
    time_window : tuple with start-hour and end-hour, default=None
        Specify the start-time and end-time for the analysis.
        Only data within this time window will be considered.
        Both times have to be specified as string ('01:00', '06:30') or as datetime.time() objects

    Returns
    -------
    df : pandas.Series with DateTimeIndex in the given resolution
    """
    if df.empty:
        raise EmptyDataFrame()

    df = pd.DataFrame(df)  # if df was a pd.Series, convert to DataFrame

    def parse_time(t):
        if isinstance(t, numbers.Number):
            return pd.Timestamp.utcfromtimestamp(t).time()
        else:
            return pd.Timestamp(t).time()

    # first filter based on the time-window
    if time_window is not None:
        t_start = parse_time(time_window[0])
        t_end = parse_time(time_window[1])
        if t_start > t_end:
            # start before midnight
            df = df[(df.index.time >= t_start) | (df.index.time < t_end)]
        else:
            df = df[(df.index.time >= t_start) & (df.index.time < t_end)]

    return df.resample(resolution).min()
[ "def", "standby", "(", "df", ",", "resolution", "=", "'24h'", ",", "time_window", "=", "None", ")", ":", "if", "df", ".", "empty", ":", "raise", "EmptyDataFrame", "(", ")", "df", "=", "pd", ".", "DataFrame", "(", "df", ")", "def", "parse_time", "(", "t", ")", ":", "if", "isinstance", "(", "t", ",", "numbers", ".", "Number", ")", ":", "return", "pd", ".", "Timestamp", ".", "utcfromtimestamp", "(", "t", ")", ".", "time", "(", ")", "else", ":", "return", "pd", ".", "Timestamp", "(", "t", ")", ".", "time", "(", ")", "if", "time_window", "is", "not", "None", ":", "t_start", "=", "parse_time", "(", "time_window", "[", "0", "]", ")", "t_end", "=", "parse_time", "(", "time_window", "[", "1", "]", ")", "if", "t_start", ">", "t_end", ":", "df", "=", "df", "[", "(", "df", ".", "index", ".", "time", ">=", "t_start", ")", "|", "(", "df", ".", "index", ".", "time", "<", "t_end", ")", "]", "else", ":", "df", "=", "df", "[", "(", "df", ".", "index", ".", "time", ">=", "t_start", ")", "&", "(", "df", ".", "index", ".", "time", "<", "t_end", ")", "]", "return", "df", ".", "resample", "(", "resolution", ")", ".", "min", "(", ")" ]
Compute standby power

Parameters
----------
df : pandas.DataFrame or pandas.Series
    Electricity Power
resolution : str, default='24h'
    Resolution of the computation.  Data will be resampled to this resolution
    before computation of the minimum.
    String that can be parsed by the pandas resample function, example ='h', '15min', '6h'
time_window : tuple with start-hour and end-hour, default=None
    Specify the start-time and end-time for the analysis.
    Only data within this time window will be considered.
    Both times have to be specified as string ('01:00', '06:30') or as datetime.time() objects

Returns
-------
df : pandas.Series with DateTimeIndex in the given resolution
[ "Compute", "standby", "power" ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/analysis.py#L72-L115
train
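A usage sketch for standby, with pd and np as imported in the module:

import numpy as np
import pandas as pd

index = pd.date_range('2017-01-01', '2017-01-08', freq='15min')
power = pd.Series(np.random.uniform(50, 500, size=len(index)), index=index)
# daily standby power, looking only at the 01:00-06:30 night window
sb = standby(power, resolution='24h', time_window=('01:00', '06:30'))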
opengridcc/opengrid
opengrid/library/analysis.py
share_of_standby
def share_of_standby(df, resolution='24h', time_window=None):
    """
    Compute the share of the standby power in the total consumption.

    Parameters
    ----------
    df : pandas.DataFrame or pandas.Series
        Power (typically electricity, can be anything)
    resolution : str, default='24h'
        Resolution of the computation.  Data will be resampled to this resolution
        (as mean) before computation of the minimum.
        String that can be parsed by the pandas resample function, example ='h', '15min', '6h'
    time_window : tuple with start-hour and end-hour, default=None
        Specify the start-time and end-time for the analysis.
        Only data within this time window will be considered.
        Both times have to be specified as string ('01:00', '06:30') or as datetime.time() objects

    Returns
    -------
    fraction : float between 0-1 with the share of the standby consumption
    """
    p_sb = standby(df, resolution, time_window)
    df = df.resample(resolution).mean()
    p_tot = df.sum()
    p_standby = p_sb.sum()
    share_standby = p_standby / p_tot
    res = share_standby.iloc[0]
    return res
python
def share_of_standby(df, resolution='24h', time_window=None):
    """
    Compute the share of the standby power in the total consumption.

    Parameters
    ----------
    df : pandas.DataFrame or pandas.Series
        Power (typically electricity, can be anything)
    resolution : str, default='24h'
        Resolution of the computation.  Data will be resampled to this resolution
        (as mean) before computation of the minimum.
        String that can be parsed by the pandas resample function, example ='h', '15min', '6h'
    time_window : tuple with start-hour and end-hour, default=None
        Specify the start-time and end-time for the analysis.
        Only data within this time window will be considered.
        Both times have to be specified as string ('01:00', '06:30') or as datetime.time() objects

    Returns
    -------
    fraction : float between 0-1 with the share of the standby consumption
    """
    p_sb = standby(df, resolution, time_window)
    df = df.resample(resolution).mean()
    p_tot = df.sum()
    p_standby = p_sb.sum()
    share_standby = p_standby / p_tot
    res = share_standby.iloc[0]
    return res
[ "def", "share_of_standby", "(", "df", ",", "resolution", "=", "'24h'", ",", "time_window", "=", "None", ")", ":", "p_sb", "=", "standby", "(", "df", ",", "resolution", ",", "time_window", ")", "df", "=", "df", ".", "resample", "(", "resolution", ")", ".", "mean", "(", ")", "p_tot", "=", "df", ".", "sum", "(", ")", "p_standby", "=", "p_sb", ".", "sum", "(", ")", "share_standby", "=", "p_standby", "/", "p_tot", "res", "=", "share_standby", ".", "iloc", "[", "0", "]", "return", "res" ]
Compute the share of the standby power in the total consumption.

Parameters
----------
df : pandas.DataFrame or pandas.Series
    Power (typically electricity, can be anything)
resolution : str, default='24h'
    Resolution of the computation.  Data will be resampled to this resolution
    (as mean) before computation of the minimum.
    String that can be parsed by the pandas resample function, example ='h', '15min', '6h'
time_window : tuple with start-hour and end-hour, default=None
    Specify the start-time and end-time for the analysis.
    Only data within this time window will be considered.
    Both times have to be specified as string ('01:00', '06:30') or as datetime.time() objects

Returns
-------
fraction : float between 0-1 with the share of the standby consumption
[ "Compute", "the", "share", "of", "the", "standby", "power", "in", "the", "total", "consumption", "." ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/analysis.py#L118-L146
train
opengridcc/opengrid
opengrid/library/analysis.py
count_peaks
def count_peaks(ts):
    """
    Toggle counter for gas boilers

    Counts the number of times the gas consumption increases by more than 3 kW

    Parameters
    ----------
    ts: Pandas Series
        Gas consumption in minute resolution

    Returns
    -------
    int
    """
    on_toggles = ts.diff() > 3000
    shifted = np.logical_not(on_toggles.shift(1))
    result = on_toggles & shifted
    count = result.sum()
    return count
python
def count_peaks(ts):
    """
    Toggle counter for gas boilers

    Counts the number of times the gas consumption increases by more than 3 kW

    Parameters
    ----------
    ts: Pandas Series
        Gas consumption in minute resolution

    Returns
    -------
    int
    """
    on_toggles = ts.diff() > 3000
    shifted = np.logical_not(on_toggles.shift(1))
    result = on_toggles & shifted
    count = result.sum()
    return count
[ "def", "count_peaks", "(", "ts", ")", ":", "on_toggles", "=", "ts", ".", "diff", "(", ")", ">", "3000", "shifted", "=", "np", ".", "logical_not", "(", "on_toggles", ".", "shift", "(", "1", ")", ")", "result", "=", "on_toggles", "&", "shifted", "count", "=", "result", ".", "sum", "(", ")", "return", "count" ]
Toggle counter for gas boilers

Counts the number of times the gas consumption increases by more than 3 kW

Parameters
----------
ts: Pandas Series
    Gas consumption in minute resolution

Returns
-------
int
[ "Toggle", "counter", "for", "gas", "boilers" ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/analysis.py#L149-L169
train
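A worked example; values are in watts, so a jump above 3000 counts as one boiler start:

import pandas as pd

ts = pd.Series([0, 4000, 4000, 0, 0, 5000, 5000])
print(count_peaks(ts))  # 2: two separate increases of more than 3 kW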
opengridcc/opengrid
opengrid/library/analysis.py
load_factor
def load_factor(ts, resolution=None, norm=None):
    """
    Calculate the ratio of input vs. norm over a given interval.

    Parameters
    ----------
    ts : pandas.Series
        timeseries
    resolution : str, optional
        interval over which to calculate the ratio
        default: resolution of the input timeseries
    norm : int | float, optional
        denominator of the ratio
        default: the maximum of the input timeseries

    Returns
    -------
    pandas.Series
    """
    if norm is None:
        norm = ts.max()

    if resolution is not None:
        ts = ts.resample(rule=resolution).mean()

    lf = ts / norm

    return lf
python
def load_factor(ts, resolution=None, norm=None):
    """
    Calculate the ratio of input vs. norm over a given interval.

    Parameters
    ----------
    ts : pandas.Series
        timeseries
    resolution : str, optional
        interval over which to calculate the ratio
        default: resolution of the input timeseries
    norm : int | float, optional
        denominator of the ratio
        default: the maximum of the input timeseries

    Returns
    -------
    pandas.Series
    """
    if norm is None:
        norm = ts.max()

    if resolution is not None:
        ts = ts.resample(rule=resolution).mean()

    lf = ts / norm

    return lf
[ "def", "load_factor", "(", "ts", ",", "resolution", "=", "None", ",", "norm", "=", "None", ")", ":", "if", "norm", "is", "None", ":", "norm", "=", "ts", ".", "max", "(", ")", "if", "resolution", "is", "not", "None", ":", "ts", "=", "ts", ".", "resample", "(", "rule", "=", "resolution", ")", ".", "mean", "(", ")", "lf", "=", "ts", "/", "norm", "return", "lf" ]
Calculate the ratio of input vs. norm over a given interval.

Parameters
----------
ts : pandas.Series
    timeseries
resolution : str, optional
    interval over which to calculate the ratio
    default: resolution of the input timeseries
norm : int | float, optional
    denominator of the ratio
    default: the maximum of the input timeseries

Returns
-------
pandas.Series
[ "Calculate", "the", "ratio", "of", "input", "vs", ".", "norm", "over", "a", "given", "interval", "." ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/analysis.py#L172-L199
train
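A short sketch: with the default norm the observed peak maps to 1.0, while a fixed norm gives ratios against a reference:

import numpy as np
import pandas as pd

ts = pd.Series(np.random.uniform(0, 800, size=96),
               index=pd.date_range('2017-01-01', periods=96, freq='15min'))
print(load_factor(ts).max())                       # 1.0, norm defaults to ts.max()
print(load_factor(ts, resolution='h', norm=1000))  # hourly means vs. a 1 kW reference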
christophertbrown/bioscripts
ctbBio/besthits.py
top_hits
def top_hits(hits, num, column, reverse):
    """
    get top hits after sorting by column number
    """
    hits.sort(key=itemgetter(column), reverse=reverse)
    for hit in hits[0:num]:
        yield hit
python
def top_hits(hits, num, column, reverse):
    """
    get top hits after sorting by column number
    """
    hits.sort(key=itemgetter(column), reverse=reverse)
    for hit in hits[0:num]:
        yield hit
[ "def", "top_hits", "(", "hits", ",", "num", ",", "column", ",", "reverse", ")", ":", "hits", ".", "sort", "(", "key", "=", "itemgetter", "(", "column", ")", ",", "reverse", "=", "reverse", ")", "for", "hit", "in", "hits", "[", "0", ":", "num", "]", ":", "yield", "hit" ]
get top hits after sorting by column number
[ "get", "top", "hits", "after", "sorting", "by", "column", "number" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/besthits.py#L17-L23
train
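A worked example of the generator above; the hit rows and scores are made up:

hits = [['q', 't1', 90.0], ['q', 't2', 99.0], ['q', 't3', 75.0]]
print(list(top_hits(hits, 2, 2, True)))  # best two by column 2: t2 (99.0), then t1 (90.0)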
christophertbrown/bioscripts
ctbBio/besthits.py
numBlast_sort
def numBlast_sort(blast, numHits, evalueT, bitT):
    """
    parse b6 output with sorting
    """
    header = ['#query', 'target', 'pident', 'alen', 'mismatch', 'gapopen',
              'qstart', 'qend', 'tstart', 'tend', 'evalue', 'bitscore']
    yield header
    hmm = {h: [] for h in header}
    for line in blast:
        if line.startswith('#'):
            continue
        line = line.strip().split('\t')
        # Evalue and Bitscore thresholds
        line[10], line[11] = float(line[10]), float(line[11])
        evalue, bit = line[10], line[11]
        if evalueT is not False and evalue > evalueT:
            continue
        if bitT is not False and bit < bitT:
            continue
        for i, h in zip(line, header):
            hmm[h].append(i)
    hmm = pd.DataFrame(hmm)
    for query, df in hmm.groupby(by=['#query']):
        df = df.sort_values(by=['bitscore'], ascending=False)
        for hit in df[header].values[0:numHits]:
            yield hit
python
def numBlast_sort(blast, numHits, evalueT, bitT):
    """
    parse b6 output with sorting
    """
    header = ['#query', 'target', 'pident', 'alen', 'mismatch', 'gapopen',
              'qstart', 'qend', 'tstart', 'tend', 'evalue', 'bitscore']
    yield header
    hmm = {h: [] for h in header}
    for line in blast:
        if line.startswith('#'):
            continue
        line = line.strip().split('\t')
        # Evalue and Bitscore thresholds
        line[10], line[11] = float(line[10]), float(line[11])
        evalue, bit = line[10], line[11]
        if evalueT is not False and evalue > evalueT:
            continue
        if bitT is not False and bit < bitT:
            continue
        for i, h in zip(line, header):
            hmm[h].append(i)
    hmm = pd.DataFrame(hmm)
    for query, df in hmm.groupby(by=['#query']):
        df = df.sort_values(by=['bitscore'], ascending=False)
        for hit in df[header].values[0:numHits]:
            yield hit
[ "def", "numBlast_sort", "(", "blast", ",", "numHits", ",", "evalueT", ",", "bitT", ")", ":", "header", "=", "[", "'#query'", ",", "'target'", ",", "'pident'", ",", "'alen'", ",", "'mismatch'", ",", "'gapopen'", ",", "'qstart'", ",", "'qend'", ",", "'tstart'", ",", "'tend'", ",", "'evalue'", ",", "'bitscore'", "]", "yield", "header", "hmm", "=", "{", "h", ":", "[", "]", "for", "h", "in", "header", "}", "for", "line", "in", "blast", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "line", "[", "10", "]", ",", "line", "[", "11", "]", "=", "float", "(", "line", "[", "10", "]", ")", ",", "float", "(", "line", "[", "11", "]", ")", "evalue", ",", "bit", "=", "line", "[", "10", "]", ",", "line", "[", "11", "]", "if", "evalueT", "is", "not", "False", "and", "evalue", ">", "evalueT", ":", "continue", "if", "bitT", "is", "not", "False", "and", "bit", "<", "bitT", ":", "continue", "for", "i", ",", "h", "in", "zip", "(", "line", ",", "header", ")", ":", "hmm", "[", "h", "]", ".", "append", "(", "i", ")", "hmm", "=", "pd", ".", "DataFrame", "(", "hmm", ")", "for", "query", ",", "df", "in", "hmm", ".", "groupby", "(", "by", "=", "[", "'#query'", "]", ")", ":", "df", "=", "df", ".", "sort_values", "(", "by", "=", "[", "'bitscore'", "]", ",", "ascending", "=", "False", ")", "for", "hit", "in", "df", "[", "header", "]", ".", "values", "[", "0", ":", "numHits", "]", ":", "yield", "hit" ]
parse b6 output with sorting
[ "parse", "b6", "output", "with", "sorting" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/besthits.py#L25-L50
train
christophertbrown/bioscripts
ctbBio/besthits.py
numBlast
def numBlast(blast, numHits, evalueT=False, bitT=False, sort=False):
    """
    parse b6 output
    """
    if sort is True:
        for hit in numBlast_sort(blast, numHits, evalueT, bitT):
            yield hit
        return
    header = ['#query', 'target', 'pident', 'alen', 'mismatch', 'gapopen',
              'qstart', 'qend', 'tstart', 'tend', 'evalue', 'bitscore']
    yield header
    prev, hits = None, []
    for line in blast:
        line = line.strip().split('\t')
        ID = line[0]
        line[10], line[11] = float(line[10]), float(line[11])
        evalue, bit = line[10], line[11]
        if ID != prev:
            if len(hits) > 0:
                # column is 1 + line index
                for hit in top_hits(hits, numHits, 11, True):
                    yield hit
            hits = []
        if evalueT == False and bitT == False:
            hits.append(line)
        elif evalue <= evalueT and bitT == False:
            hits.append(line)
        elif evalue <= evalueT and bit >= bitT:
            hits.append(line)
        elif evalueT == False and bit >= bitT:
            hits.append(line)
        prev = ID
    for hit in top_hits(hits, numHits, 11, True):
        yield hit
python
def numBlast(blast, numHits, evalueT=False, bitT=False, sort=False):
    """
    parse b6 output
    """
    if sort is True:
        for hit in numBlast_sort(blast, numHits, evalueT, bitT):
            yield hit
        return
    header = ['#query', 'target', 'pident', 'alen', 'mismatch', 'gapopen',
              'qstart', 'qend', 'tstart', 'tend', 'evalue', 'bitscore']
    yield header
    prev, hits = None, []
    for line in blast:
        line = line.strip().split('\t')
        ID = line[0]
        line[10], line[11] = float(line[10]), float(line[11])
        evalue, bit = line[10], line[11]
        if ID != prev:
            if len(hits) > 0:
                # column is 1 + line index
                for hit in top_hits(hits, numHits, 11, True):
                    yield hit
            hits = []
        if evalueT == False and bitT == False:
            hits.append(line)
        elif evalue <= evalueT and bitT == False:
            hits.append(line)
        elif evalue <= evalueT and bit >= bitT:
            hits.append(line)
        elif evalueT == False and bit >= bitT:
            hits.append(line)
        prev = ID
    for hit in top_hits(hits, numHits, 11, True):
        yield hit
[ "def", "numBlast", "(", "blast", ",", "numHits", ",", "evalueT", "=", "False", ",", "bitT", "=", "False", ",", "sort", "=", "False", ")", ":", "if", "sort", "is", "True", ":", "for", "hit", "in", "numBlast_sort", "(", "blast", ",", "numHits", ",", "evalueT", ",", "bitT", ")", ":", "yield", "hit", "return", "header", "=", "[", "'#query'", ",", "'target'", ",", "'pident'", ",", "'alen'", ",", "'mismatch'", ",", "'gapopen'", ",", "'qstart'", ",", "'qend'", ",", "'tstart'", ",", "'tend'", ",", "'evalue'", ",", "'bitscore'", "]", "yield", "header", "prev", ",", "hits", "=", "None", ",", "[", "]", "for", "line", "in", "blast", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "ID", "=", "line", "[", "0", "]", "line", "[", "10", "]", ",", "line", "[", "11", "]", "=", "float", "(", "line", "[", "10", "]", ")", ",", "float", "(", "line", "[", "11", "]", ")", "evalue", ",", "bit", "=", "line", "[", "10", "]", ",", "line", "[", "11", "]", "if", "ID", "!=", "prev", ":", "if", "len", "(", "hits", ")", ">", "0", ":", "for", "hit", "in", "top_hits", "(", "hits", ",", "numHits", ",", "11", ",", "True", ")", ":", "yield", "hit", "hits", "=", "[", "]", "if", "evalueT", "==", "False", "and", "bitT", "==", "False", ":", "hits", ".", "append", "(", "line", ")", "elif", "evalue", "<=", "evalueT", "and", "bitT", "==", "False", ":", "hits", ".", "append", "(", "line", ")", "elif", "evalue", "<=", "evalueT", "and", "bit", ">=", "bitT", ":", "hits", ".", "append", "(", "line", ")", "elif", "evalueT", "==", "False", "and", "bit", ">=", "bitT", ":", "hits", ".", "append", "(", "line", ")", "prev", "=", "ID", "for", "hit", "in", "top_hits", "(", "hits", ",", "numHits", ",", "11", ",", "True", ")", ":", "yield", "hit" ]
parse b6 output
[ "parse", "b6", "output" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/besthits.py#L52-L85
train
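A usage sketch; 'hits.b6' is a hypothetical tab-separated blast outfmt-6 file, and the first yielded row is the column header:

with open('hits.b6') as blast:
    for hit in numBlast(blast, 5, evalueT=1e-6):  # best 5 hits per query, e-value <= 1e-6
        print(*hit, sep='\t')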
christophertbrown/bioscripts
ctbBio/besthits.py
numDomtblout
def numDomtblout(domtblout, numHits, evalueT, bitT, sort):
    """
    parse hmm domain table output
    this version is faster but does not work unless the table is sorted
    """
    if sort is True:
        for hit in numDomtblout_sort(domtblout, numHits, evalueT, bitT):
            yield hit
        return
    header = ['#target name', 'target accession', 'tlen',
              'query name', 'query accession', 'qlen',
              'full E-value', 'full score', 'full bias',
              'domain #', '# domains',
              'domain c-Evalue', 'domain i-Evalue', 'domain score', 'domain bias',
              'hmm from', 'hmm to', 'seq from', 'seq to', 'env from', 'env to',
              'acc', 'target description']
    yield header
    prev, hits = None, []
    for line in domtblout:
        if line.startswith('#'):
            continue
        # parse line and get description
        line = line.strip().split()
        desc = ' '.join(line[18:])
        line = line[0:18]
        line.append(desc)
        # create ID based on query name and domain number
        ID = line[0] + line[9]
        # domain c-Evalue and domain score thresholds
        line[11], line[13] = float(line[11]), float(line[13])
        evalue, bit = line[11], line[13]
        if ID != prev:
            if len(hits) > 0:
                for hit in top_hits(hits, numHits, 13, True):
                    yield hit
            hits = []
        if evalueT == False and bitT == False:
            hits.append(line)
        elif evalue <= evalueT and bitT == False:
            hits.append(line)
        elif evalue <= evalueT and bit >= bitT:
            hits.append(line)
        elif evalueT == False and bit >= bitT:
            hits.append(line)
        prev = ID
    for hit in top_hits(hits, numHits, 13, True):
        yield hit
python
def numDomtblout(domtblout, numHits, evalueT, bitT, sort):
    """
    parse hmm domain table output
    this version is faster but does not work unless the table is sorted
    """
    if sort is True:
        for hit in numDomtblout_sort(domtblout, numHits, evalueT, bitT):
            yield hit
        return
    header = ['#target name', 'target accession', 'tlen',
              'query name', 'query accession', 'qlen',
              'full E-value', 'full score', 'full bias',
              'domain #', '# domains',
              'domain c-Evalue', 'domain i-Evalue', 'domain score', 'domain bias',
              'hmm from', 'hmm to', 'seq from', 'seq to', 'env from', 'env to',
              'acc', 'target description']
    yield header
    prev, hits = None, []
    for line in domtblout:
        if line.startswith('#'):
            continue
        # parse line and get description
        line = line.strip().split()
        desc = ' '.join(line[18:])
        line = line[0:18]
        line.append(desc)
        # create ID based on query name and domain number
        ID = line[0] + line[9]
        # domain c-Evalue and domain score thresholds
        line[11], line[13] = float(line[11]), float(line[13])
        evalue, bit = line[11], line[13]
        if ID != prev:
            if len(hits) > 0:
                for hit in top_hits(hits, numHits, 13, True):
                    yield hit
            hits = []
        if evalueT == False and bitT == False:
            hits.append(line)
        elif evalue <= evalueT and bitT == False:
            hits.append(line)
        elif evalue <= evalueT and bit >= bitT:
            hits.append(line)
        elif evalueT == False and bit >= bitT:
            hits.append(line)
        prev = ID
    for hit in top_hits(hits, numHits, 13, True):
        yield hit
[ "def", "numDomtblout", "(", "domtblout", ",", "numHits", ",", "evalueT", ",", "bitT", ",", "sort", ")", ":", "if", "sort", "is", "True", ":", "for", "hit", "in", "numDomtblout_sort", "(", "domtblout", ",", "numHits", ",", "evalueT", ",", "bitT", ")", ":", "yield", "hit", "return", "header", "=", "[", "'#target name'", ",", "'target accession'", ",", "'tlen'", ",", "'query name'", ",", "'query accession'", ",", "'qlen'", ",", "'full E-value'", ",", "'full score'", ",", "'full bias'", ",", "'domain #'", ",", "'# domains'", ",", "'domain c-Evalue'", ",", "'domain i-Evalue'", ",", "'domain score'", ",", "'domain bias'", ",", "'hmm from'", ",", "'hmm to'", ",", "'seq from'", ",", "'seq to'", ",", "'env from'", ",", "'env to'", ",", "'acc'", ",", "'target description'", "]", "yield", "header", "prev", ",", "hits", "=", "None", ",", "[", "]", "for", "line", "in", "domtblout", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "desc", "=", "' '", ".", "join", "(", "line", "[", "18", ":", "]", ")", "line", "=", "line", "[", "0", ":", "18", "]", "line", ".", "append", "(", "desc", ")", "ID", "=", "line", "[", "0", "]", "+", "line", "[", "9", "]", "line", "[", "11", "]", ",", "line", "[", "13", "]", "=", "float", "(", "line", "[", "11", "]", ")", ",", "float", "(", "line", "[", "13", "]", ")", "evalue", ",", "bitscore", "=", "line", "[", "11", "]", ",", "line", "[", "13", "]", "line", "[", "11", "]", ",", "line", "[", "13", "]", "=", "evalue", ",", "bitscore", "if", "ID", "!=", "prev", ":", "if", "len", "(", "hits", ")", ">", "0", ":", "for", "hit", "in", "top_hits", "(", "hits", ",", "numHits", ",", "13", ",", "True", ")", ":", "yield", "hit", "hits", "=", "[", "]", "if", "evalueT", "==", "False", "and", "bitT", "==", "False", ":", "hits", ".", "append", "(", "line", ")", "elif", "evalue", "<=", "evalueT", "and", "bitT", "==", "False", ":", "hits", ".", "append", "(", "line", ")", "elif", "evalue", "<=", "evalueT", "and", "bit", ">=", "bitT", ":", "hits", ".", "append", "(", "line", ")", "elif", "evalueT", "==", "False", "and", "bit", ">=", "bitT", ":", "hits", ".", "append", "(", "line", ")", "prev", "=", "ID", "for", "hit", "in", "top_hits", "(", "hits", ",", "numHits", ",", "13", ",", "True", ")", ":", "yield", "hit" ]
parse hmm domain table output
this version is faster but does not work unless the table is sorted
[ "parse", "hmm", "domain", "table", "output", "this", "version", "is", "faster", "but", "does", "not", "work", "unless", "the", "table", "is", "sorted" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/besthits.py#L121-L168
train
christophertbrown/bioscripts
ctbBio/stockholm2fa.py
stock2fa
def stock2fa(stock):
    """
    convert stockholm to fasta
    """
    seqs = {}
    for line in stock:
        if line.startswith('#') is False and line.startswith(' ') is False and len(line) > 3:
            id, seq = line.strip().split()
            id = id.rsplit('/', 1)[0]
            id = re.split('[0-9]\|', id, 1)[-1]
            if id not in seqs:
                seqs[id] = []
            seqs[id].append(seq)
        if line.startswith('//'):
            break
    return seqs
python
def stock2fa(stock):
    """
    convert stockholm to fasta
    """
    seqs = {}
    for line in stock:
        if line.startswith('#') is False and line.startswith(' ') is False and len(line) > 3:
            id, seq = line.strip().split()
            id = id.rsplit('/', 1)[0]
            id = re.split('[0-9]\|', id, 1)[-1]
            if id not in seqs:
                seqs[id] = []
            seqs[id].append(seq)
        if line.startswith('//'):
            break
    return seqs
[ "def", "stock2fa", "(", "stock", ")", ":", "seqs", "=", "{", "}", "for", "line", "in", "stock", ":", "if", "line", ".", "startswith", "(", "'#'", ")", "is", "False", "and", "line", ".", "startswith", "(", "' '", ")", "is", "False", "and", "len", "(", "line", ")", ">", "3", ":", "id", ",", "seq", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "id", "=", "id", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "0", "]", "id", "=", "re", ".", "split", "(", "'[0-9]\\|'", ",", "id", ",", "1", ")", "[", "-", "1", "]", "if", "id", "not", "in", "seqs", ":", "seqs", "[", "id", "]", "=", "[", "]", "seqs", "[", "id", "]", ".", "append", "(", "seq", ")", "if", "line", ".", "startswith", "(", "'//'", ")", ":", "break", "return", "seqs" ]
convert stockholm to fasta
[ "convert", "stockholm", "to", "fasta" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/stockholm2fa.py#L11-L26
train
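A sketch that writes the converted alignment as fasta; the input path is hypothetical:

with open('alignment.sto') as stock:
    for id, seq in stock2fa(stock).items():
        print('>%s\n%s' % (id, ''.join(seq)))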
opengridcc/opengrid
opengrid/library/utils.py
week_schedule
def week_schedule(index, on_time=None, off_time=None, off_days=None):
    """
    Return boolean time series following given week schedule.

    Parameters
    ----------
    index : pandas.DatetimeIndex
        Datetime index
    on_time : str or datetime.time
        Daily opening time. Default: '09:00'
    off_time : str or datetime.time
        Daily closing time. Default: '17:00'
    off_days : list of str
        List of weekdays. Default: ['Sunday', 'Monday']

    Returns
    -------
    pandas.Series of bool
        True when on, False otherwise for given datetime index

    Examples
    --------
    >>> import pandas as pd
    >>> from opengrid.library.utils import week_schedule
    >>> index = pd.date_range('20170701', '20170710', freq='H')
    >>> week_schedule(index)
    """
    if on_time is None:
        on_time = '9:00'
    if off_time is None:
        off_time = '17:00'
    if off_days is None:
        off_days = ['Sunday', 'Monday']
    if not isinstance(on_time, datetime.time):
        on_time = pd.to_datetime(on_time, format='%H:%M').time()
    if not isinstance(off_time, datetime.time):
        off_time = pd.to_datetime(off_time, format='%H:%M').time()
    times = (index.time >= on_time) & (index.time < off_time) & (~index.weekday_name.isin(off_days))
    return pd.Series(times, index=index)
python
def week_schedule(index, on_time=None, off_time=None, off_days=None):
    """
    Return boolean time series following given week schedule.

    Parameters
    ----------
    index : pandas.DatetimeIndex
        Datetime index
    on_time : str or datetime.time
        Daily opening time. Default: '09:00'
    off_time : str or datetime.time
        Daily closing time. Default: '17:00'
    off_days : list of str
        List of weekdays. Default: ['Sunday', 'Monday']

    Returns
    -------
    pandas.Series of bool
        True when on, False otherwise for given datetime index

    Examples
    --------
    >>> import pandas as pd
    >>> from opengrid.library.utils import week_schedule
    >>> index = pd.date_range('20170701', '20170710', freq='H')
    >>> week_schedule(index)
    """
    if on_time is None:
        on_time = '9:00'
    if off_time is None:
        off_time = '17:00'
    if off_days is None:
        off_days = ['Sunday', 'Monday']
    if not isinstance(on_time, datetime.time):
        on_time = pd.to_datetime(on_time, format='%H:%M').time()
    if not isinstance(off_time, datetime.time):
        off_time = pd.to_datetime(off_time, format='%H:%M').time()
    times = (index.time >= on_time) & (index.time < off_time) & (~index.weekday_name.isin(off_days))
    return pd.Series(times, index=index)
[ "def", "week_schedule", "(", "index", ",", "on_time", "=", "None", ",", "off_time", "=", "None", ",", "off_days", "=", "None", ")", ":", "if", "on_time", "is", "None", ":", "on_time", "=", "'9:00'", "if", "off_time", "is", "None", ":", "off_time", "=", "'17:00'", "if", "off_days", "is", "None", ":", "off_days", "=", "[", "'Sunday'", ",", "'Monday'", "]", "if", "not", "isinstance", "(", "on_time", ",", "datetime", ".", "time", ")", ":", "on_time", "=", "pd", ".", "to_datetime", "(", "on_time", ",", "format", "=", "'%H:%M'", ")", ".", "time", "(", ")", "if", "not", "isinstance", "(", "off_time", ",", "datetime", ".", "time", ")", ":", "off_time", "=", "pd", ".", "to_datetime", "(", "off_time", ",", "format", "=", "'%H:%M'", ")", ".", "time", "(", ")", "times", "=", "(", "index", ".", "time", ">=", "on_time", ")", "&", "(", "index", ".", "time", "<", "off_time", ")", "&", "(", "~", "index", ".", "weekday_name", ".", "isin", "(", "off_days", ")", ")", "return", "pd", ".", "Series", "(", "times", ",", "index", "=", "index", ")" ]
Return boolean time series following given week schedule.

Parameters
----------
index : pandas.DatetimeIndex
    Datetime index
on_time : str or datetime.time
    Daily opening time. Default: '09:00'
off_time : str or datetime.time
    Daily closing time. Default: '17:00'
off_days : list of str
    List of weekdays. Default: ['Sunday', 'Monday']

Returns
-------
pandas.Series of bool
    True when on, False otherwise for given datetime index

Examples
--------
>>> import pandas as pd
>>> from opengrid.library.utils import week_schedule
>>> index = pd.date_range('20170701', '20170710', freq='H')
>>> week_schedule(index)
[ "Return", "boolean", "time", "series", "following", "given", "week", "schedule", "." ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/utils.py#L10-L47
train
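A usage sketch (note that index.weekday_name requires a pandas version that still ships that attribute):

import pandas as pd

index = pd.date_range('2017-07-01', '2017-07-10', freq='h')
schedule = week_schedule(index, on_time='08:30', off_time='18:00')
print(schedule.resample('D').mean())  # daily fraction of 'on' hours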
opengridcc/opengrid
opengrid/library/plotting.py
carpet
def carpet(timeseries, **kwargs):
    """
    Draw a carpet plot of a pandas timeseries.

    The carpet plot reads like a letter. Every day one line is added to the
    bottom of the figure, minute for minute moving from left (morning) to right
    (evening).
    The color denotes the level of consumption and is scaled logarithmically.
    If vmin and vmax are not provided as inputs, the minimum and maximum of the
    colorbar represent the minimum and maximum of the (resampled) timeseries.

    Parameters
    ----------
    timeseries : pandas.Series
    vmin, vmax : If not None, either or both of these values determine the range
        of the z axis. If None, the range is given by the minimum and/or maximum
        of the (resampled) timeseries.
    zlabel, title : If not None, these determine the labels of z axis and/or
        title. If None, the name of the timeseries is used if defined.
    cmap : matplotlib.cm instance, default coolwarm

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> from opengrid.library import plotting
    >>> plt = plotting.plot_style()
    >>> index = pd.date_range('2015-1-1','2015-12-31',freq='h')
    >>> ser = pd.Series(np.random.normal(size=len(index)), index=index, name='abc')
    >>> im = plotting.carpet(ser)
    """
    # define optional input parameters
    cmap = kwargs.pop('cmap', cm.coolwarm)
    norm = kwargs.pop('norm', LogNorm())
    interpolation = kwargs.pop('interpolation', 'nearest')
    cblabel = kwargs.pop('zlabel', timeseries.name if timeseries.name else '')
    title = kwargs.pop('title', 'carpet plot: ' + timeseries.name if timeseries.name else '')

    # data preparation
    if timeseries.dropna().empty:
        print('skipped {} - no data'.format(title))
        return
    ts = timeseries.resample('15min').interpolate()
    vmin = max(0.1, kwargs.pop('vmin', ts[ts > 0].min()))
    vmax = max(vmin, kwargs.pop('vmax', ts.quantile(.999)))

    # convert to dataframe with date as index and time as columns by
    # first replacing the index by a MultiIndex
    mpldatetimes = date2num(ts.index.to_pydatetime())
    ts.index = pd.MultiIndex.from_arrays(
        [np.floor(mpldatetimes), 2 + mpldatetimes % 1])  # '2 +': matplotlib bug workaround.
    # and then unstacking the second index level to columns
    df = ts.unstack()

    # data plotting
    fig, ax = plt.subplots()
    # define the extent of the axes (remark the +- 0.5 for the y axis in order to obtain aligned date ticks)
    extent = [df.columns[0], df.columns[-1], df.index[-1] + 0.5, df.index[0] - 0.5]
    im = plt.imshow(df, vmin=vmin, vmax=vmax, extent=extent, cmap=cmap, aspect='auto', norm=norm,
                    interpolation=interpolation, **kwargs)

    # figure formatting

    # x axis
    ax.xaxis_date()
    ax.xaxis.set_major_locator(HourLocator(interval=2))
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
    ax.xaxis.grid(True)
    plt.xlabel('UTC Time')

    # y axis
    ax.yaxis_date()
    dmin, dmax = ax.yaxis.get_data_interval()
    number_of_days = (num2date(dmax) - num2date(dmin)).days
    # AutoDateLocator is not suited in case few data is available
    if abs(number_of_days) <= 35:
        ax.yaxis.set_major_locator(DayLocator())
    else:
        ax.yaxis.set_major_locator(AutoDateLocator())
    ax.yaxis.set_major_formatter(DateFormatter("%a, %d %b %Y"))

    # plot colorbar
    cbticks = np.logspace(np.log10(vmin), np.log10(vmax), 11, endpoint=True)
    cb = plt.colorbar(format='%.0f', ticks=cbticks)
    cb.set_label(cblabel)

    # plot title
    plt.title(title)

    return im
python
def carpet(timeseries, **kwargs):
    """
    Draw a carpet plot of a pandas timeseries.

    The carpet plot reads like a letter. Every day one line is added to the
    bottom of the figure, minute for minute moving from left (morning) to right
    (evening).
    The color denotes the level of consumption and is scaled logarithmically.
    If vmin and vmax are not provided as inputs, the minimum and maximum of the
    colorbar represent the minimum and maximum of the (resampled) timeseries.

    Parameters
    ----------
    timeseries : pandas.Series
    vmin, vmax : If not None, either or both of these values determine the range
        of the z axis. If None, the range is given by the minimum and/or maximum
        of the (resampled) timeseries.
    zlabel, title : If not None, these determine the labels of z axis and/or
        title. If None, the name of the timeseries is used if defined.
    cmap : matplotlib.cm instance, default coolwarm

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> from opengrid.library import plotting
    >>> plt = plotting.plot_style()
    >>> index = pd.date_range('2015-1-1','2015-12-31',freq='h')
    >>> ser = pd.Series(np.random.normal(size=len(index)), index=index, name='abc')
    >>> im = plotting.carpet(ser)
    """
    # define optional input parameters
    cmap = kwargs.pop('cmap', cm.coolwarm)
    norm = kwargs.pop('norm', LogNorm())
    interpolation = kwargs.pop('interpolation', 'nearest')
    cblabel = kwargs.pop('zlabel', timeseries.name if timeseries.name else '')
    title = kwargs.pop('title', 'carpet plot: ' + timeseries.name if timeseries.name else '')

    # data preparation
    if timeseries.dropna().empty:
        print('skipped {} - no data'.format(title))
        return
    ts = timeseries.resample('15min').interpolate()
    vmin = max(0.1, kwargs.pop('vmin', ts[ts > 0].min()))
    vmax = max(vmin, kwargs.pop('vmax', ts.quantile(.999)))

    # convert to dataframe with date as index and time as columns by
    # first replacing the index by a MultiIndex
    mpldatetimes = date2num(ts.index.to_pydatetime())
    ts.index = pd.MultiIndex.from_arrays(
        [np.floor(mpldatetimes), 2 + mpldatetimes % 1])  # '2 +': matplotlib bug workaround.
    # and then unstacking the second index level to columns
    df = ts.unstack()

    # data plotting
    fig, ax = plt.subplots()
    # define the extent of the axes (remark the +- 0.5 for the y axis in order to obtain aligned date ticks)
    extent = [df.columns[0], df.columns[-1], df.index[-1] + 0.5, df.index[0] - 0.5]
    im = plt.imshow(df, vmin=vmin, vmax=vmax, extent=extent, cmap=cmap, aspect='auto', norm=norm,
                    interpolation=interpolation, **kwargs)

    # figure formatting

    # x axis
    ax.xaxis_date()
    ax.xaxis.set_major_locator(HourLocator(interval=2))
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
    ax.xaxis.grid(True)
    plt.xlabel('UTC Time')

    # y axis
    ax.yaxis_date()
    dmin, dmax = ax.yaxis.get_data_interval()
    number_of_days = (num2date(dmax) - num2date(dmin)).days
    # AutoDateLocator is not suited in case few data is available
    if abs(number_of_days) <= 35:
        ax.yaxis.set_major_locator(DayLocator())
    else:
        ax.yaxis.set_major_locator(AutoDateLocator())
    ax.yaxis.set_major_formatter(DateFormatter("%a, %d %b %Y"))

    # plot colorbar
    cbticks = np.logspace(np.log10(vmin), np.log10(vmax), 11, endpoint=True)
    cb = plt.colorbar(format='%.0f', ticks=cbticks)
    cb.set_label(cblabel)

    # plot title
    plt.title(title)

    return im
[ "def", "carpet", "(", "timeseries", ",", "**", "kwargs", ")", ":", "cmap", "=", "kwargs", ".", "pop", "(", "'cmap'", ",", "cm", ".", "coolwarm", ")", "norm", "=", "kwargs", ".", "pop", "(", "'norm'", ",", "LogNorm", "(", ")", ")", "interpolation", "=", "kwargs", ".", "pop", "(", "'interpolation'", ",", "'nearest'", ")", "cblabel", "=", "kwargs", ".", "pop", "(", "'zlabel'", ",", "timeseries", ".", "name", "if", "timeseries", ".", "name", "else", "''", ")", "title", "=", "kwargs", ".", "pop", "(", "'title'", ",", "'carpet plot: '", "+", "timeseries", ".", "name", "if", "timeseries", ".", "name", "else", "''", ")", "if", "timeseries", ".", "dropna", "(", ")", ".", "empty", ":", "print", "(", "'skipped {} - no data'", ".", "format", "(", "title", ")", ")", "return", "ts", "=", "timeseries", ".", "resample", "(", "'15min'", ")", ".", "interpolate", "(", ")", "vmin", "=", "max", "(", "0.1", ",", "kwargs", ".", "pop", "(", "'vmin'", ",", "ts", "[", "ts", ">", "0", "]", ".", "min", "(", ")", ")", ")", "vmax", "=", "max", "(", "vmin", ",", "kwargs", ".", "pop", "(", "'vmax'", ",", "ts", ".", "quantile", "(", ".999", ")", ")", ")", "mpldatetimes", "=", "date2num", "(", "ts", ".", "index", ".", "to_pydatetime", "(", ")", ")", "ts", ".", "index", "=", "pd", ".", "MultiIndex", ".", "from_arrays", "(", "[", "np", ".", "floor", "(", "mpldatetimes", ")", ",", "2", "+", "mpldatetimes", "%", "1", "]", ")", "df", "=", "ts", ".", "unstack", "(", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "extent", "=", "[", "df", ".", "columns", "[", "0", "]", ",", "df", ".", "columns", "[", "-", "1", "]", ",", "df", ".", "index", "[", "-", "1", "]", "+", "0.5", ",", "df", ".", "index", "[", "0", "]", "-", "0.5", "]", "im", "=", "plt", ".", "imshow", "(", "df", ",", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ",", "extent", "=", "extent", ",", "cmap", "=", "cmap", ",", "aspect", "=", "'auto'", ",", "norm", "=", "norm", ",", "interpolation", "=", "interpolation", ",", "**", "kwargs", ")", "ax", ".", "xaxis_date", "(", ")", "ax", ".", "xaxis", ".", "set_major_locator", "(", "HourLocator", "(", "interval", "=", "2", ")", ")", "ax", ".", "xaxis", ".", "set_major_formatter", "(", "DateFormatter", "(", "'%H:%M'", ")", ")", "ax", ".", "xaxis", ".", "grid", "(", "True", ")", "plt", ".", "xlabel", "(", "'UTC Time'", ")", "ax", ".", "yaxis_date", "(", ")", "dmin", ",", "dmax", "=", "ax", ".", "yaxis", ".", "get_data_interval", "(", ")", "number_of_days", "=", "(", "num2date", "(", "dmax", ")", "-", "num2date", "(", "dmin", ")", ")", ".", "days", "if", "abs", "(", "number_of_days", ")", "<=", "35", ":", "ax", ".", "yaxis", ".", "set_major_locator", "(", "DayLocator", "(", ")", ")", "else", ":", "ax", ".", "yaxis", ".", "set_major_locator", "(", "AutoDateLocator", "(", ")", ")", "ax", ".", "yaxis", ".", "set_major_formatter", "(", "DateFormatter", "(", "\"%a, %d %b %Y\"", ")", ")", "cbticks", "=", "np", ".", "logspace", "(", "np", ".", "log10", "(", "vmin", ")", ",", "np", ".", "log10", "(", "vmax", ")", ",", "11", ",", "endpoint", "=", "True", ")", "cb", "=", "plt", ".", "colorbar", "(", "format", "=", "'%.0f'", ",", "ticks", "=", "cbticks", ")", "cb", ".", "set_label", "(", "cblabel", ")", "plt", ".", "title", "(", "title", ")", "return", "im" ]
Draw a carpet plot of a pandas timeseries.

The carpet plot reads like a letter. Every day one line is added to the
bottom of the figure, minute for minute moving from left (morning) to right
(evening).
The color denotes the level of consumption and is scaled logarithmically.
If vmin and vmax are not provided as inputs, the minimum and maximum of the
colorbar represent the minimum and maximum of the (resampled) timeseries.

Parameters
----------
timeseries : pandas.Series
vmin, vmax : If not None, either or both of these values determine the range
    of the z axis. If None, the range is given by the minimum and/or maximum
    of the (resampled) timeseries.
zlabel, title : If not None, these determine the labels of z axis and/or
    title. If None, the name of the timeseries is used if defined.
cmap : matplotlib.cm instance, default coolwarm

Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from opengrid.library import plotting
>>> plt = plotting.plot_style()
>>> index = pd.date_range('2015-1-1','2015-12-31',freq='h')
>>> ser = pd.Series(np.random.normal(size=len(index)), index=index, name='abc')
>>> im = plotting.carpet(ser)
[ "Draw", "a", "carpet", "plot", "of", "a", "pandas", "timeseries", "." ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/plotting.py#L34-L125
train
christophertbrown/bioscripts
ctbBio/compare_aligned.py
calc_pident_ignore_gaps
def calc_pident_ignore_gaps(a, b):
    """
    calculate percent identity
    """
    m = 0   # matches
    mm = 0  # mismatches
    for A, B in zip(list(a), list(b)):
        if A == '-' or A == '.' or B == '-' or B == '.':
            continue
        if A == B:
            m += 1
        else:
            mm += 1
    try:
        return float(float(m)/float((m + mm))) * 100
    except:
        return 0
python
def calc_pident_ignore_gaps(a, b):
    """
    calculate percent identity
    """
    m = 0   # matches
    mm = 0  # mismatches
    for A, B in zip(list(a), list(b)):
        if A == '-' or A == '.' or B == '-' or B == '.':
            continue
        if A == B:
            m += 1
        else:
            mm += 1
    try:
        return float(float(m)/float((m + mm))) * 100
    except:
        return 0
[ "def", "calc_pident_ignore_gaps", "(", "a", ",", "b", ")", ":", "m", "=", "0", "mm", "=", "0", "for", "A", ",", "B", "in", "zip", "(", "list", "(", "a", ")", ",", "list", "(", "b", ")", ")", ":", "if", "A", "==", "'-'", "or", "A", "==", "'.'", "or", "B", "==", "'-'", "or", "B", "==", "'.'", ":", "continue", "if", "A", "==", "B", ":", "m", "+=", "1", "else", ":", "mm", "+=", "1", "try", ":", "return", "float", "(", "float", "(", "m", ")", "/", "float", "(", "(", "m", "+", "mm", ")", ")", ")", "*", "100", "except", ":", "return", "0" ]
calculate percent identity
[ "calculate", "percent", "identity" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L34-L50
train
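A worked example: the shared gap column is skipped, leaving four comparable columns of which three match:

print(calc_pident_ignore_gaps('AC-GT', 'AC-GA'))  # 75.0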
christophertbrown/bioscripts
ctbBio/compare_aligned.py
remove_gaps
def remove_gaps(A, B):
    """
    skip column if either is a gap
    """
    a_seq, b_seq = [], []
    for a, b in zip(list(A), list(B)):
        if a == '-' or a == '.' or b == '-' or b == '.':
            continue
        a_seq.append(a)
        b_seq.append(b)
    return ''.join(a_seq), ''.join(b_seq)
python
def remove_gaps(A, B):
    """
    skip column if either is a gap
    """
    a_seq, b_seq = [], []
    for a, b in zip(list(A), list(B)):
        if a == '-' or a == '.' or b == '-' or b == '.':
            continue
        a_seq.append(a)
        b_seq.append(b)
    return ''.join(a_seq), ''.join(b_seq)
[ "def", "remove_gaps", "(", "A", ",", "B", ")", ":", "a_seq", ",", "b_seq", "=", "[", "]", ",", "[", "]", "for", "a", ",", "b", "in", "zip", "(", "list", "(", "A", ")", ",", "list", "(", "B", ")", ")", ":", "if", "a", "==", "'-'", "or", "a", "==", "'.'", "or", "b", "==", "'-'", "or", "b", "==", "'.'", ":", "continue", "a_seq", ".", "append", "(", "a", ")", "b_seq", ".", "append", "(", "b", ")", "return", "''", ".", "join", "(", "a_seq", ")", ",", "''", ".", "join", "(", "b_seq", ")" ]
skip column if either is a gap
[ "skip", "column", "if", "either", "is", "a", "gap" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L52-L62
train
christophertbrown/bioscripts
ctbBio/compare_aligned.py
compare_seqs
def compare_seqs(seqs):
    """
    compare pairs of sequences
    """
    A, B, ignore_gaps = seqs
    a, b = A[1], B[1]  # actual sequences
    if len(a) != len(b):
        print('# reads are not the same length', file=sys.stderr)
        exit()
    if ignore_gaps is True:
        pident = calc_pident_ignore_gaps(a, b)
    else:
        pident = calc_pident(a, b)
    return A[0], B[0], pident
python
def compare_seqs(seqs):
    """
    compare pairs of sequences
    """
    A, B, ignore_gaps = seqs
    a, b = A[1], B[1]  # actual sequences
    if len(a) != len(b):
        print('# reads are not the same length', file=sys.stderr)
        exit()
    if ignore_gaps is True:
        pident = calc_pident_ignore_gaps(a, b)
    else:
        pident = calc_pident(a, b)
    return A[0], B[0], pident
[ "def", "compare_seqs", "(", "seqs", ")", ":", "A", ",", "B", ",", "ignore_gaps", "=", "seqs", "a", ",", "b", "=", "A", "[", "1", "]", ",", "B", "[", "1", "]", "if", "len", "(", "a", ")", "!=", "len", "(", "b", ")", ":", "print", "(", "'# reads are not the same length'", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "if", "ignore_gaps", "is", "True", ":", "pident", "=", "calc_pident_ignore_gaps", "(", "a", ",", "b", ")", "else", ":", "pident", "=", "calc_pident", "(", "a", ",", "b", ")", "return", "A", "[", "0", "]", ",", "B", "[", "0", "]", ",", "pident" ]
compare pairs of sequences
[ "compare", "pairs", "of", "sequences" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L64-L77
train
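A worked example of the expected input packing, (header, sequence) pairs plus the ignore_gaps flag:

pair = (('>a', 'AC-GT'), ('>b', 'AC-GA'), True)
print(compare_seqs(pair))  # ('>a', '>b', 75.0)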
christophertbrown/bioscripts
ctbBio/compare_aligned.py
compare_seqs_leven
def compare_seqs_leven(seqs):
    """
    calculate Levenshtein ratio of sequences
    """
    A, B, ignore_gaps = seqs
    a, b = remove_gaps(A[1], B[1])  # actual sequences
    if len(a) != len(b):
        print('# reads are not the same length', file=sys.stderr)
        exit()
    pident = lr(a, b) * 100
    return A[0], B[0], pident
python
def compare_seqs_leven(seqs):
    """
    calculate Levenshtein ratio of sequences
    """
    A, B, ignore_gaps = seqs
    a, b = remove_gaps(A[1], B[1])  # actual sequences
    if len(a) != len(b):
        print('# reads are not the same length', file=sys.stderr)
        exit()
    pident = lr(a, b) * 100
    return A[0], B[0], pident
[ "def", "compare_seqs_leven", "(", "seqs", ")", ":", "A", ",", "B", ",", "ignore_gaps", "=", "seqs", "a", ",", "b", "=", "remove_gaps", "(", "A", "[", "1", "]", ",", "B", "[", "1", "]", ")", "if", "len", "(", "a", ")", "!=", "len", "(", "b", ")", ":", "print", "(", "'# reads are not the same length'", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "pident", "=", "lr", "(", "a", ",", "b", ")", "*", "100", "return", "A", "[", "0", "]", ",", "B", "[", "0", "]", ",", "pident" ]
calculate Levenshtein ratio of sequences
[ "calculate", "Levenshtein", "ratio", "of", "sequences" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L79-L89
train
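
The lr call in compare_seqs_leven above is presumably Levenshtein.ratio from the python-Levenshtein package; its import is not shown in these records. A hedged sketch of the same percent-identity step:

# assumes `lr` is Levenshtein.ratio (pip install python-Levenshtein)
from Levenshtein import ratio as lr

a, b = "ACGTACGT", "ACGTACGA"
print(lr(a, b) * 100)  # 87.5 -- one substitution over eight positions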
christophertbrown/bioscripts
ctbBio/compare_aligned.py
pairwise_compare
def pairwise_compare(afa, leven, threads, print_list, ignore_gaps): """ make pairwise sequence comparisons between aligned sequences """ # load sequences into dictionary seqs = {seq[0]: seq for seq in nr_fasta([afa], append_index = True)} num_seqs = len(seqs) # define all pairs pairs = ((i[0], i[1], ignore_gaps) for i in itertools.combinations(list(seqs.values()), 2)) pool = multithread(threads) # calc percent identity between all pairs - parallelize if leven is True: pident = pool.map(compare_seqs_leven, pairs) else: compare = pool.imap_unordered(compare_seqs, pairs) pident = [i for i in tqdm(compare, total = (num_seqs*num_seqs)/2)] pool.close() pool.terminate() pool.join() return to_dictionary(pident, print_list)
python
def pairwise_compare(afa, leven, threads, print_list, ignore_gaps): """ make pairwise sequence comparisons between aligned sequences """ # load sequences into dictionary seqs = {seq[0]: seq for seq in nr_fasta([afa], append_index = True)} num_seqs = len(seqs) # define all pairs pairs = ((i[0], i[1], ignore_gaps) for i in itertools.combinations(list(seqs.values()), 2)) pool = multithread(threads) # calc percent identity between all pairs - parallelize if leven is True: pident = pool.map(compare_seqs_leven, pairs) else: compare = pool.imap_unordered(compare_seqs, pairs) pident = [i for i in tqdm(compare, total = (num_seqs*num_seqs)/2)] pool.close() pool.terminate() pool.join() return to_dictionary(pident, print_list)
[ "def", "pairwise_compare", "(", "afa", ",", "leven", ",", "threads", ",", "print_list", ",", "ignore_gaps", ")", ":", "seqs", "=", "{", "seq", "[", "0", "]", ":", "seq", "for", "seq", "in", "nr_fasta", "(", "[", "afa", "]", ",", "append_index", "=", "True", ")", "}", "num_seqs", "=", "len", "(", "seqs", ")", "pairs", "=", "(", "(", "i", "[", "0", "]", ",", "i", "[", "1", "]", ",", "ignore_gaps", ")", "for", "i", "in", "itertools", ".", "combinations", "(", "list", "(", "seqs", ".", "values", "(", ")", ")", ",", "2", ")", ")", "pool", "=", "multithread", "(", "threads", ")", "if", "leven", "is", "True", ":", "pident", "=", "pool", ".", "map", "(", "compare_seqs_leven", ",", "pairs", ")", "else", ":", "compare", "=", "pool", ".", "imap_unordered", "(", "compare_seqs", ",", "pairs", ")", "pident", "=", "[", "i", "for", "i", "in", "tqdm", "(", "compare", ",", "total", "=", "(", "num_seqs", "*", "num_seqs", ")", "/", "2", ")", "]", "pool", ".", "close", "(", ")", "pool", ".", "terminate", "(", ")", "pool", ".", "join", "(", ")", "return", "to_dictionary", "(", "pident", ",", "print_list", ")" ]
make pairwise sequence comparisons between aligned sequences
[ "make", "pairwise", "sequence", "comparisons", "between", "aligned", "sequences" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L91-L110
train
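
Note that itertools.combinations yields each unordered pair exactly once, i.e. n*(n-1)/2 pairs, so the tqdm total of (num_seqs*num_seqs)/2 in the record slightly overcounts. A toy sketch of the pair construction, with invented (name, sequence) records:

import itertools

seqs = {"s1": ("s1", "ACGT"), "s2": ("s2", "AC-T"), "s3": ("s3", "AGGT")}
ignore_gaps = True
pairs = ((i[0], i[1], ignore_gaps)
         for i in itertools.combinations(list(seqs.values()), 2))
print(len(list(pairs)))  # 3 unordered pairs for 3 sequences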
christophertbrown/bioscripts
ctbBio/compare_aligned.py
print_pairwise
def print_pairwise(pw, median = False): """ print matrix of pidents to stdout """ names = sorted(set([i for i in pw])) if len(names) != 0: if '>' in names[0]: yield ['#'] + [i.split('>')[1] for i in names if '>' in i] else: yield ['#'] + names for a in names: if '>' in a: yield [a.split('>')[1]] + [pw[a][b] for b in names] else: out = [] for b in names: if b in pw[a]: if median is False: out.append(max(pw[a][b])) else: out.append(np.median(pw[a][b])) else: out.append('-') yield [a] + out
python
def print_pairwise(pw, median = False): """ print matrix of pidents to stdout """ names = sorted(set([i for i in pw])) if len(names) != 0: if '>' in names[0]: yield ['#'] + [i.split('>')[1] for i in names if '>' in i] else: yield ['#'] + names for a in names: if '>' in a: yield [a.split('>')[1]] + [pw[a][b] for b in names] else: out = [] for b in names: if b in pw[a]: if median is False: out.append(max(pw[a][b])) else: out.append(np.median(pw[a][b])) else: out.append('-') yield [a] + out
[ "def", "print_pairwise", "(", "pw", ",", "median", "=", "False", ")", ":", "names", "=", "sorted", "(", "set", "(", "[", "i", "for", "i", "in", "pw", "]", ")", ")", "if", "len", "(", "names", ")", "!=", "0", ":", "if", "'>'", "in", "names", "[", "0", "]", ":", "yield", "[", "'#'", "]", "+", "[", "i", ".", "split", "(", "'>'", ")", "[", "1", "]", "for", "i", "in", "names", "if", "'>'", "in", "i", "]", "else", ":", "yield", "[", "'#'", "]", "+", "names", "for", "a", "in", "names", ":", "if", "'>'", "in", "a", ":", "yield", "[", "a", ".", "split", "(", "'>'", ")", "[", "1", "]", "]", "+", "[", "pw", "[", "a", "]", "[", "b", "]", "for", "b", "in", "names", "]", "else", ":", "out", "=", "[", "]", "for", "b", "in", "names", ":", "if", "b", "in", "pw", "[", "a", "]", ":", "if", "median", "is", "False", ":", "out", ".", "append", "(", "max", "(", "pw", "[", "a", "]", "[", "b", "]", ")", ")", "else", ":", "out", ".", "append", "(", "np", ".", "median", "(", "pw", "[", "a", "]", "[", "b", "]", ")", ")", "else", ":", "out", ".", "append", "(", "'-'", ")", "yield", "[", "a", "]", "+", "out" ]
print matrix of pidents to stdout
[ "print", "matrix", "of", "pidents", "to", "stdout" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L132-L155
train
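
A minimal driver, assuming print_pairwise from the record is in scope; pw values are lists of percent identities, reduced per cell with max (default) or np.median:

# pw maps name -> name -> list of percent identities (toy values)
pw = {"A": {"B": [97.0, 98.5]}, "B": {"A": [97.0, 98.5]}}
for row in print_pairwise(pw):
    print("\t".join(str(x) for x in row))
# #   A     B
# A   -     98.5
# B   98.5  -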
christophertbrown/bioscripts
ctbBio/compare_aligned.py
print_comps
def print_comps(comps): """ print stats for comparisons """ if comps == []: print('n/a') else: print('# min: %s, max: %s, mean: %s' % \ (min(comps), max(comps), np.mean(comps)))
python
def print_comps(comps): """ print stats for comparisons """ if comps == []: print('n/a') else: print('# min: %s, max: %s, mean: %s' % \ (min(comps), max(comps), np.mean(comps)))
[ "def", "print_comps", "(", "comps", ")", ":", "if", "comps", "==", "[", "]", ":", "print", "(", "'n/a'", ")", "else", ":", "print", "(", "'# min: %s, max: %s, mean: %s'", "%", "(", "min", "(", "comps", ")", ",", "max", "(", "comps", ")", ",", "np", ".", "mean", "(", "comps", ")", ")", ")" ]
print stats for comparisons
[ "print", "stats", "for", "comparisons" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L157-L165
train
christophertbrown/bioscripts
ctbBio/compare_aligned.py
compare_clades
def compare_clades(pw): """ print min. pident within each clade and then matrix of between-clade max. """ names = sorted(set([i for i in pw])) for i in range(0, 4): wi, bt = {}, {} for a in names: for b in pw[a]: if ';' not in a or ';' not in b: continue pident = pw[a][b] cA, cB = a.split(';')[i], b.split(';')[i] if i == 0 and '_' in cA and '_' in cB: cA = cA.rsplit('_', 1)[1] cB = cB.rsplit('_', 1)[1] elif '>' in cA or '>' in cB: cA = cA.split('>')[1] cB = cB.split('>')[1] if cA == cB: if cA not in wi: wi[cA] = [] wi[cA].append(pident) else: if cA not in bt: bt[cA] = {} if cB not in bt[cA]: bt[cA][cB] = [] bt[cA][cB].append(pident) print('\n# min. within') for clade, pidents in list(wi.items()): print('\t'.join(['wi:%s' % str(i), clade, str(min(pidents))])) # print matrix of maximum between groups comps = [] print('\n# max. between') for comp in print_pairwise(bt): if comp is not None: print('\t'.join(['bt:%s' % str(i)] + [str(j) for j in comp])) if comp[0] != '#': comps.extend([j for j in comp[1:] if j != '-']) print_comps(comps) # print matrix of median between groups comps = [] print('\n# median between') for comp in print_pairwise(bt, median = True): if comp is not None: print('\t'.join(['bt:%s' % str(i)] + [str(j) for j in comp])) if comp[0] != '#': comps.extend([j for j in comp[1:] if j != '-']) print_comps(comps)
python
def compare_clades(pw): """ print min. pident within each clade and then matrix of between-clade max. """ names = sorted(set([i for i in pw])) for i in range(0, 4): wi, bt = {}, {} for a in names: for b in pw[a]: if ';' not in a or ';' not in b: continue pident = pw[a][b] cA, cB = a.split(';')[i], b.split(';')[i] if i == 0 and '_' in cA and '_' in cB: cA = cA.rsplit('_', 1)[1] cB = cB.rsplit('_', 1)[1] elif '>' in cA or '>' in cB: cA = cA.split('>')[1] cB = cB.split('>')[1] if cA == cB: if cA not in wi: wi[cA] = [] wi[cA].append(pident) else: if cA not in bt: bt[cA] = {} if cB not in bt[cA]: bt[cA][cB] = [] bt[cA][cB].append(pident) print('\n# min. within') for clade, pidents in list(wi.items()): print('\t'.join(['wi:%s' % str(i), clade, str(min(pidents))])) # print matrix of maximum between groups comps = [] print('\n# max. between') for comp in print_pairwise(bt): if comp is not None: print('\t'.join(['bt:%s' % str(i)] + [str(j) for j in comp])) if comp[0] != '#': comps.extend([j for j in comp[1:] if j != '-']) print_comps(comps) # print matrix of median between groups comps = [] print('\n# median between') for comp in print_pairwise(bt, median = True): if comp is not None: print('\t'.join(['bt:%s' % str(i)] + [str(j) for j in comp])) if comp[0] != '#': comps.extend([j for j in comp[1:] if j != '-']) print_comps(comps)
[ "def", "compare_clades", "(", "pw", ")", ":", "names", "=", "sorted", "(", "set", "(", "[", "i", "for", "i", "in", "pw", "]", ")", ")", "for", "i", "in", "range", "(", "0", ",", "4", ")", ":", "wi", ",", "bt", "=", "{", "}", ",", "{", "}", "for", "a", "in", "names", ":", "for", "b", "in", "pw", "[", "a", "]", ":", "if", "';'", "not", "in", "a", "or", "';'", "not", "in", "b", ":", "continue", "pident", "=", "pw", "[", "a", "]", "[", "b", "]", "cA", ",", "cB", "=", "a", ".", "split", "(", "';'", ")", "[", "i", "]", ",", "b", ".", "split", "(", "';'", ")", "[", "i", "]", "if", "i", "==", "0", "and", "'_'", "in", "cA", "and", "'_'", "in", "cB", ":", "cA", "=", "cA", ".", "rsplit", "(", "'_'", ",", "1", ")", "[", "1", "]", "cB", "=", "cB", ".", "rsplit", "(", "'_'", ",", "1", ")", "[", "1", "]", "elif", "'>'", "in", "cA", "or", "'>'", "in", "cB", ":", "cA", "=", "cA", ".", "split", "(", "'>'", ")", "[", "1", "]", "cB", "=", "cB", ".", "split", "(", "'>'", ")", "[", "1", "]", "if", "cA", "==", "cB", ":", "if", "cA", "not", "in", "wi", ":", "wi", "[", "cA", "]", "=", "[", "]", "wi", "[", "cA", "]", ".", "append", "(", "pident", ")", "else", ":", "if", "cA", "not", "in", "bt", ":", "bt", "[", "cA", "]", "=", "{", "}", "if", "cB", "not", "in", "bt", "[", "cA", "]", ":", "bt", "[", "cA", "]", "[", "cB", "]", "=", "[", "]", "bt", "[", "cA", "]", "[", "cB", "]", ".", "append", "(", "pident", ")", "print", "(", "'\\n# min. within'", ")", "for", "clade", ",", "pidents", "in", "list", "(", "wi", ".", "items", "(", ")", ")", ":", "print", "(", "'\\t'", ".", "join", "(", "[", "'wi:%s'", "%", "str", "(", "i", ")", ",", "clade", ",", "str", "(", "min", "(", "pidents", ")", ")", "]", ")", ")", "comps", "=", "[", "]", "print", "(", "'\\n# max. between'", ")", "for", "comp", "in", "print_pairwise", "(", "bt", ")", ":", "if", "comp", "is", "not", "None", ":", "print", "(", "'\\t'", ".", "join", "(", "[", "'bt:%s'", "%", "str", "(", "i", ")", "]", "+", "[", "str", "(", "j", ")", "for", "j", "in", "comp", "]", ")", ")", "if", "comp", "[", "0", "]", "!=", "'#'", ":", "comps", ".", "extend", "(", "[", "j", "for", "j", "in", "comp", "[", "1", ":", "]", "if", "j", "!=", "'-'", "]", ")", "print_comps", "(", "comps", ")", "comps", "=", "[", "]", "print", "(", "'\\n# median between'", ")", "for", "comp", "in", "print_pairwise", "(", "bt", ",", "median", "=", "True", ")", ":", "if", "comp", "is", "not", "None", ":", "print", "(", "'\\t'", ".", "join", "(", "[", "'bt:%s'", "%", "str", "(", "i", ")", "]", "+", "[", "str", "(", "j", ")", "for", "j", "in", "comp", "]", ")", ")", "if", "comp", "[", "0", "]", "!=", "'#'", ":", "comps", ".", "extend", "(", "[", "j", "for", "j", "in", "comp", "[", "1", ":", "]", "if", "j", "!=", "'-'", "]", ")", "print_comps", "(", "comps", ")" ]
print min. pident within each clade and then matrix of between-clade max.
[ "print", "min", ".", "pident", "within", "each", "clade", "and", "then", "matrix", "of", "between", "-", "clade", "max", "." ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L167-L216
train
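
The clade comparison above splits semicolon-delimited lineage names level by level and buckets percent identities as within- or between-clade. A toy sketch of that bucketing for a single level, with invented lineages:

# toy semicolon-delimited lineages, invented for illustration
pw = {"Bacteria;Firmicutes": {"Bacteria;Firmicutes": 99.0,
                              "Bacteria;Proteobacteria": 80.0}}
wi, bt = {}, {}
for a, inner in pw.items():
    for b, pident in inner.items():
        cA, cB = a.split(";")[1], b.split(";")[1]  # clade names at level 1
        if cA == cB:
            wi.setdefault(cA, []).append(pident)
        else:
            bt.setdefault(cA, {}).setdefault(cB, []).append(pident)
print(wi)  # {'Firmicutes': [99.0]}
print(bt)  # {'Firmicutes': {'Proteobacteria': [80.0]}}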
christophertbrown/bioscripts
ctbBio/compare_aligned.py
matrix2dictionary
def matrix2dictionary(matrix): """ convert matrix to dictionary of comparisons """ pw = {} for line in matrix: line = line.strip().split('\t') if line[0].startswith('#'): names = line[1:] continue a = line[0] for i, pident in enumerate(line[1:]): b = names[i] if a not in pw: pw[a] = {} if b not in pw: pw[b] = {} if pident != '-': pident = float(pident) pw[a][b] = pident pw[b][a] = pident return pw
python
def matrix2dictionary(matrix): """ convert matrix to dictionary of comparisons """ pw = {} for line in matrix: line = line.strip().split('\t') if line[0].startswith('#'): names = line[1:] continue a = line[0] for i, pident in enumerate(line[1:]): b = names[i] if a not in pw: pw[a] = {} if b not in pw: pw[b] = {} if pident != '-': pident = float(pident) pw[a][b] = pident pw[b][a] = pident return pw
[ "def", "matrix2dictionary", "(", "matrix", ")", ":", "pw", "=", "{", "}", "for", "line", "in", "matrix", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "if", "line", "[", "0", "]", ".", "startswith", "(", "'#'", ")", ":", "names", "=", "line", "[", "1", ":", "]", "continue", "a", "=", "line", "[", "0", "]", "for", "i", ",", "pident", "in", "enumerate", "(", "line", "[", "1", ":", "]", ")", ":", "b", "=", "names", "[", "i", "]", "if", "a", "not", "in", "pw", ":", "pw", "[", "a", "]", "=", "{", "}", "if", "b", "not", "in", "pw", ":", "pw", "[", "b", "]", "=", "{", "}", "if", "pident", "!=", "'-'", ":", "pident", "=", "float", "(", "pident", ")", "pw", "[", "a", "]", "[", "b", "]", "=", "pident", "pw", "[", "b", "]", "[", "a", "]", "=", "pident", "return", "pw" ]
convert matrix to dictionary of comparisons
[ "convert", "matrix", "to", "dictionary", "of", "comparisons" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L218-L239
train
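
A round-trip sketch, assuming matrix2dictionary from the record is in scope; the tiny matrix mirrors the print_pairwise output shape:

matrix = [
    "#\tA\tB",
    "A\t-\t98.5",
    "B\t98.5\t-",
]
pw = matrix2dictionary(matrix)
print(pw["A"]["B"])  # 98.5 -- numeric cells become floats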
mkouhei/bootstrap-py
bootstrap_py/commands.py
setoption
def setoption(parser, metadata=None): """Set argument parser option.""" parser.add_argument('-v', action='version', version=__version__) subparsers = parser.add_subparsers(help='sub commands help') create_cmd = subparsers.add_parser('create') create_cmd.add_argument('name', help='Specify Python package name.') create_cmd.add_argument('-d', dest='description', action='store', help='Short description about your package.') create_cmd.add_argument('-a', dest='author', action='store', required=True, help='Python package author name.') create_cmd.add_argument('-e', dest='email', action='store', required=True, help='Python package author email address.') create_cmd.add_argument('-l', dest='license', choices=metadata.licenses().keys(), default='GPLv3+', help='Specify license. (default: %(default)s)') create_cmd.add_argument('-s', dest='status', choices=metadata.status().keys(), default='Alpha', help=('Specify development status. ' '(default: %(default)s)')) create_cmd.add_argument('--no-check', action='store_true', help='No checking package name in PyPI.') create_cmd.add_argument('--with-samples', action='store_true', help='Generate package with sample code.') group = create_cmd.add_mutually_exclusive_group(required=True) group.add_argument('-U', dest='username', action='store', help='Specify GitHub username.') group.add_argument('-u', dest='url', action='store', type=valid_url, help='Python package homepage url.') create_cmd.add_argument('-o', dest='outdir', action='store', default=os.path.abspath(os.path.curdir), help='Specify output directory. (default: $PWD)') list_cmd = subparsers.add_parser('list') list_cmd.add_argument('-l', dest='licenses', action='store_true', help='show license choices.')
python
def setoption(parser, metadata=None): """Set argument parser option.""" parser.add_argument('-v', action='version', version=__version__) subparsers = parser.add_subparsers(help='sub commands help') create_cmd = subparsers.add_parser('create') create_cmd.add_argument('name', help='Specify Python package name.') create_cmd.add_argument('-d', dest='description', action='store', help='Short description about your package.') create_cmd.add_argument('-a', dest='author', action='store', required=True, help='Python package author name.') create_cmd.add_argument('-e', dest='email', action='store', required=True, help='Python package author email address.') create_cmd.add_argument('-l', dest='license', choices=metadata.licenses().keys(), default='GPLv3+', help='Specify license. (default: %(default)s)') create_cmd.add_argument('-s', dest='status', choices=metadata.status().keys(), default='Alpha', help=('Specify development status. ' '(default: %(default)s)')) create_cmd.add_argument('--no-check', action='store_true', help='No checking package name in PyPI.') create_cmd.add_argument('--with-samples', action='store_true', help='Generate package with sample code.') group = create_cmd.add_mutually_exclusive_group(required=True) group.add_argument('-U', dest='username', action='store', help='Specify GitHub username.') group.add_argument('-u', dest='url', action='store', type=valid_url, help='Python package homepage url.') create_cmd.add_argument('-o', dest='outdir', action='store', default=os.path.abspath(os.path.curdir), help='Specify output directory. (default: $PWD)') list_cmd = subparsers.add_parser('list') list_cmd.add_argument('-l', dest='licenses', action='store_true', help='show license choices.')
[ "def", "setoption", "(", "parser", ",", "metadata", "=", "None", ")", ":", "parser", ".", "add_argument", "(", "'-v'", ",", "action", "=", "'version'", ",", "version", "=", "__version__", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", "help", "=", "'sub commands help'", ")", "create_cmd", "=", "subparsers", ".", "add_parser", "(", "'create'", ")", "create_cmd", ".", "add_argument", "(", "'name'", ",", "help", "=", "'Specify Python package name.'", ")", "create_cmd", ".", "add_argument", "(", "'-d'", ",", "dest", "=", "'description'", ",", "action", "=", "'store'", ",", "help", "=", "'Short description about your package.'", ")", "create_cmd", ".", "add_argument", "(", "'-a'", ",", "dest", "=", "'author'", ",", "action", "=", "'store'", ",", "required", "=", "True", ",", "help", "=", "'Python package author name.'", ")", "create_cmd", ".", "add_argument", "(", "'-e'", ",", "dest", "=", "'email'", ",", "action", "=", "'store'", ",", "required", "=", "True", ",", "help", "=", "'Python package author email address.'", ")", "create_cmd", ".", "add_argument", "(", "'-l'", ",", "dest", "=", "'license'", ",", "choices", "=", "metadata", ".", "licenses", "(", ")", ".", "keys", "(", ")", ",", "default", "=", "'GPLv3+'", ",", "help", "=", "'Specify license. (default: %(default)s)'", ")", "create_cmd", ".", "add_argument", "(", "'-s'", ",", "dest", "=", "'status'", ",", "choices", "=", "metadata", ".", "status", "(", ")", ".", "keys", "(", ")", ",", "default", "=", "'Alpha'", ",", "help", "=", "(", "'Specify development status. '", "'(default: %(default)s)'", ")", ")", "create_cmd", ".", "add_argument", "(", "'--no-check'", ",", "action", "=", "'store_true'", ",", "help", "=", "'No checking package name in PyPI.'", ")", "create_cmd", ".", "add_argument", "(", "'--with-samples'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Generate package with sample code.'", ")", "group", "=", "create_cmd", ".", "add_mutually_exclusive_group", "(", "required", "=", "True", ")", "group", ".", "add_argument", "(", "'-U'", ",", "dest", "=", "'username'", ",", "action", "=", "'store'", ",", "help", "=", "'Specify GitHub username.'", ")", "group", ".", "add_argument", "(", "'-u'", ",", "dest", "=", "'url'", ",", "action", "=", "'store'", ",", "type", "=", "valid_url", ",", "help", "=", "'Python package homepage url.'", ")", "create_cmd", ".", "add_argument", "(", "'-o'", ",", "dest", "=", "'outdir'", ",", "action", "=", "'store'", ",", "default", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", ",", "help", "=", "'Specify output directory. (default: $PWD)'", ")", "list_cmd", "=", "subparsers", ".", "add_parser", "(", "'list'", ")", "list_cmd", ".", "add_argument", "(", "'-l'", ",", "dest", "=", "'licenses'", ",", "action", "=", "'store_true'", ",", "help", "=", "'show license choices.'", ")" ]
Set argument parser option.
[ "Set", "argument", "parser", "option", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L12-L51
train
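
A hedged driver for the parser above, assuming the bootstrap-py package from the record is importable; the metadata stand-in is invented, since the real metadata class is not shown in these records:

import argparse
from bootstrap_py.commands import setoption

class FakeMetadata:  # stand-in: the real metadata class is not shown here
    def licenses(self):
        return {"GPLv3+": None, "MIT": None}
    def status(self):
        return {"Alpha": None, "Beta": None}

parser = argparse.ArgumentParser(prog="bootstrap-py")
setoption(parser, metadata=FakeMetadata())
args = parser.parse_args(["create", "mypkg", "-a", "Jane Doe",
                          "-e", "jane@example.com", "-U", "janedoe"])
print(args.name, args.license)  # mypkg GPLv3+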
mkouhei/bootstrap-py
bootstrap_py/commands.py
parse_options
def parse_options(metadata): """Parse argument options.""" parser = argparse.ArgumentParser(description='%(prog)s usage:', prog=__prog__) setoption(parser, metadata=metadata) return parser
python
def parse_options(metadata): """Parse argument options.""" parser = argparse.ArgumentParser(description='%(prog)s usage:', prog=__prog__) setoption(parser, metadata=metadata) return parser
[ "def", "parse_options", "(", "metadata", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'%(prog)s usage:'", ",", "prog", "=", "__prog__", ")", "setoption", "(", "parser", ",", "metadata", "=", "metadata", ")", "return", "parser" ]
Parse argument options.
[ "Parse", "argument", "options", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L72-L77
train
mkouhei/bootstrap-py
bootstrap_py/commands.py
main
def main(): """Execute main processes.""" try: pkg_version = Update() if pkg_version.updatable(): pkg_version.show_message() metadata = control.retreive_metadata() parser = parse_options(metadata) argvs = sys.argv if len(argvs) <= 1: parser.print_help() sys.exit(1) args = parser.parse_args() control.print_licences(args, metadata) control.check_repository_existence(args) control.check_package_existence(args) control.generate_package(args) except (RuntimeError, BackendFailure, Conflict) as exc: sys.stderr.write('{0}\n'.format(exc)) sys.exit(1)
python
def main(): """Execute main processes.""" try: pkg_version = Update() if pkg_version.updatable(): pkg_version.show_message() metadata = control.retreive_metadata() parser = parse_options(metadata) argvs = sys.argv if len(argvs) <= 1: parser.print_help() sys.exit(1) args = parser.parse_args() control.print_licences(args, metadata) control.check_repository_existence(args) control.check_package_existence(args) control.generate_package(args) except (RuntimeError, BackendFailure, Conflict) as exc: sys.stderr.write('{0}\n'.format(exc)) sys.exit(1)
[ "def", "main", "(", ")", ":", "try", ":", "pkg_version", "=", "Update", "(", ")", "if", "pkg_version", ".", "updatable", "(", ")", ":", "pkg_version", ".", "show_message", "(", ")", "metadata", "=", "control", ".", "retreive_metadata", "(", ")", "parser", "=", "parse_options", "(", "metadata", ")", "argvs", "=", "sys", ".", "argv", "if", "len", "(", "argvs", ")", "<=", "1", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "1", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "control", ".", "print_licences", "(", "args", ",", "metadata", ")", "control", ".", "check_repository_existence", "(", "args", ")", "control", ".", "check_package_existence", "(", "args", ")", "control", ".", "generate_package", "(", "args", ")", "except", "(", "RuntimeError", ",", "BackendFailure", ",", "Conflict", ")", "as", "exc", ":", "sys", ".", "stderr", ".", "write", "(", "'{0}\\n'", ".", "format", "(", "exc", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
Execute main processes.
[ "Execute", "main", "processes", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L80-L99
train
mkouhei/bootstrap-py
bootstrap_py/package.py
PackageData._check_or_set_default_params
def _check_or_set_default_params(self): """Check key and set default vaule when it does not exists.""" if not hasattr(self, 'date'): self._set_param('date', datetime.utcnow().strftime('%Y-%m-%d')) if not hasattr(self, 'version'): self._set_param('version', self.default_version) # pylint: disable=no-member if not hasattr(self, 'description') or self.description is None: getattr(self, '_set_param')('description', self.warning_message)
python
def _check_or_set_default_params(self): """Check key and set default vaule when it does not exists.""" if not hasattr(self, 'date'): self._set_param('date', datetime.utcnow().strftime('%Y-%m-%d')) if not hasattr(self, 'version'): self._set_param('version', self.default_version) # pylint: disable=no-member if not hasattr(self, 'description') or self.description is None: getattr(self, '_set_param')('description', self.warning_message)
[ "def", "_check_or_set_default_params", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'date'", ")", ":", "self", ".", "_set_param", "(", "'date'", ",", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "'%Y-%m-%d'", ")", ")", "if", "not", "hasattr", "(", "self", ",", "'version'", ")", ":", "self", ".", "_set_param", "(", "'version'", ",", "self", ".", "default_version", ")", "if", "not", "hasattr", "(", "self", ",", "'description'", ")", "or", "self", ".", "description", "is", "None", ":", "getattr", "(", "self", ",", "'_set_param'", ")", "(", "'description'", ",", "self", ".", "warning_message", ")" ]
Check key and set default value when it does not exist.
[ "Check", "key", "and", "set", "default", "vaule", "when", "it", "does", "not", "exists", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L44-L52
train
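
A standalone sketch of the check-or-set-default pattern above; the class and helper names here are hypothetical stand-ins, not the real PackageData:

from datetime import datetime

class PackageDataSketch:
    # hypothetical minimal stand-in for PackageData's defaulting logic
    default_version = "0.1"

    def _set_param(self, key, value):
        setattr(self, key, value)

    def _check_or_set_default_params(self):
        if not hasattr(self, "date"):
            self._set_param("date", datetime.utcnow().strftime("%Y-%m-%d"))
        if not hasattr(self, "version"):
            self._set_param("version", self.default_version)

pkg = PackageDataSketch()
pkg._check_or_set_default_params()
print(pkg.date, pkg.version)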
mkouhei/bootstrap-py
bootstrap_py/package.py
PackageTree.move
def move(self): """Move directory from working directory to output directory.""" if not os.path.isdir(self.outdir): os.makedirs(self.outdir) shutil.move(self.tmpdir, os.path.join(self.outdir, self.name))
python
def move(self): """Move directory from working directory to output directory.""" if not os.path.isdir(self.outdir): os.makedirs(self.outdir) shutil.move(self.tmpdir, os.path.join(self.outdir, self.name))
[ "def", "move", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "outdir", ")", ":", "os", ".", "makedirs", "(", "self", ".", "outdir", ")", "shutil", ".", "move", "(", "self", ".", "tmpdir", ",", "os", ".", "path", ".", "join", "(", "self", ".", "outdir", ",", "self", ".", "name", ")", ")" ]
Move directory from working directory to output directory.
[ "Move", "directory", "from", "working", "directory", "to", "output", "directory", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L169-L173
train
mkouhei/bootstrap-py
bootstrap_py/package.py
PackageTree.vcs_init
def vcs_init(self): """Initialize VCS repository.""" VCS(os.path.join(self.outdir, self.name), self.pkg_data)
python
def vcs_init(self): """Initialize VCS repository.""" VCS(os.path.join(self.outdir, self.name), self.pkg_data)
[ "def", "vcs_init", "(", "self", ")", ":", "VCS", "(", "os", ".", "path", ".", "join", "(", "self", ".", "outdir", ",", "self", ".", "name", ")", ",", "self", ".", "pkg_data", ")" ]
Initialize VCS repository.
[ "Initialize", "VCS", "repository", "." ]
95d56ed98ef409fd9f019dc352fd1c3711533275
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L185-L187
train
scottrice/pysteam
pysteam/winutils.py
find_steam_location
def find_steam_location(): """ Finds the location of the current Steam installation on Windows machines. Returns None for any non-Windows machines, or for Windows machines where Steam is not installed. """ if registry is None: return None key = registry.CreateKey(registry.HKEY_CURRENT_USER,"Software\Valve\Steam") return registry.QueryValueEx(key,"SteamPath")[0]
python
def find_steam_location(): """ Finds the location of the current Steam installation on Windows machines. Returns None for any non-Windows machines, or for Windows machines where Steam is not installed. """ if registry is None: return None key = registry.CreateKey(registry.HKEY_CURRENT_USER,"Software\Valve\Steam") return registry.QueryValueEx(key,"SteamPath")[0]
[ "def", "find_steam_location", "(", ")", ":", "if", "registry", "is", "None", ":", "return", "None", "key", "=", "registry", ".", "CreateKey", "(", "registry", ".", "HKEY_CURRENT_USER", ",", "\"Software\\Valve\\Steam\"", ")", "return", "registry", ".", "QueryValueEx", "(", "key", ",", "\"SteamPath\"", ")", "[", "0", "]" ]
Finds the location of the current Steam installation on Windows machines. Returns None for any non-Windows machines, or for Windows machines where Steam is not installed.
[ "Finds", "the", "location", "of", "the", "current", "Steam", "installation", "on", "Windows", "machines", ".", "Returns", "None", "for", "any", "non", "-", "Windows", "machines", "or", "for", "Windows", "machines", "where", "Steam", "is", "not", "installed", "." ]
1eb2254b5235a053a953e596fa7602d0b110245d
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/winutils.py#L10-L20
train
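
Note that the key path "Software\Valve\Steam" in the record parses as intended only because \V and \S are not recognized escape sequences in Python; a raw string is the safer spelling. A hedged, Windows-only sketch of the same lookup using the standard-library winreg module (the record's `registry` import is not shown here):

import sys

def find_steam_location():
    # returns None on non-Windows machines, as in the record
    if sys.platform != "win32":
        return None
    import winreg
    key = winreg.CreateKey(winreg.HKEY_CURRENT_USER, r"Software\Valve\Steam")
    return winreg.QueryValueEx(key, "SteamPath")[0]

print(find_steam_location())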
smdabdoub/phylotoast
bin/PCoA_bubble.py
plot_PCoA
def plot_PCoA(cat_data, otu_name, unifrac, names, colors, xr, yr, outDir, save_as, plot_style): """ Plot PCoA principal coordinates scaled by the relative abundances of otu_name. """ fig = plt.figure(figsize=(14, 8)) ax = fig.add_subplot(111) for i, cat in enumerate(cat_data): plt.scatter(cat_data[cat]["pc1"], cat_data[cat]["pc2"], cat_data[cat]["size"], color=colors[cat], alpha=0.85, marker="o", edgecolor="black", label=cat) lgnd = plt.legend(loc="best", scatterpoints=3, fontsize=13) for i in range(len(colors.keys())): lgnd.legendHandles[i]._sizes = [80] # Change the legend marker size manually plt.title(" ".join(otu_name.split("_")), style="italic") plt.ylabel("PC2 (Percent Explained Variance {:.3f}%)".format(float(unifrac["varexp"][1]))) plt.xlabel("PC1 (Percent Explained Variance {:.3f}%)".format(float(unifrac["varexp"][0]))) plt.xlim(round(xr[0]*1.5, 1), round(xr[1]*1.5, 1)) plt.ylim(round(yr[0]*1.5, 1), round(yr[1]*1.5, 1)) if plot_style: gu.ggplot2_style(ax) fc = "0.8" else: fc = "none" fig.savefig(os.path.join(outDir, "_".join(otu_name.split())) + "." + save_as, facecolor=fc, edgecolor="none", format=save_as, bbox_inches="tight", pad_inches=0.2) plt.close(fig)
python
def plot_PCoA(cat_data, otu_name, unifrac, names, colors, xr, yr, outDir, save_as, plot_style): """ Plot PCoA principal coordinates scaled by the relative abundances of otu_name. """ fig = plt.figure(figsize=(14, 8)) ax = fig.add_subplot(111) for i, cat in enumerate(cat_data): plt.scatter(cat_data[cat]["pc1"], cat_data[cat]["pc2"], cat_data[cat]["size"], color=colors[cat], alpha=0.85, marker="o", edgecolor="black", label=cat) lgnd = plt.legend(loc="best", scatterpoints=3, fontsize=13) for i in range(len(colors.keys())): lgnd.legendHandles[i]._sizes = [80] # Change the legend marker size manually plt.title(" ".join(otu_name.split("_")), style="italic") plt.ylabel("PC2 (Percent Explained Variance {:.3f}%)".format(float(unifrac["varexp"][1]))) plt.xlabel("PC1 (Percent Explained Variance {:.3f}%)".format(float(unifrac["varexp"][0]))) plt.xlim(round(xr[0]*1.5, 1), round(xr[1]*1.5, 1)) plt.ylim(round(yr[0]*1.5, 1), round(yr[1]*1.5, 1)) if plot_style: gu.ggplot2_style(ax) fc = "0.8" else: fc = "none" fig.savefig(os.path.join(outDir, "_".join(otu_name.split())) + "." + save_as, facecolor=fc, edgecolor="none", format=save_as, bbox_inches="tight", pad_inches=0.2) plt.close(fig)
[ "def", "plot_PCoA", "(", "cat_data", ",", "otu_name", ",", "unifrac", ",", "names", ",", "colors", ",", "xr", ",", "yr", ",", "outDir", ",", "save_as", ",", "plot_style", ")", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "14", ",", "8", ")", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "for", "i", ",", "cat", "in", "enumerate", "(", "cat_data", ")", ":", "plt", ".", "scatter", "(", "cat_data", "[", "cat", "]", "[", "\"pc1\"", "]", ",", "cat_data", "[", "cat", "]", "[", "\"pc2\"", "]", ",", "cat_data", "[", "cat", "]", "[", "\"size\"", "]", ",", "color", "=", "colors", "[", "cat", "]", ",", "alpha", "=", "0.85", ",", "marker", "=", "\"o\"", ",", "edgecolor", "=", "\"black\"", ",", "label", "=", "cat", ")", "lgnd", "=", "plt", ".", "legend", "(", "loc", "=", "\"best\"", ",", "scatterpoints", "=", "3", ",", "fontsize", "=", "13", ")", "for", "i", "in", "range", "(", "len", "(", "colors", ".", "keys", "(", ")", ")", ")", ":", "lgnd", ".", "legendHandles", "[", "i", "]", ".", "_sizes", "=", "[", "80", "]", "plt", ".", "title", "(", "\" \"", ".", "join", "(", "otu_name", ".", "split", "(", "\"_\"", ")", ")", ",", "style", "=", "\"italic\"", ")", "plt", ".", "ylabel", "(", "\"PC2 (Percent Explained Variance {:.3f}%)\"", ".", "format", "(", "float", "(", "unifrac", "[", "\"varexp\"", "]", "[", "1", "]", ")", ")", ")", "plt", ".", "xlabel", "(", "\"PC1 (Percent Explained Variance {:.3f}%)\"", ".", "format", "(", "float", "(", "unifrac", "[", "\"varexp\"", "]", "[", "0", "]", ")", ")", ")", "plt", ".", "xlim", "(", "round", "(", "xr", "[", "0", "]", "*", "1.5", ",", "1", ")", ",", "round", "(", "xr", "[", "1", "]", "*", "1.5", ",", "1", ")", ")", "plt", ".", "ylim", "(", "round", "(", "yr", "[", "0", "]", "*", "1.5", ",", "1", ")", ",", "round", "(", "yr", "[", "1", "]", "*", "1.5", ",", "1", ")", ")", "if", "plot_style", ":", "gu", ".", "ggplot2_style", "(", "ax", ")", "fc", "=", "\"0.8\"", "else", ":", "fc", "=", "\"none\"", "fig", ".", "savefig", "(", "os", ".", "path", ".", "join", "(", "outDir", ",", "\"_\"", ".", "join", "(", "otu_name", ".", "split", "(", ")", ")", ")", "+", "\".\"", "+", "save_as", ",", "facecolor", "=", "fc", ",", "edgecolor", "=", "\"none\"", ",", "format", "=", "save_as", ",", "bbox_inches", "=", "\"tight\"", ",", "pad_inches", "=", "0.2", ")", "plt", ".", "close", "(", "fig", ")" ]
Plot PCoA principal coordinates scaled by the relative abundances of otu_name.
[ "Plot", "PCoA", "principal", "coordinates", "scaled", "by", "the", "relative", "abundances", "of", "otu_name", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/PCoA_bubble.py#L36-L65
train
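
A minimal matplotlib sketch of the bubble-scatter and legend-size trick above, with invented coordinates (gu.ggplot2_style is a project helper not shown in these records):

import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(14, 8))
ax = fig.add_subplot(111)
ax.scatter([0.1, -0.2], [0.3, 0.0], s=[40, 120], color="steelblue",
           alpha=0.85, marker="o", edgecolor="black", label="GroupA")
lgnd = ax.legend(loc="best", scatterpoints=3, fontsize=13)
# fixed legend marker size, as in the record; newer matplotlib spells the
# attribute legend_handles instead of legendHandles
lgnd.legendHandles[0]._sizes = [80]
fig.savefig("pcoa_sketch.svg", bbox_inches="tight", pad_inches=0.2)
plt.close(fig)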
smdabdoub/phylotoast
bin/transpose_biom.py
split_by_category
def split_by_category(biom_cols, mapping, category_id): """ Split up the column data in a biom table by mapping category value. """ columns = defaultdict(list) for i, col in enumerate(biom_cols): columns[mapping[col['id']][category_id]].append((i, col)) return columns
python
def split_by_category(biom_cols, mapping, category_id): """ Split up the column data in a biom table by mapping category value. """ columns = defaultdict(list) for i, col in enumerate(biom_cols): columns[mapping[col['id']][category_id]].append((i, col)) return columns
[ "def", "split_by_category", "(", "biom_cols", ",", "mapping", ",", "category_id", ")", ":", "columns", "=", "defaultdict", "(", "list", ")", "for", "i", ",", "col", "in", "enumerate", "(", "biom_cols", ")", ":", "columns", "[", "mapping", "[", "col", "[", "'id'", "]", "]", "[", "category_id", "]", "]", ".", "append", "(", "(", "i", ",", "col", ")", ")", "return", "columns" ]
Split up the column data in a biom table by mapping category value.
[ "Split", "up", "the", "column", "data", "in", "a", "biom", "table", "by", "mapping", "category", "value", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/transpose_biom.py#L17-L25
train
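
A self-contained sketch of the defaultdict grouping used above; the biom columns and mapping data are invented for illustration:

from collections import defaultdict

biom_cols = [{"id": "S1"}, {"id": "S2"}, {"id": "S3"}]
mapping = {"S1": {"Treatment": "control"},
           "S2": {"Treatment": "case"},
           "S3": {"Treatment": "control"}}

columns = defaultdict(list)
for i, col in enumerate(biom_cols):
    columns[mapping[col["id"]]["Treatment"]].append((i, col))
print(dict(columns))  # {'control': [(0, ...), (2, ...)], 'case': [(1, ...)]}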
christophertbrown/bioscripts
ctbBio/stockholm2oneline.py
print_line
def print_line(l): """ print line if starts with ... """ print_lines = ['# STOCKHOLM', '#=GF', '#=GS', ' '] if len(l.split()) == 0: return True for start in print_lines: if l.startswith(start): return True return False
python
def print_line(l): """ print line if starts with ... """ print_lines = ['# STOCKHOLM', '#=GF', '#=GS', ' '] if len(l.split()) == 0: return True for start in print_lines: if l.startswith(start): return True return False
[ "def", "print_line", "(", "l", ")", ":", "print_lines", "=", "[", "'# STOCKHOLM'", ",", "'#=GF'", ",", "'#=GS'", ",", "' '", "]", "if", "len", "(", "l", ".", "split", "(", ")", ")", "==", "0", ":", "return", "True", "for", "start", "in", "print_lines", ":", "if", "l", ".", "startswith", "(", "start", ")", ":", "return", "True", "return", "False" ]
print line if starts with ...
[ "print", "line", "if", "starts", "with", "..." ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/stockholm2oneline.py#L11-L21
train
christophertbrown/bioscripts
ctbBio/stockholm2oneline.py
stock2one
def stock2one(stock): """ convert stockholm to single line format """ lines = {} for line in stock: line = line.strip() if print_line(line) is True: yield line continue if line.startswith('//'): continue ID, seq = line.rsplit(' ', 1) if ID not in lines: lines[ID] = '' else: # remove preceding white space seq = seq.strip() lines[ID] += seq for ID, line in lines.items(): yield '\t'.join([ID, line]) yield '\n//'
python
def stock2one(stock): """ convert stockholm to single line format """ lines = {} for line in stock: line = line.strip() if print_line(line) is True: yield line continue if line.startswith('//'): continue ID, seq = line.rsplit(' ', 1) if ID not in lines: lines[ID] = '' else: # remove preceding white space seq = seq.strip() lines[ID] += seq for ID, line in lines.items(): yield '\t'.join([ID, line]) yield '\n//'
[ "def", "stock2one", "(", "stock", ")", ":", "lines", "=", "{", "}", "for", "line", "in", "stock", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "print_line", "(", "line", ")", "is", "True", ":", "yield", "line", "continue", "if", "line", ".", "startswith", "(", "'//'", ")", ":", "continue", "ID", ",", "seq", "=", "line", ".", "rsplit", "(", "' '", ",", "1", ")", "if", "ID", "not", "in", "lines", ":", "lines", "[", "ID", "]", "=", "''", "else", ":", "seq", "=", "seq", ".", "strip", "(", ")", "lines", "[", "ID", "]", "+=", "seq", "for", "ID", ",", "line", "in", "lines", ".", "items", "(", ")", ":", "yield", "'\\t'", ".", "join", "(", "[", "ID", ",", "line", "]", ")", "yield", "'\\n//'" ]
convert stockholm to single line format
[ "convert", "stockholm", "to", "single", "line", "format" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/stockholm2oneline.py#L23-L44
train
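
A toy run, assuming stock2one and print_line from the records above are in scope; the two seq1 chunks are concatenated onto a single tab-separated line:

stock = [
    "# STOCKHOLM 1.0",
    "seq1 ACGT",
    "seq1 ACGT",
    "//",
]
for line in stock2one(stock):
    print(line)
# the header passes through, then 'seq1\tACGTACGT', then '//' is re-emitted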
elbow-jason/Uno-deprecated
uno/helpers.py
math_func
def math_func(f): """ Statics the methods. wut. """ @wraps(f) def wrapper(*args, **kwargs): if len(args) > 0: return_type = type(args[0]) if kwargs.has_key('return_type'): return_type = kwargs['return_type'] kwargs.pop('return_type') return return_type(f(*args, **kwargs)) args = list((setify(x) for x in args)) return return_type(f(*args, **kwargs)) return wrapper
python
def math_func(f): """ Statics the methods. wut. """ @wraps(f) def wrapper(*args, **kwargs): if len(args) > 0: return_type = type(args[0]) if kwargs.has_key('return_type'): return_type = kwargs['return_type'] kwargs.pop('return_type') return return_type(f(*args, **kwargs)) args = list((setify(x) for x in args)) return return_type(f(*args, **kwargs)) return wrapper
[ "def", "math_func", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "if", "len", "(", "args", ")", ">", "0", ":", "return_type", "=", "type", "(", "args", "[", "0", "]", ")", "if", "kwargs", ".", "has_key", "(", "'return_type'", ")", ":", "return_type", "=", "kwargs", "[", "'return_type'", "]", "kwargs", ".", "pop", "(", "'return_type'", ")", "return", "return_type", "(", "f", "(", "*", "args", ",", "**", "kwargs", ")", ")", "args", "=", "list", "(", "(", "setify", "(", "x", ")", "for", "x", "in", "args", ")", ")", "return", "return_type", "(", "f", "(", "*", "args", ",", "**", "kwargs", ")", ")", "return", "wrapper" ]
Decorator: convert positional args with setify and cast the result back to the type of the first argument (or an explicit return_type keyword).
[ "Statics", "the", "methods", ".", "wut", "." ]
4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4
https://github.com/elbow-jason/Uno-deprecated/blob/4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4/uno/helpers.py#L8-L22
train
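
The wrapper above uses dict.has_key, which exists only in Python 2. A hedged Python 3 rendering of the same decorator idea, with a trivial setify stand-in since that helper is not shown in these records:

from functools import wraps

def setify(x):  # stand-in: the real setify helper is not shown here
    return set(x)

def math_func(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        if len(args) > 0:
            return_type = type(args[0])
        if 'return_type' in kwargs:  # Python 3 spelling of has_key
            return_type = kwargs.pop('return_type')
            return return_type(f(*args, **kwargs))
        args = [setify(x) for x in args]
        return return_type(f(*args, **kwargs))
    return wrapper

@math_func
def union(a, b):
    return a | b

print(sorted(union([1, 2], [2, 3])))  # [1, 2, 3] -- result cast back to list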