Dataset schema:

repo: string (length 7–55)
path: string (length 4–127)
func_name: string (length 1–88)
original_string: string (length 75–19.8k)
language: string (1 value)
code: string (length 75–19.8k)
code_tokens: sequence
docstring: string (length 3–17.3k)
docstring_tokens: sequence
sha: string (length 40)
url: string (length 87–242)
partition: string (1 value)

repo: openvax/isovar
path: isovar/variant_sequences.py
func_name: filter_variant_sequences
language: python
code:

    def filter_variant_sequences(
            variant_sequences,
            preferred_sequence_length,
            min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE):
        """
        Drop variant sequences which are shorter than requested or don't
        have enough supporting reads.
        """
        variant_sequences = trim_variant_sequences(
            variant_sequences, min_variant_sequence_coverage)

        return filter_variant_sequences_by_length(
            variant_sequences=variant_sequences,
            preferred_sequence_length=preferred_sequence_length)

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L339-L352
partition: train

repo: openvax/isovar
path: isovar/variant_sequences.py
func_name: reads_generator_to_sequences_generator
language: python
code:

    def reads_generator_to_sequences_generator(
            variant_and_reads_generator,
            min_alt_rna_reads=MIN_ALT_RNA_READS,
            min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE,
            preferred_sequence_length=VARIANT_SEQUENCE_LENGTH,
            variant_sequence_assembly=VARIANT_SEQUENCE_ASSEMBLY):
        """
        For each variant, collect all possible sequence contexts around the
        variant which are spanned by at least min_reads.

        Parameters
        ----------
        variant_and_reads_generator : generator
            Sequence of Variant objects paired with a list of reads which
            overlap that variant.

        min_alt_rna_reads : int
            Minimum number of RNA reads supporting the variant allele.

        min_variant_sequence_coverage : int
            Minimum number of RNA reads supporting each nucleotide of the
            variant cDNA sequence.

        preferred_sequence_length : int
            Desired sequence length, including variant nucleotides.

        variant_sequence_assembly : bool
            Construct variant sequences by merging overlapping reads. If
            False then variant sequences must be fully spanned by cDNA
            reads.

        Yields pairs with the following fields:
            - Variant
            - list of VariantSequence objects
        """
        for variant, variant_reads in variant_and_reads_generator:
            variant_sequences = reads_to_variant_sequences(
                variant=variant,
                reads=variant_reads,
                min_alt_rna_reads=min_alt_rna_reads,
                min_variant_sequence_coverage=min_variant_sequence_coverage,
                preferred_sequence_length=preferred_sequence_length,
                variant_sequence_assembly=variant_sequence_assembly)
            yield variant, variant_sequences

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L474-L516
partition: train

repo: openvax/isovar
path: isovar/variant_sequences.py
func_name: VariantSequence.contains
language: python
code:

    def contains(self, other):
        """
        Is the other VariantSequence a subsequence of this one?

        The two sequences must agree on the alt nucleotides, the prefix of
        the longer must contain the prefix of the shorter, and the suffix
        of the longer must contain the suffix of the shorter.
        """
        return (self.alt == other.alt and
                self.prefix.endswith(other.prefix) and
                self.suffix.startswith(other.suffix))

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L67-L77
partition: train

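A minimal sketch of the containment check on hypothetical sequences, using a namedtuple stand-in for VariantSequence (the real class also carries a set of supporting reads):

    from collections import namedtuple

    VS = namedtuple("VS", ["prefix", "alt", "suffix"])

    longer = VS(prefix="ACGT", alt="T", suffix="GGCC")
    shorter = VS(prefix="GT", alt="T", suffix="GG")

    # same alt, the longer prefix ends with the shorter's prefix,
    # and the longer suffix starts with the shorter's suffix
    assert longer.alt == shorter.alt
    assert longer.prefix.endswith(shorter.prefix)
    assert longer.suffix.startswith(shorter.suffix)
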
repo: openvax/isovar
path: isovar/variant_sequences.py
func_name: VariantSequence.left_overlaps
language: python
code:

    def left_overlaps(self, other, min_overlap_size=1):
        """
        Does this VariantSequence overlap another on the left side?
        """
        if self.alt != other.alt:
            # allele must match!
            return False

        if len(other.prefix) > len(self.prefix):
            # only consider strings that overlap like:
            #   self:  ppppAssss
            #   other:   ppAsssssss
            # which excludes cases where the other sequence has a longer
            # prefix
            return False
        elif len(other.suffix) < len(self.suffix):
            # similarly, we throw away cases where the other sequence is
            # shorter after the alt nucleotides than this sequence
            return False

        # is the other sequence a prefix of this sequence?
        # Example:
        #   p1 a1 s1 = XXXXXXXX Y ZZZZZZ
        #   p2 a2 s2 =       XX Y ZZZZZZZZZ
        # ...
        # then we can combine them into a longer sequence
        sequence_overlaps = (
            self.prefix.endswith(other.prefix) and
            other.suffix.startswith(self.suffix))

        prefix_overlap_size = min(len(self.prefix), len(other.prefix))
        suffix_overlap_size = min(len(other.suffix), len(self.suffix))
        overlap_size = (
            prefix_overlap_size + suffix_overlap_size + len(self.alt))

        return sequence_overlaps and overlap_size >= min_overlap_size

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L79-L115
partition: train

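A worked example of the left-overlap test on hypothetical sequences, walking through the same conditions the method checks:

    from collections import namedtuple

    VS = namedtuple("VS", ["prefix", "alt", "suffix"])

    # left:  ACGT T GG
    # right:   GT T GGCC
    left = VS(prefix="ACGT", alt="T", suffix="GG")
    right = VS(prefix="GT", alt="T", suffix="GGCC")

    # the conditions left_overlaps(left, right) would check:
    assert left.alt == right.alt
    assert len(right.prefix) <= len(left.prefix)
    assert len(right.suffix) >= len(left.suffix)
    assert left.prefix.endswith(right.prefix)
    assert right.suffix.startswith(left.suffix)

    # overlap size = min(4, 2) + min(2, 4) + len("T") = 5 >= min_overlap_size,
    # so merging the pair could yield "ACGT" + "T" + "GGCC"
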
repo: openvax/isovar
path: isovar/variant_sequences.py
func_name: VariantSequence.add_reads
language: python
code:

    def add_reads(self, reads):
        """
        Create another VariantSequence with more supporting reads.
        """
        if len(reads) == 0:
            return self
        new_reads = self.reads.union(reads)
        if len(new_reads) > len(self.reads):
            return VariantSequence(
                prefix=self.prefix,
                alt=self.alt,
                suffix=self.suffix,
                reads=new_reads)
        else:
            return self

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L117-L131
partition: train

repo: openvax/isovar
path: isovar/variant_sequences.py
func_name: VariantSequence.variant_indices
language: python
code:

    def variant_indices(self):
        """
        When we combine prefix + alt + suffix into a single string, what is
        the base-0 index interval which gets us back the alt sequence? The
        first returned index is inclusive, the second is exclusive.
        """
        variant_start_index = len(self.prefix)
        variant_len = len(self.alt)
        variant_end_index = variant_start_index + variant_len
        return variant_start_index, variant_end_index

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L169-L178
partition: train

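A runnable sketch of the half-open interval on hypothetical strings:

    prefix, alt, suffix = "ACGT", "TT", "GGC"
    full_sequence = prefix + alt + suffix   # "ACGTTTGGC"

    start = len(prefix)          # inclusive: 4
    end = start + len(alt)       # exclusive: 6
    assert full_sequence[start:end] == alt
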
repo: openvax/isovar
path: isovar/variant_sequences.py
func_name: VariantSequence.coverage
language: python
code:

    def coverage(self):
        """
        Returns a NumPy array indicating the number of reads covering each
        nucleotide of this sequence.
        """
        variant_start_index, variant_end_index = self.variant_indices()
        n_nucleotides = len(self)
        coverage_array = np.zeros(n_nucleotides, dtype="int32")
        for read in self.reads:
            coverage_array[
                max(0, variant_start_index - len(read.prefix)):
                min(n_nucleotides, variant_end_index + len(read.suffix))] += 1
        return coverage_array

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L180-L192
partition: train

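A self-contained sketch of the coverage computation with hypothetical reads; each read is assumed to span the variant plus its own prefix and suffix, as in isovar:

    from collections import namedtuple
    import numpy as np

    Read = namedtuple("Read", ["prefix", "suffix"])

    # sequence = "AC" + "T" + "GG"; the variant occupies interval [2, 3)
    variant_start, variant_end, n = 2, 3, 5
    reads = [Read(prefix="AC", suffix="GG"), Read(prefix="C", suffix="G")]

    coverage = np.zeros(n, dtype="int32")
    for read in reads:
        coverage[max(0, variant_start - len(read.prefix)):
                 min(n, variant_end + len(read.suffix))] += 1

    # the first read covers all 5 positions, the second covers [1, 4)
    assert list(coverage) == [1, 2, 2, 2, 1]
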
repo: openvax/isovar
path: isovar/variant_sequences.py
func_name: VariantSequence.trim_by_coverage
language: python
code:

    def trim_by_coverage(self, min_reads):
        """
        Given the min number of reads overlapping each nucleotide of a
        variant sequence, trim this sequence by getting rid of positions
        which are overlapped by fewer reads than specified.
        """
        read_count_array = self.coverage()
        logger.info("Coverage: %s (len=%d)" % (
            read_count_array, len(read_count_array)))
        sufficient_coverage_mask = read_count_array >= min_reads
        sufficient_coverage_indices = np.argwhere(sufficient_coverage_mask)
        if len(sufficient_coverage_indices) == 0:
            logger.debug(
                "No bases in %s have coverage >= %d" % (self, min_reads))
            return VariantSequence(
                prefix="", alt="", suffix="", reads=self.reads)
        variant_start_index, variant_end_index = self.variant_indices()
        # assuming that coverage drops off monotonically away from
        # variant nucleotides
        first_covered_index = sufficient_coverage_indices.min()
        last_covered_index = sufficient_coverage_indices.max()
        # adding 1 to last_covered_index since it's an inclusive index
        # whereas variant_end_index is the end of a half-open interval
        if (first_covered_index > variant_start_index or
                last_covered_index + 1 < variant_end_index):
            # Example:
            #   Nucleotide sequence:
            #       ACCCTTTT|AA|GGCGCGCC
            #   Coverage:
            #       12222333|44|33333211
            #   Then the mask for bases covered >= 4x would be:
            #       ________|**|________
            #   with indices:
            #       first_covered_index = 9
            #       last_covered_index = 10
            #       variant_start_index = 9
            #       variant_end_index = 11
            logger.debug(
                "Some variant bases in %s don't have coverage >= %d" % (
                    self, min_reads))
            return VariantSequence(
                prefix="", alt="", suffix="", reads=self.reads)
        return VariantSequence(
            prefix=self.prefix[first_covered_index:],
            alt=self.alt,
            suffix=self.suffix[:last_covered_index - variant_end_index + 1],
            reads=self.reads)

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L200-L242
partition: train

repo: openvax/isovar
path: isovar/string_helpers.py
func_name: trim_N_nucleotides
language: python
code:

    def trim_N_nucleotides(prefix, suffix):
        """
        Drop all occurrences of 'N' from prefix and suffix nucleotide
        strings by trimming.
        """
        if 'N' in prefix:
            # trim prefix to exclude all occurrences of N
            rightmost_index = prefix.rfind('N')
            logger.debug(
                "Trimming %d nucleotides from read prefix '%s'",
                rightmost_index + 1, prefix)
            prefix = prefix[rightmost_index + 1:]

        if 'N' in suffix:
            leftmost_index = suffix.find('N')
            logger.debug(
                "Trimming %d nucleotides from read suffix '%s'",
                len(suffix) - leftmost_index, suffix)
            suffix = suffix[:leftmost_index]
        return prefix, suffix

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/string_helpers.py#L23-L44
partition: train

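The trimming rule in isolation, on hypothetical strings: everything up to the rightmost 'N' of the prefix, and everything from the leftmost 'N' of the suffix onward, is discarded:

    prefix, suffix = "ACNNGT", "CCNTT"

    trimmed_prefix = prefix[prefix.rfind('N') + 1:]   # "GT"
    trimmed_suffix = suffix[:suffix.find('N')]        # "CC"

    assert (trimmed_prefix, trimmed_suffix) == ("GT", "CC")
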
repo: openvax/isovar
path: isovar/string_helpers.py
func_name: convert_from_bytes_if_necessary
language: python
code:

    def convert_from_bytes_if_necessary(prefix, suffix):
        """
        Depending on how we extract data from pysam we may end up with
        either a string or a byte array of nucleotides. For consistency
        and simplicity, we want to only use strings in the rest of our
        code.
        """
        if isinstance(prefix, bytes):
            prefix = prefix.decode('ascii')

        if isinstance(suffix, bytes):
            suffix = suffix.decode('ascii')

        return prefix, suffix

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/string_helpers.py#L46-L58
partition: train

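A quick check of the normalization behavior on hypothetical inputs, assuming the function above is in scope:

    prefix, suffix = convert_from_bytes_if_necessary(b"ACGT", "TTAA")
    assert (prefix, suffix) == ("ACGT", "TTAA")
    assert isinstance(prefix, str) and isinstance(suffix, str)
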
repo: inveniosoftware/invenio-indexer
path: invenio_indexer/api.py
func_name: Producer.publish
language: python
code:

    def publish(self, data, **kwargs):
        """Validate operation type."""
        assert data.get('op') in {'index', 'create', 'delete', 'update'}
        return super(Producer, self).publish(data, **kwargs)

sha: 1460aa8976b449d9a3a99d356322b158e9be6f80
url: https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L36-L39
partition: train

repo: inveniosoftware/invenio-indexer
path: invenio_indexer/api.py
func_name: RecordIndexer.index
language: python
code:

    def index(self, record):
        """Index a record.

        The caller is responsible for ensuring that the record has already
        been committed to the database. If a newer version of a record has
        already been indexed then the provided record will not be indexed.
        This behavior can be controlled by providing a different
        ``version_type`` when initializing ``RecordIndexer``.

        :param record: Record instance.
        """
        index, doc_type = self.record_to_index(record)

        return self.client.index(
            id=str(record.id),
            version=record.revision_id,
            version_type=self._version_type,
            index=index,
            doc_type=doc_type,
            body=self._prepare_record(record, index, doc_type),
        )

sha: 1460aa8976b449d9a3a99d356322b158e9be6f80
url: https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L106-L126
partition: train

repo: inveniosoftware/invenio-indexer
path: invenio_indexer/api.py
func_name: RecordIndexer.process_bulk_queue
language: python
code:

    def process_bulk_queue(self, es_bulk_kwargs=None):
        """Process bulk indexing queue.

        :param dict es_bulk_kwargs: Passed to
            :func:`elasticsearch:elasticsearch.helpers.bulk`.
        """
        with current_celery_app.pool.acquire(block=True) as conn:
            consumer = Consumer(
                connection=conn,
                queue=self.mq_queue.name,
                exchange=self.mq_exchange.name,
                routing_key=self.mq_routing_key,
            )

            req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']

            es_bulk_kwargs = es_bulk_kwargs or {}
            count = bulk(
                self.client,
                self._actionsiter(consumer.iterqueue()),
                stats_only=True,
                request_timeout=req_timeout,
                **es_bulk_kwargs
            )

            consumer.close()

        return count

sha: 1460aa8976b449d9a3a99d356322b158e9be6f80
url: https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L166-L193
partition: train

repo: inveniosoftware/invenio-indexer
path: invenio_indexer/api.py
func_name: RecordIndexer._bulk_op
language: python
code:

    def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
        """Index record in Elasticsearch asynchronously.

        :param record_id_iterator: Iterator that yields record UUIDs.
        :param op_type: Indexing operation (one of ``index``, ``create``,
            ``delete`` or ``update``).
        :param index: The Elasticsearch index. (Default: ``None``)
        :param doc_type: The Elasticsearch doc_type. (Default: ``None``)
        """
        with self.create_producer() as producer:
            for rec in record_id_iterator:
                producer.publish(dict(
                    id=str(rec),
                    op=op_type,
                    index=index,
                    doc_type=doc_type
                ))

sha: 1460aa8976b449d9a3a99d356322b158e9be6f80
url: https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L209-L225
partition: train

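A sketch of the shape of each message this publishes to the queue, with hypothetical values; when index and doc_type are None they are resolved from the record at indexing time:

    message = dict(
        id='00000000-0000-0000-0000-000000000000',  # record UUID (hypothetical)
        op='index',      # one of: index, create, delete, update
        index=None,      # optional Elasticsearch index override
        doc_type=None,   # optional Elasticsearch doc_type override
    )
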
repo: inveniosoftware/invenio-indexer
path: invenio_indexer/api.py
func_name: RecordIndexer._actionsiter
language: python
code:

    def _actionsiter(self, message_iterator):
        """Iterate bulk actions.

        :param message_iterator: Iterator yielding messages from a queue.
        """
        for message in message_iterator:
            payload = message.decode()
            try:
                if payload['op'] == 'delete':
                    yield self._delete_action(payload)
                else:
                    yield self._index_action(payload)
                message.ack()
            except NoResultFound:
                message.reject()
            except Exception:
                message.reject()
                current_app.logger.error(
                    "Failed to index record {0}".format(payload.get('id')),
                    exc_info=True)

sha: 1460aa8976b449d9a3a99d356322b158e9be6f80
url: https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L227-L246
partition: train

repo: inveniosoftware/invenio-indexer
path: invenio_indexer/api.py
func_name: RecordIndexer._delete_action
language: python
code:

    def _delete_action(self, payload):
        """Bulk delete action.

        :param payload: Decoded message body.
        :returns: Dictionary defining an Elasticsearch bulk 'delete' action.
        """
        index, doc_type = payload.get('index'), payload.get('doc_type')
        if not (index and doc_type):
            record = Record.get_record(payload['id'])
            index, doc_type = self.record_to_index(record)

        return {
            '_op_type': 'delete',
            '_index': index,
            '_type': doc_type,
            '_id': payload['id'],
        }

sha: 1460aa8976b449d9a3a99d356322b158e9be6f80
url: https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L248-L264
partition: train

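A sketch of the bulk action dictionary produced for a hypothetical payload that already carries index and doc_type, so no database lookup is needed:

    payload = {'id': '1234', 'index': 'records', 'doc_type': 'record'}

    action = {
        '_op_type': 'delete',
        '_index': payload['index'],
        '_type': payload['doc_type'],
        '_id': payload['id'],
    }
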
repo: inveniosoftware/invenio-indexer
path: invenio_indexer/api.py
func_name: RecordIndexer._index_action
language: python
code:

    def _index_action(self, payload):
        """Bulk index action.

        :param payload: Decoded message body.
        :returns: Dictionary defining an Elasticsearch bulk 'index' action.
        """
        record = Record.get_record(payload['id'])
        index, doc_type = self.record_to_index(record)

        return {
            '_op_type': 'index',
            '_index': index,
            '_type': doc_type,
            '_id': str(record.id),
            '_version': record.revision_id,
            '_version_type': self._version_type,
            '_source': self._prepare_record(record, index, doc_type),
        }

sha: 1460aa8976b449d9a3a99d356322b158e9be6f80
url: https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L266-L283
partition: train

repo: inveniosoftware/invenio-indexer
path: invenio_indexer/api.py
func_name: RecordIndexer._prepare_record
language: python
code:

    def _prepare_record(record, index, doc_type):
        """Prepare record data for indexing.

        :param record: The record to prepare.
        :param index: The Elasticsearch index.
        :param doc_type: The Elasticsearch document type.
        :returns: The record metadata.
        """
        if current_app.config['INDEXER_REPLACE_REFS']:
            data = copy.deepcopy(record.replace_refs())
        else:
            data = record.dumps()

        data['_created'] = pytz.utc.localize(record.created).isoformat() \
            if record.created else None
        data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
            if record.updated else None

        # Allow modification of data prior to sending to Elasticsearch.
        before_record_index.send(
            current_app._get_current_object(),
            json=data,
            record=record,
            index=index,
            doc_type=doc_type,
        )
        return data

sha: 1460aa8976b449d9a3a99d356322b158e9be6f80
url: https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L286-L313
partition: train

repo: openvax/isovar
path: isovar/assembly.py
func_name: greedy_merge_helper
language: python
code:

    def greedy_merge_helper(
            variant_sequences,
            min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
        """
        Returns a list of merged VariantSequence objects, and True if any
        were successfully merged.
        """
        merged_variant_sequences = {}
        merged_any = False

        # here we'll keep track of sequences that haven't been merged yet,
        # and add them in at the end
        unmerged_variant_sequences = set(variant_sequences)

        for i in range(len(variant_sequences)):
            sequence1 = variant_sequences[i]
            # it works to loop over the triangle (i+1 onwards) because
            # combine() tries flipping the arguments if sequence1 is on the
            # right of sequence2
            for j in range(i + 1, len(variant_sequences)):
                sequence2 = variant_sequences[j]
                combined = sequence1.combine(sequence2)
                if combined is None:
                    continue
                if combined.sequence in merged_variant_sequences:
                    existing = merged_variant_sequences[combined.sequence]
                    # the existing VariantSequence and the newly merged
                    # VariantSequence should differ only in which reads
                    # support them
                    combined = combined.add_reads(existing.reads)
                merged_variant_sequences[combined.sequence] = combined
                unmerged_variant_sequences.discard(sequence1)
                unmerged_variant_sequences.discard(sequence2)
                merged_any = True

        result = list(merged_variant_sequences.values()) + \
            list(unmerged_variant_sequences)
        return result, merged_any

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/assembly.py#L27-L58
partition: train

repo: openvax/isovar
path: isovar/assembly.py
func_name: greedy_merge
language: python
code:

    def greedy_merge(
            variant_sequences,
            min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
        """
        Greedily merge overlapping sequences into longer sequences.

        Accepts a collection of VariantSequence objects and returns another
        collection of elongated variant sequences. The reads field of the
        returned VariantSequence object will contain reads which only
        partially overlap the full sequence.
        """
        merged_any = True
        while merged_any:
            variant_sequences, merged_any = greedy_merge_helper(
                variant_sequences,
                min_overlap_size=min_overlap_size)
        return variant_sequences

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/assembly.py#L60-L76
partition: train

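greedy_merge drives greedy_merge_helper to a fixed point: it reruns the merge pass until a pass reports no change. A minimal sketch of that generic pattern, with a hypothetical step function unrelated to isovar:

    def iterate_to_fixed_point(state, step):
        # step must return (new_state, changed); stop once nothing changes
        changed = True
        while changed:
            state, changed = step(state)
        return state

    # e.g. repeatedly dropping adjacent duplicates from a list:
    def dedupe_pass(xs):
        out, changed = [], False
        for x in xs:
            if out and out[-1] == x:
                changed = True
            else:
                out.append(x)
        return out, changed

    assert iterate_to_fixed_point([1, 1, 2, 2, 2, 3], dedupe_pass) == [1, 2, 3]
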
repo: openvax/isovar
path: isovar/assembly.py
func_name: collapse_substrings
language: python
code:

    def collapse_substrings(variant_sequences):
        """
        Combine shorter sequences which are fully contained in longer
        sequences.

        Parameters
        ----------
        variant_sequences : list
            List of VariantSequence objects

        Returns a (potentially shorter) list without any contained
        subsequences.
        """
        if len(variant_sequences) <= 1:
            # if we don't have at least two VariantSequences then just
            # return the input
            return variant_sequences

        # dictionary mapping VariantSequence objects to sets of reads
        # they absorb from substring VariantSequences
        extra_reads_from_substrings = defaultdict(set)
        result_list = []
        # sort by longest to shortest total length
        for short_variant_sequence in sorted(
                variant_sequences,
                key=lambda seq: -len(seq)):
            found_superstring = False
            for long_variant_sequence in result_list:
                found_superstring = long_variant_sequence.contains(
                    short_variant_sequence)
                if found_superstring:
                    extra_reads_from_substrings[long_variant_sequence].update(
                        short_variant_sequence.reads)
            if not found_superstring:
                result_list.append(short_variant_sequence)

        # add to each VariantSequence the reads it absorbed from dropped
        # substrings and then return
        return [
            variant_sequence.add_reads(
                extra_reads_from_substrings[variant_sequence])
            for variant_sequence in result_list
        ]

sha: b39b684920e3f6b344851d6598a1a1c67bce913b
url: https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/assembly.py#L78-L116
partition: train

openvax/isovar
isovar/assembly.py
iterative_overlap_assembly
def iterative_overlap_assembly( variant_sequences, min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE): """ Assembles longer sequences from reads centered on a variant by between merging all pairs of overlapping sequences and collapsing shorter sequences onto every longer sequence which contains them. Returns a list of variant sequences, sorted by decreasing read support. """ if len(variant_sequences) <= 1: # if we don't have at least two sequences to start with then # skip the whole mess below return variant_sequences # reduce the number of inputs to the merge algorithm by first collapsing # shorter sequences onto the longer sequences which contain them n_before_collapse = len(variant_sequences) variant_sequences = collapse_substrings(variant_sequences) n_after_collapse = len(variant_sequences) logger.info( "Collapsed %d -> %d sequences", n_before_collapse, n_after_collapse) merged_variant_sequences = greedy_merge(variant_sequences, min_overlap_size) return list(sorted( merged_variant_sequences, key=lambda seq: -len(seq.reads)))
python
def iterative_overlap_assembly( variant_sequences, min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE): """ Assembles longer sequences from reads centered on a variant by iteratively merging all pairs of overlapping sequences and collapsing shorter sequences onto every longer sequence which contains them. Returns a list of variant sequences, sorted by decreasing read support. """ if len(variant_sequences) <= 1: # if we don't have at least two sequences to start with then # skip the whole mess below return variant_sequences # reduce the number of inputs to the merge algorithm by first collapsing # shorter sequences onto the longer sequences which contain them n_before_collapse = len(variant_sequences) variant_sequences = collapse_substrings(variant_sequences) n_after_collapse = len(variant_sequences) logger.info( "Collapsed %d -> %d sequences", n_before_collapse, n_after_collapse) merged_variant_sequences = greedy_merge(variant_sequences, min_overlap_size) return list(sorted( merged_variant_sequences, key=lambda seq: -len(seq.reads)))
[ "def", "iterative_overlap_assembly", "(", "variant_sequences", ",", "min_overlap_size", "=", "MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE", ")", ":", "if", "len", "(", "variant_sequences", ")", "<=", "1", ":", "return", "variant_sequences", "n_before_collapse", "=", "len", "(", "variant_sequences", ")", "variant_sequences", "=", "collapse_substrings", "(", "variant_sequences", ")", "n_after_collapse", "=", "len", "(", "variant_sequences", ")", "logger", ".", "info", "(", "\"Collapsed %d -> %d sequences\"", ",", "n_before_collapse", ",", "n_after_collapse", ")", "merged_variant_sequences", "=", "greedy_merge", "(", "variant_sequences", ",", "min_overlap_size", ")", "return", "list", "(", "sorted", "(", "merged_variant_sequences", ",", "key", "=", "lambda", "seq", ":", "-", "len", "(", "seq", ".", "reads", ")", ")", ")" ]
Assembles longer sequences from reads centered on a variant by iteratively merging all pairs of overlapping sequences and collapsing shorter sequences onto every longer sequence which contains them. Returns a list of variant sequences, sorted by decreasing read support.
[ "Assembles", "longer", "sequences", "from", "reads", "centered", "on", "a", "variant", "by", "between", "merging", "all", "pairs", "of", "overlapping", "sequences", "and", "collapsing", "shorter", "sequences", "onto", "every", "longer", "sequence", "which", "contains", "them", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/assembly.py#L118-L146
train
openvax/isovar
isovar/common.py
groupby
def groupby(xs, key_fn): """ Group elements of the list `xs` by keys generated from calling `key_fn`. Returns a dictionary which maps keys to sub-lists of `xs`. """ result = defaultdict(list) for x in xs: key = key_fn(x) result[key].append(x) return result
python
def groupby(xs, key_fn): """ Group elements of the list `xs` by keys generated from calling `key_fn`. Returns a dictionary which maps keys to sub-lists of `xs`. """ result = defaultdict(list) for x in xs: key = key_fn(x) result[key].append(x) return result
[ "def", "groupby", "(", "xs", ",", "key_fn", ")", ":", "result", "=", "defaultdict", "(", "list", ")", "for", "x", "in", "xs", ":", "key", "=", "key_fn", "(", "x", ")", "result", "[", "key", "]", ".", "append", "(", "x", ")", "return", "result" ]
Group elements of the list `xs` by keys generated from calling `key_fn`. Returns a dictionary which maps keys to sub-lists of `xs`.
[ "Group", "elements", "of", "the", "list", "xs", "by", "keys", "generated", "from", "calling", "key_fn", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/common.py#L27-L37
train
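A quick illustration of this groupby (unlike itertools.groupby, it does not require sorted input):

from isovar.common import groupby  # assumed import path

words = ["apple", "avocado", "banana", "cherry"]
by_initial = groupby(words, key_fn=lambda w: w[0])
# {'a': ['apple', 'avocado'], 'b': ['banana'], 'c': ['cherry']}
# (the result is a defaultdict(list), which compares equal to that dict)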
bskinn/opan
opan/utils/vector.py
ortho_basis
def ortho_basis(normal, ref_vec=None): """Generates an orthonormal basis in the plane perpendicular to `normal` The orthonormal basis generated spans the plane defined with `normal` as its normal vector. The handedness of `on1` and `on2` in the returned basis is such that: .. math:: \\mathsf{on1} \\times \\mathsf{on2} = {\\mathsf{normal} \\over \\left\\| \\mathsf{normal}\\right\\|} `normal` must be expressible as a one-dimensional |nparray| of length 3. Parameters ---------- normal length-3 |npfloat_| -- The orthonormal basis output will span the plane perpendicular to `normal`. ref_vec length-3 |npfloat_|, optional -- If specified, `on1` will be the normalized projection of `ref_vec` onto the plane perpendicular to `normal`. Default is |None|. Returns ------- on1 length-3 |npfloat_| -- First vector defining the orthonormal basis in the plane normal to `normal` on2 length-3 |npfloat_| -- Second vector defining the orthonormal basis in the plane normal to `normal` Raises ------ ~exceptions.ValueError If `normal` or `ref_vec` is not expressible as a 1-D vector with 3 elements ~opan.error.VectorError (typecode :attr:`~opan.error.VectorError.NONPRL`) If `ref_vec` is specified and it is insufficiently non- parallel with respect to `normal` """ # Imports for library functions import numpy as np from scipy import linalg as spla from scipy import random as sprnd from ..const import PRM from ..error import VectorError # Internal parameters # Magnitude of the perturbation from 'normal' in constructing a random rv RAND_MAG = 0.25 # Test 'normal' for shape and length if not len(normal.shape) == 1: raise ValueError("'normal' is not a vector") ## end if if not normal.shape[0] == 3: raise ValueError("Length of 'normal' is not three") ## end if # Normalize to concise variable 'nv' nv = normal / spla.norm(normal) # Test for specification of ref_vec in the function call if ref_vec is None: # ref_vec not specified. # # Generate reference vector by generation of a random perturbation # vector suitably non-parallel to norm_vec # Generate suitable randomizer, looping as needed rv = nv while parallel_check(nv, rv): rv = np.float64(1.0 - RAND_MAG + 2 * RAND_MAG * sprnd.rand(3)) ## do loop # Calculate rejection of perturbed vector on the normal, then # normalize rv = rej(rv, nv) rv = rv / spla.norm(rv) else: # ref_vec specified, go ahead and use. Start with validity check. if not len(ref_vec.shape) == 1: raise ValueError("ref_vec is not a vector") ## end if if not ref_vec.shape[0] == 3: raise ValueError("ref_vec length is not three") ## end if # Normalize ref_vec to 'rv' rv = ref_vec / spla.norm(ref_vec) # Check for collinearity of nv and rv; raise error if too close if parallel_check(nv, rv): # Essentially equal or opposite vectors, making them too nearly # parallel. raise VectorError(VectorError.NONPRL, "'normal' and 'ref_vec' are too nearly parallel.", "") ## end if # rv is ok to use from here ## end try # on2 is the unit vector parallel to nv x rv on2 = np.cross(nv, rv) on2 = on2 / spla.norm(on2) # on1 is on2 x nv (normalization should not be necessary here, but is # performed just in case) on1 = np.cross(on2, nv) on1 = on1 / spla.norm(on1) # Return the spanning vectors return on1, on2
python
def ortho_basis(normal, ref_vec=None): """Generates an orthonormal basis in the plane perpendicular to `normal` The orthonormal basis generated spans the plane defined with `normal` as its normal vector. The handedness of `on1` and `on2` in the returned basis is such that: .. math:: \\mathsf{on1} \\times \\mathsf{on2} = {\\mathsf{normal} \\over \\left\\| \\mathsf{normal}\\right\\|} `normal` must be expressible as a one-dimensional |nparray| of length 3. Parameters ---------- normal length-3 |npfloat_| -- The orthonormal basis output will span the plane perpendicular to `normal`. ref_vec length-3 |npfloat_|, optional -- If specified, `on1` will be the normalized projection of `ref_vec` onto the plane perpendicular to `normal`. Default is |None|. Returns ------- on1 length-3 |npfloat_| -- First vector defining the orthonormal basis in the plane normal to `normal` on2 length-3 |npfloat_| -- Second vector defining the orthonormal basis in the plane normal to `normal` Raises ------ ~exceptions.ValueError If `normal` or `ref_vec` is not expressible as a 1-D vector with 3 elements ~opan.error.VectorError (typecode :attr:`~opan.error.VectorError.NONPRL`) If `ref_vec` is specified and it is insufficiently non- parallel with respect to `normal` """ # Imports for library functions import numpy as np from scipy import linalg as spla from scipy import random as sprnd from ..const import PRM from ..error import VectorError # Internal parameters # Magnitude of the perturbation from 'normal' in constructing a random rv RAND_MAG = 0.25 # Test 'normal' for shape and length if not len(normal.shape) == 1: raise ValueError("'normal' is not a vector") ## end if if not normal.shape[0] == 3: raise ValueError("Length of 'normal' is not three") ## end if # Normalize to concise variable 'nv' nv = normal / spla.norm(normal) # Test for specification of ref_vec in the function call if ref_vec is None: # ref_vec not specified. # # Generate reference vector by generation of a random perturbation # vector suitably non-parallel to norm_vec # Generate suitable randomizer, looping as needed rv = nv while parallel_check(nv, rv): rv = np.float64(1.0 - RAND_MAG + 2 * RAND_MAG * sprnd.rand(3)) ## do loop # Calculate rejection of perturbed vector on the normal, then # normalize rv = rej(rv, nv) rv = rv / spla.norm(rv) else: # ref_vec specified, go ahead and use. Start with validity check. if not len(ref_vec.shape) == 1: raise ValueError("ref_vec is not a vector") ## end if if not ref_vec.shape[0] == 3: raise ValueError("ref_vec length is not three") ## end if # Normalize ref_vec to 'rv' rv = ref_vec / spla.norm(ref_vec) # Check for collinearity of nv and rv; raise error if too close if parallel_check(nv, rv): # Essentially equal or opposite vectors, making them too nearly # parallel. raise VectorError(VectorError.NONPRL, "'normal' and 'ref_vec' are too nearly parallel.", "") ## end if # rv is ok to use from here ## end try # on2 is the unit vector parallel to nv x rv on2 = np.cross(nv, rv) on2 = on2 / spla.norm(on2) # on1 is on2 x nv (normalization should not be necessary here, but is # performed just in case) on1 = np.cross(on2, nv) on1 = on1 / spla.norm(on1) # Return the spanning vectors return on1, on2
[ "def", "ortho_basis", "(", "normal", ",", "ref_vec", "=", "None", ")", ":", "import", "numpy", "as", "np", "from", "scipy", "import", "linalg", "as", "spla", "from", "scipy", "import", "random", "as", "sprnd", "from", ".", ".", "const", "import", "PRM", "from", ".", ".", "error", "import", "VectorError", "RAND_MAG", "=", "0.25", "if", "not", "len", "(", "normal", ".", "shape", ")", "==", "1", ":", "raise", "ValueError", "(", "\"'normal' is not a vector\"", ")", "if", "not", "normal", ".", "shape", "[", "0", "]", "==", "3", ":", "raise", "ValueError", "(", "\"Length of 'normal' is not three\"", ")", "nv", "=", "normal", "/", "spla", ".", "norm", "(", "normal", ")", "if", "ref_vec", "is", "None", ":", "rv", "=", "nv", "while", "parallel_check", "(", "nv", ",", "rv", ")", ":", "rv", "=", "np", ".", "float64", "(", "1.0", "-", "RAND_MAG", "+", "2", "*", "RAND_MAG", "*", "sprnd", ".", "rand", "(", "3", ")", ")", "rv", "=", "rej", "(", "rv", ",", "nv", ")", "rv", "=", "rv", "/", "spla", ".", "norm", "(", "rv", ")", "else", ":", "if", "not", "len", "(", "ref_vec", ".", "shape", ")", "==", "1", ":", "raise", "ValueError", "(", "\"ref_vec is not a vector\"", ")", "if", "not", "ref_vec", ".", "shape", "[", "0", "]", "==", "3", ":", "raise", "ValueError", "(", "\"ref_vec length is not three\"", ")", "rv", "=", "ref_vec", "/", "spla", ".", "norm", "(", "ref_vec", ")", "if", "parallel_check", "(", "nv", ",", "rv", ")", ":", "raise", "VectorError", "(", "VectorError", ".", "NONPRL", ",", "\"'normal' and 'ref_vec' are too nearly parallel.\"", ",", "\"\"", ")", "on2", "=", "np", ".", "cross", "(", "nv", ",", "rv", ")", "on2", "=", "on2", "/", "spla", ".", "norm", "(", "on2", ")", "on1", "=", "np", ".", "cross", "(", "on2", ",", "nv", ")", "on1", "=", "on1", "/", "spla", ".", "norm", "(", "on1", ")", "return", "on1", ",", "on2" ]
Generates an orthonormal basis in the plane perpendicular to `normal` The orthonormal basis generated spans the plane defined with `normal` as its normal vector. The handedness of `on1` and `on2` in the returned basis is such that: .. math:: \\mathsf{on1} \\times \\mathsf{on2} = {\\mathsf{normal} \\over \\left\\| \\mathsf{normal}\\right\\|} `normal` must be expressible as a one-dimensional |nparray| of length 3. Parameters ---------- normal length-3 |npfloat_| -- The orthonormal basis output will span the plane perpendicular to `normal`. ref_vec length-3 |npfloat_|, optional -- If specified, `on1` will be the normalized projection of `ref_vec` onto the plane perpendicular to `normal`. Default is |None|. Returns ------- on1 length-3 |npfloat_| -- First vector defining the orthonormal basis in the plane normal to `normal` on2 length-3 |npfloat_| -- Second vector defining the orthonormal basis in the plane normal to `normal` Raises ------ ~exceptions.ValueError If `normal` or `ref_vec` is not expressible as a 1-D vector with 3 elements ~opan.error.VectorError (typecode :attr:`~opan.error.VectorError.NONPRL`) If `ref_vec` is specified and it is insufficiently non- parallel with respect to `normal`
[ "Generates", "an", "orthonormal", "basis", "in", "the", "plane", "perpendicular", "to", "normal" ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L50-L174
train
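A usage sketch for ortho_basis, assuming opan is importable; the assertions simply restate the documented contract (two orthonormal vectors spanning the plane perpendicular to `normal`, with on1 x on2 along the normal direction):

import numpy as np
from opan.utils.vector import ortho_basis  # assumed import path

normal = np.array([0.0, 0.0, 2.0])
on1, on2 = ortho_basis(normal)
assert abs(np.dot(on1, on2)) < 1e-10       # mutually orthogonal
assert abs(np.dot(on1, normal)) < 1e-10    # both lie in the plane
assert abs(np.dot(on2, normal)) < 1e-10
np.testing.assert_allclose(
    np.cross(on1, on2), normal / np.linalg.norm(normal), atol=1e-10)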
bskinn/opan
opan/utils/vector.py
orthonorm_check
def orthonorm_check(a, tol=_DEF.ORTHONORM_TOL, report=False): """Checks orthonormality of the column vectors of a matrix. If a one-dimensional |nparray| is passed to `a`, it is treated as a single column vector, rather than a row matrix of length-one column vectors. The matrix `a` does not need to be square, though it must have at least as many rows as columns, since orthonormality is only possible in N-space with a set of no more than N vectors. (This condition is not directly checked.) Parameters ---------- a R x S |npfloat_| -- 2-D array of column vectors to be checked for orthonormality. tol |npfloat_|, optional -- Tolerance for deviation of dot products from one or zero. Default value is :data:`opan.const.DEF.ORTHONORM_TOL`. report |bool|, optional -- Whether to record and return vectors / vector pairs failing the orthonormality condition. Default is |False|. Returns ------- o |bool| -- Indicates whether column vectors of `a` are orthonormal to within tolerance `tol`. n_fail |list| of |int|, or |None| -- If `report` == |True|: A list of indices of column vectors failing the normality condition, or an empty list if all vectors are normalized. If `report` == |False|: |None| o_fail |list| of 2-tuples of |int|, or |None| -- If `report` == |True|: A list of 2-tuples of indices of column vectors failing the orthogonality condition, or an empty list if all vectors are orthogonal. If `report` == |False|: |None| """ # Imports import numpy as np from .base import delta_fxn #!TODO? orthonorm_check Must add traps to ensure a is a single array, # that it is 2D, that it's all real? To enforce the limits stated # in the docstring? # Initialize return variables orth = True n_fail = [] o_fail = [] # Coerce to float_ matrix. Must treat 1-D vector as column vector. # Should raise an exception for any objects with more than # two dimensions; real and all-numeric are still not yet checked, but # will probably be run-time caught if too bad an object is passed. if len(a.shape) == 1: a_mx = np.matrix(a, dtype=np.float_).T else: a_mx = np.matrix(a, dtype=np.float_) # Split matrix into separate vectors for convenient indexing. a_split = np.hsplit(a_mx, a_mx.shape[1]) # Loop over vectors and check orthonormality. for iter1 in range(a_mx.shape[1]): for iter2 in range(iter1,a_mx.shape[1]): if not abs((a_split[iter1].T * a_split[iter2])[0,0] - np.float_(delta_fxn(iter1, iter2))) <= tol: orth = False if report: if iter1 == iter2: n_fail.append(iter1) else: o_fail.append((iter1, iter2)) # Return results if report: return orth, n_fail, o_fail else: return orth, None, None
python
def orthonorm_check(a, tol=_DEF.ORTHONORM_TOL, report=False): """Checks orthonormality of the column vectors of a matrix. If a one-dimensional |nparray| is passed to `a`, it is treated as a single column vector, rather than a row matrix of length-one column vectors. The matrix `a` does not need to be square, though it must have at least as many rows as columns, since orthonormality is only possible in N-space with a set of no more than N vectors. (This condition is not directly checked.) Parameters ---------- a R x S |npfloat_| -- 2-D array of column vectors to be checked for orthonormality. tol |npfloat_|, optional -- Tolerance for deviation of dot products from one or zero. Default value is :data:`opan.const.DEF.ORTHONORM_TOL`. report |bool|, optional -- Whether to record and return vectors / vector pairs failing the orthonormality condition. Default is |False|. Returns ------- o |bool| -- Indicates whether column vectors of `a` are orthonormal to within tolerance `tol`. n_fail |list| of |int|, or |None| -- If `report` == |True|: A list of indices of column vectors failing the normality condition, or an empty list if all vectors are normalized. If `report` == |False|: |None| o_fail |list| of 2-tuples of |int|, or |None| -- If `report` == |True|: A list of 2-tuples of indices of column vectors failing the orthogonality condition, or an empty list if all vectors are orthogonal. If `report` == |False|: |None| """ # Imports import numpy as np from .base import delta_fxn #!TODO? orthonorm_check Must add traps to ensure a is a single array, # that it is 2D, that it's all real? To enforce the limits stated # in the docstring? # Initialize return variables orth = True n_fail = [] o_fail = [] # Coerce to float_ matrix. Must treat 1-D vector as column vector. # Should raise an exception for any objects with more than # two dimensions; real and all-numeric are still not yet checked, but # will probably be run-time caught if too bad an object is passed. if len(a.shape) == 1: a_mx = np.matrix(a, dtype=np.float_).T else: a_mx = np.matrix(a, dtype=np.float_) # Split matrix into separate vectors for convenient indexing. a_split = np.hsplit(a_mx, a_mx.shape[1]) # Loop over vectors and check orthonormality. for iter1 in range(a_mx.shape[1]): for iter2 in range(iter1,a_mx.shape[1]): if not abs((a_split[iter1].T * a_split[iter2])[0,0] - np.float_(delta_fxn(iter1, iter2))) <= tol: orth = False if report: if iter1 == iter2: n_fail.append(iter1) else: o_fail.append((iter1, iter2)) # Return results if report: return orth, n_fail, o_fail else: return orth, None, None
[ "def", "orthonorm_check", "(", "a", ",", "tol", "=", "_DEF", ".", "ORTHONORM_TOL", ",", "report", "=", "False", ")", ":", "import", "numpy", "as", "np", "from", ".", "base", "import", "delta_fxn", "orth", "=", "True", "n_fail", "=", "[", "]", "o_fail", "=", "[", "]", "if", "len", "(", "a", ".", "shape", ")", "==", "1", ":", "a_mx", "=", "np", ".", "matrix", "(", "a", ",", "dtype", "=", "np", ".", "float_", ")", ".", "T", "else", ":", "a_mx", "=", "np", ".", "matrix", "(", "a", ",", "dtype", "=", "np", ".", "float_", ")", "a_split", "=", "np", ".", "hsplit", "(", "a_mx", ",", "a_mx", ".", "shape", "[", "1", "]", ")", "for", "iter1", "in", "range", "(", "a_mx", ".", "shape", "[", "1", "]", ")", ":", "for", "iter2", "in", "range", "(", "iter1", ",", "a_mx", ".", "shape", "[", "1", "]", ")", ":", "if", "not", "abs", "(", "(", "a_split", "[", "iter1", "]", ".", "T", "*", "a_split", "[", "iter2", "]", ")", "[", "0", ",", "0", "]", "-", "np", ".", "float_", "(", "delta_fxn", "(", "iter1", ",", "iter2", ")", ")", ")", "<=", "tol", ":", "orth", "=", "False", "if", "report", ":", "if", "iter1", "==", "iter2", ":", "n_fail", ".", "append", "(", "iter1", ")", "else", ":", "o_fail", ".", "append", "(", "(", "iter1", ",", "iter2", ")", ")", "if", "report", ":", "return", "orth", ",", "n_fail", ",", "o_fail", "else", ":", "return", "orth", ",", "None", ",", "None" ]
Checks orthonormality of the column vectors of a matrix. If a one-dimensional |nparray| is passed to `a`, it is treated as a single column vector, rather than a row matrix of length-one column vectors. The matrix `a` does not need to be square, though it must have at least as many rows as columns, since orthonormality is only possible in N-space with a set of no more than N vectors. (This condition is not directly checked.) Parameters ---------- a R x S |npfloat_| -- 2-D array of column vectors to be checked for orthonormality. tol |npfloat_|, optional -- Tolerance for deviation of dot products from one or zero. Default value is :data:`opan.const.DEF.ORTHONORM_TOL`. report |bool|, optional -- Whether to record and return vectors / vector pairs failing the orthonormality condition. Default is |False|. Returns ------- o |bool| -- Indicates whether column vectors of `a` are orthonormal to within tolerance `tol`. n_fail |list| of |int|, or |None| -- If `report` == |True|: A list of indices of column vectors failing the normality condition, or an empty list if all vectors are normalized. If `report` == |False|: |None| o_fail |list| of 2-tuples of |int|, or |None| -- If `report` == |True|: A list of 2-tuples of indices of column vectors failing the orthogonality condition, or an empty list if all vectors are orthogonal. If `report` == |False|: |None|
[ "Checks", "orthonormality", "of", "the", "column", "vectors", "of", "a", "matrix", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L179-L282
train
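A small check of both report modes of orthonorm_check; note the implementation relies on np.matrix and np.float_, so an older NumPy (pre-2.0) is assumed:

import numpy as np
from opan.utils.vector import orthonorm_check  # assumed import path

good = np.column_stack(([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))
print(orthonorm_check(good))                # (True, None, None)

bad = np.column_stack(([2.0, 0.0, 0.0], [0.0, 1.0, 0.0]))
ok, n_fail, o_fail = orthonorm_check(bad, report=True)
# ok is False and n_fail == [0]: column 0 is not normalized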
bskinn/opan
opan/utils/vector.py
parallel_check
def parallel_check(vec1, vec2): """Checks whether two vectors are parallel OR anti-parallel. Vectors must be of the same dimension. Parameters ---------- vec1 length-R |npfloat_| -- First vector to compare vec2 length-R |npfloat_| -- Second vector to compare Returns ------- par |bool| -- |True| if (anti-)parallel to within :data:`opan.const.PRM.NON_PARALLEL_TOL` degrees. |False| otherwise. """ # Imports from ..const import PRM import numpy as np # Initialize False par = False # Shape check for n,v in enumerate([vec1, vec2]): if not len(v.shape) == 1: raise ValueError("Bad shape for vector #{0}".format(n)) ## end if ## next v,n if not vec1.shape[0] == vec2.shape[0]: raise ValueError("Vector length mismatch") ## end if # Check for (anti-)parallel character and return angle = vec_angle(vec1, vec2) if min([abs(angle), abs(angle - 180.)]) < PRM.NON_PARALLEL_TOL: par = True ## end if return par
python
def parallel_check(vec1, vec2): """Checks whether two vectors are parallel OR anti-parallel. Vectors must be of the same dimension. Parameters ---------- vec1 length-R |npfloat_| -- First vector to compare vec2 length-R |npfloat_| -- Second vector to compare Returns ------- par |bool| -- |True| if (anti-)parallel to within :data:`opan.const.PRM.NON_PARALLEL_TOL` degrees. |False| otherwise. """ # Imports from ..const import PRM import numpy as np # Initialize False par = False # Shape check for n,v in enumerate([vec1, vec2]): if not len(v.shape) == 1: raise ValueError("Bad shape for vector #{0}".format(n)) ## end if ## next v,n if not vec1.shape[0] == vec2.shape[0]: raise ValueError("Vector length mismatch") ## end if # Check for (anti-)parallel character and return angle = vec_angle(vec1, vec2) if min([abs(angle), abs(angle - 180.)]) < PRM.NON_PARALLEL_TOL: par = True ## end if return par
[ "def", "parallel_check", "(", "vec1", ",", "vec2", ")", ":", "from", ".", ".", "const", "import", "PRM", "import", "numpy", "as", "np", "par", "=", "False", "for", "n", ",", "v", "in", "enumerate", "(", "[", "vec1", ",", "vec2", "]", ")", ":", "if", "not", "len", "(", "v", ".", "shape", ")", "==", "1", ":", "raise", "ValueError", "(", "\"Bad shape for vector #{0}\"", ".", "format", "(", "n", ")", ")", "if", "not", "vec1", ".", "shape", "[", "0", "]", "==", "vec2", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "\"Vector length mismatch\"", ")", "angle", "=", "vec_angle", "(", "vec1", ",", "vec2", ")", "if", "min", "(", "[", "abs", "(", "angle", ")", ",", "abs", "(", "angle", "-", "180.", ")", "]", ")", "<", "PRM", ".", "NON_PARALLEL_TOL", ":", "par", "=", "True", "return", "par" ]
Checks whether two vectors are parallel OR anti-parallel. Vectors must be of the same dimension. Parameters ---------- vec1 length-R |npfloat_| -- First vector to compare vec2 length-R |npfloat_| -- Second vector to compare Returns ------- par |bool| -- |True| if (anti-)parallel to within :data:`opan.const.PRM.NON_PARALLEL_TOL` degrees. |False| otherwise.
[ "Checks", "whether", "two", "vectors", "are", "parallel", "OR", "anti", "-", "parallel", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L288-L335
train
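Quick examples of parallel_check; the angular tolerance comes from opan.const.PRM.NON_PARALLEL_TOL:

import numpy as np
from opan.utils.vector import parallel_check  # assumed import path

v = np.array([1.0, 2.0, 3.0])
print(parallel_check(v, 2.0 * v))                     # True  (parallel)
print(parallel_check(v, -v))                          # True  (anti-parallel)
print(parallel_check(v, np.array([-2.0, 1.0, 0.0])))  # False (perpendicular)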
bskinn/opan
opan/utils/vector.py
proj
def proj(vec, vec_onto): """ Vector projection. Calculated as: .. math:: \\mathsf{vec\\_onto} * \\frac{\\mathsf{vec}\\cdot\\mathsf{vec\\_onto}} {\\mathsf{vec\\_onto}\\cdot\\mathsf{vec\\_onto}} Parameters ---------- vec length-R |npfloat_| -- Vector to project vec_onto length-R |npfloat_| -- Vector onto which `vec` is to be projected Returns ------- proj_vec length-R |npfloat_| -- Projection of `vec` onto `vec_onto` """ # Imports import numpy as np # Ensure vectors if not len(vec.shape) == 1: raise ValueError("'vec' is not a vector") ## end if if not len(vec_onto.shape) == 1: raise ValueError("'vec_onto' is not a vector") ## end if if not vec.shape[0] == vec_onto.shape[0]: raise ValueError("Shape mismatch between vectors") ## end if # Calculate the projection and return proj_vec = np.float_(np.asscalar(np.dot(vec.T, vec_onto))) / \ np.float_(np.asscalar(np.dot(vec_onto.T, vec_onto))) * vec_onto return proj_vec
python
def proj(vec, vec_onto): """ Vector projection. Calculated as: .. math:: \\mathsf{vec\\_onto} * \\frac{\\mathsf{vec}\\cdot\\mathsf{vec\\_onto}} {\\mathsf{vec\\_onto}\\cdot\\mathsf{vec\\_onto}} Parameters ---------- vec length-R |npfloat_| -- Vector to project vec_onto length-R |npfloat_| -- Vector onto which `vec` is to be projected Returns ------- proj_vec length-R |npfloat_| -- Projection of `vec` onto `vec_onto` """ # Imports import numpy as np # Ensure vectors if not len(vec.shape) == 1: raise ValueError("'vec' is not a vector") ## end if if not len(vec_onto.shape) == 1: raise ValueError("'vec_onto' is not a vector") ## end if if not vec.shape[0] == vec_onto.shape[0]: raise ValueError("Shape mismatch between vectors") ## end if # Calculate the projection and return proj_vec = np.float_(np.asscalar(np.dot(vec.T, vec_onto))) / \ np.float_(np.asscalar(np.dot(vec_onto.T, vec_onto))) * vec_onto return proj_vec
[ "def", "proj", "(", "vec", ",", "vec_onto", ")", ":", "import", "numpy", "as", "np", "if", "not", "len", "(", "vec", ".", "shape", ")", "==", "1", ":", "raise", "ValueError", "(", "\"'vec' is not a vector\"", ")", "if", "not", "len", "(", "vec_onto", ".", "shape", ")", "==", "1", ":", "raise", "ValueError", "(", "\"'vec_onto' is not a vector\"", ")", "if", "not", "vec", ".", "shape", "[", "0", "]", "==", "vec_onto", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "\"Shape mismatch between vectors\"", ")", "proj_vec", "=", "np", ".", "float_", "(", "np", ".", "asscalar", "(", "np", ".", "dot", "(", "vec", ".", "T", ",", "vec_onto", ")", ")", ")", "/", "np", ".", "float_", "(", "np", ".", "asscalar", "(", "np", ".", "dot", "(", "vec_onto", ".", "T", ",", "vec_onto", ")", ")", ")", "*", "vec_onto", "return", "proj_vec" ]
Vector projection. Calculated as: .. math:: \\mathsf{vec\\_onto} * \\frac{\\mathsf{vec}\\cdot\\mathsf{vec\\_onto}} {\\mathsf{vec\\_onto}\\cdot\\mathsf{vec\\_onto}} Parameters ---------- vec length-R |npfloat_| -- Vector to project vec_onto length-R |npfloat_| -- Vector onto which `vec` is to be projected Returns ------- proj_vec length-R |npfloat_| -- Projection of `vec` onto `vec_onto`
[ "Vector", "projection", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L341-L386
train
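A worked example of proj; note the implementation calls np.asscalar, which was removed in NumPy 1.23, so an older NumPy is assumed here:

import numpy as np
from opan.utils.vector import proj  # assumed import path

vec = np.array([3.0, 4.0])
onto = np.array([1.0, 0.0])
print(proj(vec, onto))   # [3. 0.] -- the component of vec along onto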
bskinn/opan
opan/utils/vector.py
rej
def rej(vec, vec_onto): """ Vector rejection. Calculated by subtracting from `vec` the projection of `vec` onto `vec_onto`: .. math:: \\mathsf{vec} - \\mathrm{proj}\\left(\\mathsf{vec}, \\ \\mathsf{vec\\_onto}\\right) Parameters ---------- vec length-R |npfloat_| -- Vector to reject vec_onto length-R |npfloat_| -- Vector onto which `vec` is to be rejected Returns ------- rej_vec length-R |npfloat_| -- Rejection of `vec` onto `vec_onto` """ # Imports import numpy as np # Calculate and return. rej_vec = vec - proj(vec, vec_onto) return rej_vec
python
def rej(vec, vec_onto): """ Vector rejection. Calculated by subtracting from `vec` the projection of `vec` onto `vec_onto`: .. math:: \\mathsf{vec} - \\mathrm{proj}\\left(\\mathsf{vec}, \\ \\mathsf{vec\\_onto}\\right) Parameters ---------- vec length-R |npfloat_| -- Vector to reject vec_onto length-R |npfloat_| -- Vector onto which `vec` is to be rejected Returns ------- rej_vec length-R |npfloat_| -- Rejection of `vec` onto `vec_onto` """ # Imports import numpy as np # Calculate and return. rej_vec = vec - proj(vec, vec_onto) return rej_vec
[ "def", "rej", "(", "vec", ",", "vec_onto", ")", ":", "import", "numpy", "as", "np", "rej_vec", "=", "vec", "-", "proj", "(", "vec", ",", "vec_onto", ")", "return", "rej_vec" ]
Vector rejection. Calculated by subtracting from `vec` the projection of `vec` onto `vec_onto`: .. math:: \\mathsf{vec} - \\mathrm{proj}\\left(\\mathsf{vec}, \\ \\mathsf{vec\\_onto}\\right) Parameters ---------- vec length-R |npfloat_| -- Vector to reject vec_onto length-R |npfloat_| -- Vector onto which `vec` is to be rejected Returns ------- rej_vec length-R |npfloat_| -- Rejection of `vec` onto `vec_onto`
[ "Vector", "rejection", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L392-L426
train
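rej complements proj: projection plus rejection reconstructs the original vector.

import numpy as np
from opan.utils.vector import proj, rej  # assumed import path

vec = np.array([3.0, 4.0])
onto = np.array([1.0, 0.0])
print(rej(vec, onto))    # [0. 4.]
np.testing.assert_allclose(proj(vec, onto) + rej(vec, onto), vec)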
bskinn/opan
opan/utils/vector.py
vec_angle
def vec_angle(vec1, vec2): """ Angle between two R-dimensional vectors. Angle calculated as: .. math:: \\arccos\\left[ \\frac{\\mathsf{vec1}\cdot\\mathsf{vec2}} {\\left\\|\\mathsf{vec1}\\right\\| \\left\\|\\mathsf{vec2}\\right\\|} \\right] Parameters ---------- vec1 length-R |npfloat_| -- First vector vec2 length-R |npfloat_| -- Second vector Returns ------- angle |npfloat_| -- Angle between the two vectors in degrees """ # Imports import numpy as np from scipy import linalg as spla from ..const import PRM # Check shape and equal length if len(vec1.shape) != 1: raise ValueError("'vec1' is not a vector") ## end if if len(vec2.shape) != 1: raise ValueError("'vec2' is not a vector") ## end if if vec1.shape[0] != vec2.shape[0]: raise ValueError("Vector lengths are not equal") ## end if # Check magnitudes if spla.norm(vec1) < PRM.ZERO_VEC_TOL: raise ValueError("'vec1' norm is too small") ## end if if spla.norm(vec2) < PRM.ZERO_VEC_TOL: raise ValueError("'vec2' norm is too small") ## end if # Calculate the angle and return. Do in multiple steps to test for # possible >1 or <-1 values from numerical precision errors. dotp = np.dot(vec1, vec2) / spla.norm(vec1) / spla.norm(vec2) if dotp > 1: angle = 0. # pragma: no cover elif dotp < -1: angle = 180. # pragma: no cover else: angle = np.degrees(np.arccos(dotp)) ## end if return angle
python
def vec_angle(vec1, vec2): """ Angle between two R-dimensional vectors. Angle calculated as: .. math:: \\arccos\\left[ \\frac{\\mathsf{vec1}\cdot\\mathsf{vec2}} {\\left\\|\\mathsf{vec1}\\right\\| \\left\\|\\mathsf{vec2}\\right\\|} \\right] Parameters ---------- vec1 length-R |npfloat_| -- First vector vec2 length-R |npfloat_| -- Second vector Returns ------- angle |npfloat_| -- Angle between the two vectors in degrees """ # Imports import numpy as np from scipy import linalg as spla from ..const import PRM # Check shape and equal length if len(vec1.shape) != 1: raise ValueError("'vec1' is not a vector") ## end if if len(vec2.shape) != 1: raise ValueError("'vec2' is not a vector") ## end if if vec1.shape[0] != vec2.shape[0]: raise ValueError("Vector lengths are not equal") ## end if # Check magnitudes if spla.norm(vec1) < PRM.ZERO_VEC_TOL: raise ValueError("'vec1' norm is too small") ## end if if spla.norm(vec2) < PRM.ZERO_VEC_TOL: raise ValueError("'vec2' norm is too small") ## end if # Calculate the angle and return. Do in multiple steps to test for # possible >1 or <-1 values from numerical precision errors. dotp = np.dot(vec1, vec2) / spla.norm(vec1) / spla.norm(vec2) if dotp > 1: angle = 0. # pragma: no cover elif dotp < -1: angle = 180. # pragma: no cover else: angle = np.degrees(np.arccos(dotp)) ## end if return angle
[ "def", "vec_angle", "(", "vec1", ",", "vec2", ")", ":", "import", "numpy", "as", "np", "from", "scipy", "import", "linalg", "as", "spla", "from", ".", ".", "const", "import", "PRM", "if", "len", "(", "vec1", ".", "shape", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"'vec1' is not a vector\"", ")", "if", "len", "(", "vec2", ".", "shape", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"'vec2' is not a vector\"", ")", "if", "vec1", ".", "shape", "[", "0", "]", "!=", "vec2", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "\"Vector lengths are not equal\"", ")", "if", "spla", ".", "norm", "(", "vec1", ")", "<", "PRM", ".", "ZERO_VEC_TOL", ":", "raise", "ValueError", "(", "\"'vec1' norm is too small\"", ")", "if", "spla", ".", "norm", "(", "vec2", ")", "<", "PRM", ".", "ZERO_VEC_TOL", ":", "raise", "ValueError", "(", "\"'vec2' norm is too small\"", ")", "dotp", "=", "np", ".", "dot", "(", "vec1", ",", "vec2", ")", "/", "spla", ".", "norm", "(", "vec1", ")", "/", "spla", ".", "norm", "(", "vec2", ")", "if", "dotp", ">", "1", ":", "angle", "=", "0.", "elif", "dotp", "<", "-", "1", ":", "angle", "=", "180.", "else", ":", "angle", "=", "np", ".", "degrees", "(", "np", ".", "arccos", "(", "dotp", ")", ")", "return", "angle" ]
Angle between two R-dimensional vectors. Angle calculated as: .. math:: \\arccos\\left[ \\frac{\\mathsf{vec1}\cdot\\mathsf{vec2}} {\\left\\|\\mathsf{vec1}\\right\\| \\left\\|\\mathsf{vec2}\\right\\|} \\right] Parameters ---------- vec1 length-R |npfloat_| -- First vector vec2 length-R |npfloat_| -- Second vector Returns ------- angle |npfloat_| -- Angle between the two vectors in degrees
[ "Angle", "between", "two", "R", "-", "dimensional", "vectors", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/vector.py#L432-L499
train
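vec_angle returns degrees; a few sanity checks:

import numpy as np
from opan.utils.vector import vec_angle  # assumed import path

print(vec_angle(np.array([1.0, 0.0]), np.array([0.0, 1.0])))   # 90.0
print(vec_angle(np.array([1.0, 0.0]), np.array([1.0, 1.0])))   # ~45.0
print(vec_angle(np.array([1.0, 0.0]), np.array([-1.0, 0.0])))  # 180.0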
matthewwithanm/django-classbasedsettings
cbsettings/importers.py
new_module
def new_module(name): """ Do all of the gruntwork associated with creating a new module. """ parent = None if '.' in name: parent_name = name.rsplit('.', 1)[0] parent = __import__(parent_name, fromlist=['']) module = imp.new_module(name) sys.modules[name] = module if parent: setattr(parent, name.rsplit('.', 1)[1], module) return module
python
def new_module(name): """ Do all of the gruntwork associated with creating a new module. """ parent = None if '.' in name: parent_name = name.rsplit('.', 1)[0] parent = __import__(parent_name, fromlist=['']) module = imp.new_module(name) sys.modules[name] = module if parent: setattr(parent, name.rsplit('.', 1)[1], module) return module
[ "def", "new_module", "(", "name", ")", ":", "parent", "=", "None", "if", "'.'", "in", "name", ":", "parent_name", "=", "name", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "parent", "=", "__import__", "(", "parent_name", ",", "fromlist", "=", "[", "''", "]", ")", "module", "=", "imp", ".", "new_module", "(", "name", ")", "sys", ".", "modules", "[", "name", "]", "=", "module", "if", "parent", ":", "setattr", "(", "parent", ",", "name", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", ",", "module", ")", "return", "module" ]
Do all of the gruntwork associated with creating a new module.
[ "Do", "all", "of", "the", "gruntwork", "associated", "with", "creating", "a", "new", "module", "." ]
ac9e4362bd1f4954f3e4679b97726cab2b22aea9
https://github.com/matthewwithanm/django-classbasedsettings/blob/ac9e4362bd1f4954f3e4679b97726cab2b22aea9/cbsettings/importers.py#L5-L19
train
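A hedged sketch of new_module in action; the module name below is made up, and since `imp` has been deprecated since Python 3.4 (and removed in 3.12), this mirrors the code's own era rather than current practice:

from cbsettings.importers import new_module  # assumed import path

mod = new_module('demo_pkg')   # hypothetical module name
mod.answer = 42

import demo_pkg                # resolved via sys.modules
print(demo_pkg.answer)         # 42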
openvax/isovar
isovar/allele_counts.py
allele_counts_dataframe
def allele_counts_dataframe(variant_and_allele_reads_generator): """ Creates a DataFrame containing number of reads supporting the ref vs. alt alleles for each variant. """ df_builder = DataFrameBuilder( AlleleCount, extra_column_fns={ "gene": lambda variant, _: ";".join(variant.gene_names), }) for variant, allele_reads in variant_and_allele_reads_generator: counts = count_alleles_at_variant_locus(variant, allele_reads) df_builder.add(variant, counts) return df_builder.to_dataframe()
python
def allele_counts_dataframe(variant_and_allele_reads_generator): """ Creates a DataFrame containing number of reads supporting the ref vs. alt alleles for each variant. """ df_builder = DataFrameBuilder( AlleleCount, extra_column_fns={ "gene": lambda variant, _: ";".join(variant.gene_names), }) for variant, allele_reads in variant_and_allele_reads_generator: counts = count_alleles_at_variant_locus(variant, allele_reads) df_builder.add(variant, counts) return df_builder.to_dataframe()
[ "def", "allele_counts_dataframe", "(", "variant_and_allele_reads_generator", ")", ":", "df_builder", "=", "DataFrameBuilder", "(", "AlleleCount", ",", "extra_column_fns", "=", "{", "\"gene\"", ":", "lambda", "variant", ",", "_", ":", "\";\"", ".", "join", "(", "variant", ".", "gene_names", ")", ",", "}", ")", "for", "variant", ",", "allele_reads", "in", "variant_and_allele_reads_generator", ":", "counts", "=", "count_alleles_at_variant_locus", "(", "variant", ",", "allele_reads", ")", "df_builder", ".", "add", "(", "variant", ",", "counts", ")", "return", "df_builder", ".", "to_dataframe", "(", ")" ]
Creates a DataFrame containing number of reads supporting the ref vs. alt alleles for each variant.
[ "Creates", "a", "DataFrame", "containing", "number", "of", "reads", "supporting", "the", "ref", "vs", ".", "alt", "alleles", "for", "each", "variant", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/allele_counts.py#L46-L59
train
portfoliome/postpy
postpy/extensions.py
install_extension
def install_extension(conn, extension: str): """Install Postgres extension.""" query = 'CREATE EXTENSION IF NOT EXISTS "%s";' with conn.cursor() as cursor: cursor.execute(query, (AsIs(extension),)) installed = check_extension(conn, extension) if not installed: raise psycopg2.ProgrammingError( 'Postgres extension failed installation.', extension )
python
def install_extension(conn, extension: str): """Install Postgres extension.""" query = 'CREATE EXTENSION IF NOT EXISTS "%s";' with conn.cursor() as cursor: cursor.execute(query, (AsIs(extension),)) installed = check_extension(conn, extension) if not installed: raise psycopg2.ProgrammingError( 'Postgres extension failed installation.', extension )
[ "def", "install_extension", "(", "conn", ",", "extension", ":", "str", ")", ":", "query", "=", "'CREATE EXTENSION IF NOT EXISTS \"%s\";'", "with", "conn", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "query", ",", "(", "AsIs", "(", "extension", ")", ",", ")", ")", "installed", "=", "check_extension", "(", "conn", ",", "extension", ")", "if", "not", "installed", ":", "raise", "psycopg2", ".", "ProgrammingError", "(", "'Postgres extension failed installation.'", ",", "extension", ")" ]
Install Postgres extension.
[ "Install", "Postgres", "extension", "." ]
fe26199131b15295fc5f669a0ad2a7f47bf490ee
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/extensions.py#L5-L18
train
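A hedged usage sketch for install_extension: it needs a live PostgreSQL connection and a role permitted to CREATE EXTENSION, and the DSN below is a placeholder.

import psycopg2
from postpy.extensions import install_extension  # assumed import path

conn = psycopg2.connect(dbname='mydb', user='postgres')  # placeholder DSN
install_extension(conn, 'hstore')
conn.commit()
conn.close()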
portfoliome/postpy
postpy/extensions.py
check_extension
def check_extension(conn, extension: str) -> bool: """Check to see if an extension is installed.""" query = 'SELECT installed_version FROM pg_available_extensions WHERE name=%s;' with conn.cursor() as cursor: cursor.execute(query, (extension,)) result = cursor.fetchone() if result is None: raise psycopg2.ProgrammingError( 'Extension is not available for installation.', extension ) else: extension_version = result[0] return bool(extension_version)
python
def check_extension(conn, extension: str) -> bool: """Check to see if an extension is installed.""" query = 'SELECT installed_version FROM pg_available_extensions WHERE name=%s;' with conn.cursor() as cursor: cursor.execute(query, (extension,)) result = cursor.fetchone() if result is None: raise psycopg2.ProgrammingError( 'Extension is not available for installation.', extension ) else: extension_version = result[0] return bool(extension_version)
[ "def", "check_extension", "(", "conn", ",", "extension", ":", "str", ")", "->", "bool", ":", "query", "=", "'SELECT installed_version FROM pg_available_extensions WHERE name=%s;'", "with", "conn", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "query", ",", "(", "extension", ",", ")", ")", "result", "=", "cursor", ".", "fetchone", "(", ")", "if", "result", "is", "None", ":", "raise", "psycopg2", ".", "ProgrammingError", "(", "'Extension is not available for installation.'", ",", "extension", ")", "else", ":", "extension_version", "=", "result", "[", "0", "]", "return", "bool", "(", "extension_version", ")" ]
Check to see if an extension is installed.
[ "Check", "to", "see", "if", "an", "extension", "is", "installed", "." ]
fe26199131b15295fc5f669a0ad2a7f47bf490ee
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/extensions.py#L21-L37
train
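check_extension distinguishes "available but not installed" (returns False) from "unknown to the server" (raises ProgrammingError). With the same placeholder connection as in the previous sketch:

from postpy.extensions import check_extension, install_extension  # assumed

if not check_extension(conn, 'hstore'):
    install_extension(conn, 'hstore')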
kmike/opencorpora-tools
opencorpora/reader.py
make_iterable
def make_iterable(obj, default=None): """ Ensure obj is iterable. """ if obj is None: return default or [] if isinstance(obj, (compat.string_types, compat.integer_types)): return [obj] return obj
python
def make_iterable(obj, default=None): """ Ensure obj is iterable. """ if obj is None: return default or [] if isinstance(obj, (compat.string_types, compat.integer_types)): return [obj] return obj
[ "def", "make_iterable", "(", "obj", ",", "default", "=", "None", ")", ":", "if", "obj", "is", "None", ":", "return", "default", "or", "[", "]", "if", "isinstance", "(", "obj", ",", "(", "compat", ".", "string_types", ",", "compat", ".", "integer_types", ")", ")", ":", "return", "[", "obj", "]", "return", "obj" ]
Ensure obj is iterable.
[ "Ensure", "obj", "is", "iterable", "." ]
26fee106aea1180d2975b3825dcf9b3875e80db1
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L12-L18
train
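make_iterable deliberately wraps strings and integers rather than iterating over them; a few doctest-style examples:

from opencorpora.reader import make_iterable  # assumed import path

make_iterable(None)                  # -> []
make_iterable(None, default=(1,))    # -> (1,)
make_iterable('abc')                 # -> ['abc']  (string wrapped, not split)
make_iterable(7)                     # -> [7]
make_iterable([1, 2])                # -> [1, 2]   (already iterable, unchanged)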
kmike/opencorpora-tools
opencorpora/reader.py
CorpusReader.iter_documents
def iter_documents(self, fileids=None, categories=None, _destroy=False): """ Return an iterator over corpus documents. """ doc_ids = self._filter_ids(fileids, categories) for doc in imap(self.get_document, doc_ids): yield doc if _destroy: doc.destroy()
python
def iter_documents(self, fileids=None, categories=None, _destroy=False): """ Return an iterator over corpus documents. """ doc_ids = self._filter_ids(fileids, categories) for doc in imap(self.get_document, doc_ids): yield doc if _destroy: doc.destroy()
[ "def", "iter_documents", "(", "self", ",", "fileids", "=", "None", ",", "categories", "=", "None", ",", "_destroy", "=", "False", ")", ":", "doc_ids", "=", "self", ".", "_filter_ids", "(", "fileids", ",", "categories", ")", "for", "doc", "in", "imap", "(", "self", ".", "get_document", ",", "doc_ids", ")", ":", "yield", "doc", "if", "_destroy", ":", "doc", ".", "destroy", "(", ")" ]
Return an iterator over corpus documents.
[ "Return", "an", "iterator", "over", "corpus", "documents", "." ]
26fee106aea1180d2975b3825dcf9b3875e80db1
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L176-L182
train
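A hedged usage sketch for iter_documents, assuming an OpenCorpora XML dump on disk; the file name and category string are placeholders, and the title() accessor is an assumed Document method:

from opencorpora.reader import CorpusReader  # matches this record's path

corpus = CorpusReader('annot.opcorpora.xml')            # placeholder path
for doc in corpus.iter_documents(categories=['Тема:ЧасКор:Новости']):
    print(doc.title())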
kmike/opencorpora-tools
opencorpora/reader.py
CorpusReader._create_meta_cache
def _create_meta_cache(self): """ Try to dump metadata to a file. """ try: with open(self._cache_filename, 'wb') as f: compat.pickle.dump(self._document_meta, f, 1) except (IOError, compat.pickle.PickleError): pass
python
def _create_meta_cache(self): """ Try to dump metadata to a file. """ try: with open(self._cache_filename, 'wb') as f: compat.pickle.dump(self._document_meta, f, 1) except (IOError, compat.pickle.PickleError): pass
[ "def", "_create_meta_cache", "(", "self", ")", ":", "try", ":", "with", "open", "(", "self", ".", "_cache_filename", ",", "'wb'", ")", "as", "f", ":", "compat", ".", "pickle", ".", "dump", "(", "self", ".", "_document_meta", ",", "f", ",", "1", ")", "except", "(", "IOError", ",", "compat", ".", "pickle", ".", "PickleError", ")", ":", "pass" ]
Try to dump metadata to a file.
[ "Try", "to", "dump", "metadata", "to", "a", "file", "." ]
26fee106aea1180d2975b3825dcf9b3875e80db1
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L295-L301
train
kmike/opencorpora-tools
opencorpora/reader.py
CorpusReader._load_meta_cache
def _load_meta_cache(self): """ Try to load metadata from file. """ try: if self._should_invalidate_cache(): os.remove(self._cache_filename) else: with open(self._cache_filename, 'rb') as f: self._document_meta = compat.pickle.load(f) except (OSError, IOError, compat.pickle.PickleError, ImportError, AttributeError): pass
python
def _load_meta_cache(self): """ Try to load metadata from file. """ try: if self._should_invalidate_cache(): os.remove(self._cache_filename) else: with open(self._cache_filename, 'rb') as f: self._document_meta = compat.pickle.load(f) except (OSError, IOError, compat.pickle.PickleError, ImportError, AttributeError): pass
[ "def", "_load_meta_cache", "(", "self", ")", ":", "try", ":", "if", "self", ".", "_should_invalidate_cache", "(", ")", ":", "os", ".", "remove", "(", "self", ".", "_cache_filename", ")", "else", ":", "with", "open", "(", "self", ".", "_cache_filename", ",", "'rb'", ")", "as", "f", ":", "self", ".", "_document_meta", "=", "compat", ".", "pickle", ".", "load", "(", "f", ")", "except", "(", "OSError", ",", "IOError", ",", "compat", ".", "pickle", ".", "PickleError", ",", "ImportError", ",", "AttributeError", ")", ":", "pass" ]
Try to load metadata from file.
[ "Try", "to", "load", "metadata", "from", "file", "." ]
26fee106aea1180d2975b3825dcf9b3875e80db1
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L303-L313
train
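The two methods above implement a best-effort pickle cache: both dump and load failures are swallowed so the corpus still works without a cache. A standalone sketch of the same pattern, with a placeholder file name:

import pickle

CACHE = 'corpus.meta.cache'   # placeholder path

def save_meta(meta):
    try:
        with open(CACHE, 'wb') as f:
            pickle.dump(meta, f, 1)   # protocol 1, as in the code above
    except (IOError, pickle.PickleError):
        pass                          # caching is optional; ignore failures

def load_meta():
    try:
        with open(CACHE, 'rb') as f:
            return pickle.load(f)
    except (OSError, IOError, pickle.PickleError):
        return None                   # fall back to recomputing metadata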
kmike/opencorpora-tools
opencorpora/reader.py
CorpusReader._compute_document_meta
def _compute_document_meta(self): """ Return documents meta information that can be used for fast document lookups. Meta information consists of documents titles, categories and positions in file. """ meta = OrderedDict() bounds_iter = xml_utils.bounds(self.filename, start_re=r'<text id="(\d+)"[^>]*name="([^"]*)"', end_re=r'</text>') for match, bounds in bounds_iter: doc_id, title = str(match.group(1)), match.group(2) title = xml_utils.unescape_attribute(title) # cache categories xml_data = xml_utils.load_chunk(self.filename, bounds) doc = Document(compat.ElementTree.XML(xml_data.encode('utf8'))) meta[doc_id] = _DocumentMeta(title, bounds, doc.categories()) return meta
python
def _compute_document_meta(self): """ Return documents meta information that can be used for fast document lookups. Meta information consists of documents titles, categories and positions in file. """ meta = OrderedDict() bounds_iter = xml_utils.bounds(self.filename, start_re=r'<text id="(\d+)"[^>]*name="([^"]*)"', end_re=r'</text>') for match, bounds in bounds_iter: doc_id, title = str(match.group(1)), match.group(2) title = xml_utils.unescape_attribute(title) # cache categories xml_data = xml_utils.load_chunk(self.filename, bounds) doc = Document(compat.ElementTree.XML(xml_data.encode('utf8'))) meta[doc_id] = _DocumentMeta(title, bounds, doc.categories()) return meta
[ "def", "_compute_document_meta", "(", "self", ")", ":", "meta", "=", "OrderedDict", "(", ")", "bounds_iter", "=", "xml_utils", ".", "bounds", "(", "self", ".", "filename", ",", "start_re", "=", "r'<text id=\"(\\d+)\"[^>]*name=\"([^\"]*)\"'", ",", "end_re", "=", "r'</text>'", ")", "for", "match", ",", "bounds", "in", "bounds_iter", ":", "doc_id", ",", "title", "=", "str", "(", "match", ".", "group", "(", "1", ")", ")", ",", "match", ".", "group", "(", "2", ")", "title", "=", "xml_utils", ".", "unescape_attribute", "(", "title", ")", "xml_data", "=", "xml_utils", ".", "load_chunk", "(", "self", ".", "filename", ",", "bounds", ")", "doc", "=", "Document", "(", "compat", ".", "ElementTree", ".", "XML", "(", "xml_data", ".", "encode", "(", "'utf8'", ")", ")", ")", "meta", "[", "doc_id", "]", "=", "_DocumentMeta", "(", "title", ",", "bounds", ",", "doc", ".", "categories", "(", ")", ")", "return", "meta" ]
Return documents meta information that can be used for fast document lookups. Meta information consists of documents titles, categories and positions in file.
[ "Return", "documents", "meta", "information", "that", "can", "be", "used", "for", "fast", "document", "lookups", ".", "Meta", "information", "consists", "of", "documents", "titles", "categories", "and", "positions", "in", "file", "." ]
26fee106aea1180d2975b3825dcf9b3875e80db1
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L320-L342
train
kmike/opencorpora-tools
opencorpora/reader.py
CorpusReader._document_xml
def _document_xml(self, doc_id): """ Return xml Element for the document document_id. """ doc_str = self._get_doc_by_raw_offset(str(doc_id)) return compat.ElementTree.XML(doc_str.encode('utf8'))
python
def _document_xml(self, doc_id): """ Return xml Element for the document document_id. """ doc_str = self._get_doc_by_raw_offset(str(doc_id)) return compat.ElementTree.XML(doc_str.encode('utf8'))
[ "def", "_document_xml", "(", "self", ",", "doc_id", ")", ":", "doc_str", "=", "self", ".", "_get_doc_by_raw_offset", "(", "str", "(", "doc_id", ")", ")", "return", "compat", ".", "ElementTree", ".", "XML", "(", "doc_str", ".", "encode", "(", "'utf8'", ")", ")" ]
Return xml Element for the document document_id.
[ "Return", "xml", "Element", "for", "the", "document", "document_id", "." ]
26fee106aea1180d2975b3825dcf9b3875e80db1
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L344-L347
train
kmike/opencorpora-tools
opencorpora/reader.py
CorpusReader._get_doc_by_line_offset
def _get_doc_by_line_offset(self, doc_id): """ Load document from xml using line offset information. This is much slower than _get_doc_by_raw_offset but should work everywhere. """ bounds = self._get_meta()[str(doc_id)].bounds return xml_utils.load_chunk(self.filename, bounds, slow=True)
python
def _get_doc_by_line_offset(self, doc_id): """ Load document from xml using line offset information. This is much slower than _get_doc_by_raw_offset but should work everywhere. """ bounds = self._get_meta()[str(doc_id)].bounds return xml_utils.load_chunk(self.filename, bounds, slow=True)
[ "def", "_get_doc_by_line_offset", "(", "self", ",", "doc_id", ")", ":", "bounds", "=", "self", ".", "_get_meta", "(", ")", "[", "str", "(", "doc_id", ")", "]", ".", "bounds", "return", "xml_utils", ".", "load_chunk", "(", "self", ".", "filename", ",", "bounds", ",", "slow", "=", "True", ")" ]
Load document from xml using line offset information. This is much slower than _get_doc_by_raw_offset but should work everywhere.
[ "Load", "document", "from", "xml", "using", "line", "offset", "information", ".", "This", "is", "much", "slower", "than", "_get_doc_by_raw_offset", "but", "should", "work", "everywhere", "." ]
26fee106aea1180d2975b3825dcf9b3875e80db1
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader.py#L357-L364
train
pyviz/imagen
imagen/colorspaces.py
_threeDdot_simple
def _threeDdot_simple(M,a): "Return Ma, where M is a 3x3 transformation matrix, for each pixel" result = np.empty(a.shape,dtype=a.dtype) for i in range(a.shape[0]): for j in range(a.shape[1]): A = np.array([a[i,j,0],a[i,j,1],a[i,j,2]]).reshape((3,1)) L = np.dot(M,A) result[i,j,0] = L[0] result[i,j,1] = L[1] result[i,j,2] = L[2] return result
python
def _threeDdot_simple(M,a): "Return Ma, where M is a 3x3 transformation matrix, for each pixel" result = np.empty(a.shape,dtype=a.dtype) for i in range(a.shape[0]): for j in range(a.shape[1]): A = np.array([a[i,j,0],a[i,j,1],a[i,j,2]]).reshape((3,1)) L = np.dot(M,A) result[i,j,0] = L[0] result[i,j,1] = L[1] result[i,j,2] = L[2] return result
[ "def", "_threeDdot_simple", "(", "M", ",", "a", ")", ":", "\"Return Ma, where M is a 3x3 transformation matrix, for each pixel\"", "result", "=", "np", ".", "empty", "(", "a", ".", "shape", ",", "dtype", "=", "a", ".", "dtype", ")", "for", "i", "in", "range", "(", "a", ".", "shape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "a", ".", "shape", "[", "1", "]", ")", ":", "A", "=", "np", ".", "array", "(", "[", "a", "[", "i", ",", "j", ",", "0", "]", ",", "a", "[", "i", ",", "j", ",", "1", "]", ",", "a", "[", "i", ",", "j", ",", "2", "]", "]", ")", ".", "reshape", "(", "(", "3", ",", "1", ")", ")", "L", "=", "np", ".", "dot", "(", "M", ",", "A", ")", "result", "[", "i", ",", "j", ",", "0", "]", "=", "L", "[", "0", "]", "result", "[", "i", ",", "j", ",", "1", "]", "=", "L", "[", "1", "]", "result", "[", "i", ",", "j", ",", "2", "]", "=", "L", "[", "2", "]", "return", "result" ]
Return Ma, where M is a 3x3 transformation matrix, for each pixel
[ "Return", "Ma", "where", "M", "is", "a", "3x3", "transformation", "matrix", "for", "each", "pixel" ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L49-L62
train
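The per-pixel loop in _threeDdot_simple is equivalent to a single einsum contraction, which makes a convenient cross-check; the vectorized form below is my own, not necessarily what the module uses internally:

import numpy as np
from imagen.colorspaces import _threeDdot_simple  # private helper, assumed importable

M = np.diag([2.0, 1.0, 0.5])          # per-channel scaling matrix
img = np.random.rand(4, 5, 3)
out = _threeDdot_simple(M, img)
np.testing.assert_allclose(out, np.einsum('kc,ijc->ijk', M, img))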
pyviz/imagen
imagen/colorspaces.py
_swaplch
def _swaplch(LCH): "Reverse the order of an LCH numpy dstack or tuple for analysis." try: # Numpy array L,C,H = np.dsplit(LCH,3) return np.dstack((H,C,L)) except: # Tuple L,C,H = LCH return H,C,L
python
def _swaplch(LCH): "Reverse the order of an LCH numpy dstack or tuple for analysis." try: # Numpy array L,C,H = np.dsplit(LCH,3) return np.dstack((H,C,L)) except: # Tuple L,C,H = LCH return H,C,L
[ "def", "_swaplch", "(", "LCH", ")", ":", "\"Reverse the order of an LCH numpy dstack or tuple for analysis.\"", "try", ":", "L", ",", "C", ",", "H", "=", "np", ".", "dsplit", "(", "LCH", ",", "3", ")", "return", "np", ".", "dstack", "(", "(", "H", ",", "C", ",", "L", ")", ")", "except", ":", "L", ",", "C", ",", "H", "=", "LCH", "return", "H", ",", "C", ",", "L" ]
Reverse the order of an LCH numpy dstack or tuple for analysis.
[ "Reverse", "the", "order", "of", "an", "LCH", "numpy", "dstack", "or", "tuple", "for", "analysis", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L435-L442
train
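_swaplch handles both calling conventions by letting np.dsplit fail on tuples and falling back to tuple unpacking; a quick demonstration:

import numpy as np
from imagen.colorspaces import _swaplch  # private helper, assumed importable

L = np.zeros((2, 2)); C = np.ones((2, 2)); H = np.full((2, 2), 0.5)
HCL = _swaplch(np.dstack((L, C, H)))   # dstack in: planes reordered to H, C, L
H2, C2, L2 = _swaplch((L, C, H))       # tuple in: tuple out, same reordering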
pyviz/imagen
imagen/colorspaces.py
ColorSpace.rgb_to_hsv
def rgb_to_hsv(self,RGB): "linear rgb to hsv" gammaRGB = self._gamma_rgb(RGB) return self._ABC_to_DEF_by_fn(gammaRGB,rgb_to_hsv)
python
def rgb_to_hsv(self,RGB): "linear rgb to hsv" gammaRGB = self._gamma_rgb(RGB) return self._ABC_to_DEF_by_fn(gammaRGB,rgb_to_hsv)
[ "def", "rgb_to_hsv", "(", "self", ",", "RGB", ")", ":", "\"linear rgb to hsv\"", "gammaRGB", "=", "self", ".", "_gamma_rgb", "(", "RGB", ")", "return", "self", ".", "_ABC_to_DEF_by_fn", "(", "gammaRGB", ",", "rgb_to_hsv", ")" ]
linear rgb to hsv
[ "linear", "rgb", "to", "hsv" ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L413-L416
train
pyviz/imagen
imagen/colorspaces.py
ColorSpace.hsv_to_rgb
def hsv_to_rgb(self,HSV): "hsv to linear rgb" gammaRGB = self._ABC_to_DEF_by_fn(HSV,hsv_to_rgb) return self._ungamma_rgb(gammaRGB)
python
def hsv_to_rgb(self,HSV): "hsv to linear rgb" gammaRGB = self._ABC_to_DEF_by_fn(HSV,hsv_to_rgb) return self._ungamma_rgb(gammaRGB)
[ "def", "hsv_to_rgb", "(", "self", ",", "HSV", ")", ":", "\"hsv to linear rgb\"", "gammaRGB", "=", "self", ".", "_ABC_to_DEF_by_fn", "(", "HSV", ",", "hsv_to_rgb", ")", "return", "self", ".", "_ungamma_rgb", "(", "gammaRGB", ")" ]
hsv to linear rgb
[ "hsv", "to", "linear", "rgb" ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L418-L421
train
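The two methods above are inverses apart from the gamma handling. A scalar sanity check using the stdlib colorsys module, under the assumption that the vectorized rgb_to_hsv/hsv_to_rgb these wrap behave like colorsys per pixel:

import colorsys

# Gamma handling (_gamma_rgb/_ungamma_rgb) is deliberately omitted here.
r, g, b = 0.2, 0.4, 0.6
h, s, v = colorsys.rgb_to_hsv(r, g, b)
r2, g2, b2 = colorsys.hsv_to_rgb(h, s, v)
assert abs(r - r2) < 1e-12 and abs(g - g2) < 1e-12 and abs(b - b2) < 1e-12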
pyviz/imagen
imagen/colorspaces.py
ColorConverter.image2working
def image2working(self,i):
    """Transform images i provided into the specified working color space."""
    return self.colorspace.convert(self.image_space,
                                   self.working_space, i)
python
def image2working(self,i):
    """Transform images i provided into the specified working color space."""
    return self.colorspace.convert(self.image_space,
                                   self.working_space, i)
[ "def", "image2working", "(", "self", ",", "i", ")", ":", "return", "self", ".", "colorspace", ".", "convert", "(", "self", ".", "image_space", ",", "self", ".", "working_space", ",", "i", ")" ]
Transform images i provided into the specified working color space.
[ "Transform", "images", "i", "provided", "into", "the", "specified", "working", "color", "space", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L477-L481
train
pyviz/imagen
imagen/colorspaces.py
ColorConverter.working2analysis
def working2analysis(self,r):
    "Transform working space inputs to the analysis color space."
    a = self.colorspace.convert(self.working_space, self.analysis_space, r)
    return self.swap_polar_HSVorder[self.analysis_space](a)
python
def working2analysis(self,r):
    "Transform working space inputs to the analysis color space."
    a = self.colorspace.convert(self.working_space, self.analysis_space, r)
    return self.swap_polar_HSVorder[self.analysis_space](a)
[ "def", "working2analysis", "(", "self", ",", "r", ")", ":", "\"Transform working space inputs to the analysis color space.\"", "a", "=", "self", ".", "colorspace", ".", "convert", "(", "self", ".", "working_space", ",", "self", ".", "analysis_space", ",", "r", ")", "return", "self", ".", "swap_polar_HSVorder", "[", "self", ".", "analysis_space", "]", "(", "a", ")" ]
Transform working space inputs to the analysis color space.
[ "Transform", "working", "space", "inputs", "to", "the", "analysis", "color", "space", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L483-L486
train
pyviz/imagen
imagen/colorspaces.py
ColorConverter.analysis2working
def analysis2working(self,a):
    "Convert back from the analysis color space to the working space."
    a = self.swap_polar_HSVorder[self.analysis_space](a)
    return self.colorspace.convert(self.analysis_space, self.working_space, a)
python
def analysis2working(self,a):
    "Convert back from the analysis color space to the working space."
    a = self.swap_polar_HSVorder[self.analysis_space](a)
    return self.colorspace.convert(self.analysis_space, self.working_space, a)
[ "def", "analysis2working", "(", "self", ",", "a", ")", ":", "\"Convert back from the analysis color space to the working space.\"", "a", "=", "self", ".", "swap_polar_HSVorder", "[", "self", ".", "analysis_space", "]", "(", "a", ")", "return", "self", ".", "colorspace", ".", "convert", "(", "self", ".", "analysis_space", ",", "self", ".", "working_space", ",", "a", ")" ]
Convert back from the analysis color space to the working space.
[ "Convert", "back", "from", "the", "analysis", "color", "space", "to", "the", "working", "space", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L488-L491
train
kmike/opencorpora-tools
opencorpora/xml_utils.py
load_chunk
def load_chunk(filename, bounds, encoding='utf8', slow=False):
    """
    Load a chunk from file using Bounds info.

    Pass 'slow=True' for an alternative loading method
    based on line numbers.
    """
    if slow:
        return _load_chunk_slow(filename, bounds, encoding)
    with open(filename, 'rb') as f:
        f.seek(bounds.byte_start)
        size = bounds.byte_end - bounds.byte_start
        return f.read(size).decode(encoding)
python
def load_chunk(filename, bounds, encoding='utf8', slow=False):
    """
    Load a chunk from file using Bounds info.

    Pass 'slow=True' for an alternative loading method
    based on line numbers.
    """
    if slow:
        return _load_chunk_slow(filename, bounds, encoding)
    with open(filename, 'rb') as f:
        f.seek(bounds.byte_start)
        size = bounds.byte_end - bounds.byte_start
        return f.read(size).decode(encoding)
[ "def", "load_chunk", "(", "filename", ",", "bounds", ",", "encoding", "=", "'utf8'", ",", "slow", "=", "False", ")", ":", "if", "slow", ":", "return", "_load_chunk_slow", "(", "filename", ",", "bounds", ",", "encoding", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "f", ".", "seek", "(", "bounds", ".", "byte_start", ")", "size", "=", "bounds", ".", "byte_end", "-", "bounds", ".", "byte_start", "return", "f", ".", "read", "(", "size", ")", ".", "decode", "(", "encoding", ")" ]
Load a chunk from file using Bounds info. Pass 'slow=True' for an alternative loading method based on line numbers.
[ "Load", "a", "chunk", "from", "file", "using", "Bounds", "info", ".", "Pass", "slow", "=", "True", "for", "an", "alternative", "loading", "method", "based", "on", "line", "numbers", "." ]
26fee106aea1180d2975b3825dcf9b3875e80db1
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/xml_utils.py#L52-L63
train
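A usage sketch for the fast path (not from the dataset): the Bounds stand-in below is a made-up namedtuple modeling only the byte_start/byte_end attributes the function reads, and the corpus file is created on the fly.

from collections import namedtuple

Bounds = namedtuple('Bounds', 'byte_start byte_end')

with open('corpus.xml', 'wb') as f:              # toy file for the demo
    f.write('<text>привет</text>'.encode('utf8'))

# '<text>' occupies 6 bytes; the 6 cyrillic characters take 12 bytes.
chunk = load_chunk('corpus.xml', Bounds(6, 18))
print(chunk)  # -> привет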
portfoliome/postpy
postpy/data_types.py
generate_numeric_range
def generate_numeric_range(items, lower_bound, upper_bound):
    """Generate postgresql numeric range and label for insertion.

    Parameters
    ----------
    items: iterable
        labels for ranges.
    lower_bound: numeric lower bound
    upper_bound: numeric upper bound
    """

    quantile_grid = create_quantiles(items, lower_bound, upper_bound)
    labels, bounds = (zip(*quantile_grid))
    ranges = ((label, NumericRange(*bound)) for label, bound in zip(labels, bounds))

    return ranges
python
def generate_numeric_range(items, lower_bound, upper_bound):
    """Generate postgresql numeric range and label for insertion.

    Parameters
    ----------
    items: iterable
        labels for ranges.
    lower_bound: numeric lower bound
    upper_bound: numeric upper bound
    """

    quantile_grid = create_quantiles(items, lower_bound, upper_bound)
    labels, bounds = (zip(*quantile_grid))
    ranges = ((label, NumericRange(*bound)) for label, bound in zip(labels, bounds))

    return ranges
[ "def", "generate_numeric_range", "(", "items", ",", "lower_bound", ",", "upper_bound", ")", ":", "quantile_grid", "=", "create_quantiles", "(", "items", ",", "lower_bound", ",", "upper_bound", ")", "labels", ",", "bounds", "=", "(", "zip", "(", "*", "quantile_grid", ")", ")", "ranges", "=", "(", "(", "label", ",", "NumericRange", "(", "*", "bound", ")", ")", "for", "label", ",", "bound", "in", "zip", "(", "labels", ",", "bounds", ")", ")", "return", "ranges" ]
Generate postgresql numeric range and label for insertion.

Parameters
----------
items: iterable
    labels for ranges.
lower_bound: numeric lower bound
upper_bound: numeric upper bound
[ "Generate", "postgresql", "numeric", "range", "and", "label", "for", "insertion", "." ]
fe26199131b15295fc5f669a0ad2a7f47bf490ee
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/data_types.py#L30-L44
train
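A sketch of the label/bounds pairing the function performs. create_quantiles is not shown in this record, so the quantile_grid shape below, (label, (lower, upper)) pairs, is an assumption; NumericRange is the psycopg2 range type.

from psycopg2.extras import NumericRange

# Assumed output shape of create_quantiles:
quantile_grid = [('low', (0, 10)), ('mid', (10, 20)), ('high', (20, 30))]
labels, bounds = zip(*quantile_grid)
ranges = [(label, NumericRange(*bound)) for label, bound in zip(labels, bounds)]
print(ranges[0])  # ('low', NumericRange(0, 10, '[)'))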
pyviz/imagen
imagen/image.py
edge_average
def edge_average(a):
    "Return the mean value around the edge of an array."

    if len(np.ravel(a)) < 2:
        return float(a[0])
    else:
        top_edge = a[0]
        bottom_edge = a[-1]
        left_edge = a[1:-1,0]
        right_edge = a[1:-1,-1]

        edge_sum = np.sum(top_edge) + np.sum(bottom_edge) + np.sum(left_edge) + np.sum(right_edge)
        num_values = len(top_edge)+len(bottom_edge)+len(left_edge)+len(right_edge)

        return float(edge_sum)/num_values
python
def edge_average(a):
    "Return the mean value around the edge of an array."

    if len(np.ravel(a)) < 2:
        return float(a[0])
    else:
        top_edge = a[0]
        bottom_edge = a[-1]
        left_edge = a[1:-1,0]
        right_edge = a[1:-1,-1]

        edge_sum = np.sum(top_edge) + np.sum(bottom_edge) + np.sum(left_edge) + np.sum(right_edge)
        num_values = len(top_edge)+len(bottom_edge)+len(left_edge)+len(right_edge)

        return float(edge_sum)/num_values
[ "def", "edge_average", "(", "a", ")", ":", "\"Return the mean value around the edge of an array.\"", "if", "len", "(", "np", ".", "ravel", "(", "a", ")", ")", "<", "2", ":", "return", "float", "(", "a", "[", "0", "]", ")", "else", ":", "top_edge", "=", "a", "[", "0", "]", "bottom_edge", "=", "a", "[", "-", "1", "]", "left_edge", "=", "a", "[", "1", ":", "-", "1", ",", "0", "]", "right_edge", "=", "a", "[", "1", ":", "-", "1", ",", "-", "1", "]", "edge_sum", "=", "np", ".", "sum", "(", "top_edge", ")", "+", "np", ".", "sum", "(", "bottom_edge", ")", "+", "np", ".", "sum", "(", "left_edge", ")", "+", "np", ".", "sum", "(", "right_edge", ")", "num_values", "=", "len", "(", "top_edge", ")", "+", "len", "(", "bottom_edge", ")", "+", "len", "(", "left_edge", ")", "+", "len", "(", "right_edge", ")", "return", "float", "(", "edge_sum", ")", "/", "num_values" ]
Return the mean value around the edge of an array.
[ "Return", "the", "mean", "value", "around", "the", "edge", "of", "an", "array", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L206-L220
train
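A quick check of the border arithmetic, assuming edge_average is defined as above: on a 4x4 grid the 12 border cells are averaged and only the 2x2 interior is excluded.

import numpy as np

a = np.arange(16.0).reshape(4, 4)
border = [0, 1, 2, 3, 4, 7, 8, 11, 12, 13, 14, 15]  # flat indices of the rim
assert edge_average(a) == np.mean([a.flat[i] for i in border])  # 7.5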
pyviz/imagen
imagen/image.py
GenericImage._process_channels
def _process_channels(self,p,**params_to_override):
    """
    Add the channel information to the channel_data attribute.
    """
    orig_image = self._image
    for i in range(len(self._channel_data)):
        self._image = self._original_channel_data[i]
        self._channel_data[i] = self._reduced_call(**params_to_override)
    self._image = orig_image

    return self._channel_data
python
def _process_channels(self,p,**params_to_override):
    """
    Add the channel information to the channel_data attribute.
    """
    orig_image = self._image
    for i in range(len(self._channel_data)):
        self._image = self._original_channel_data[i]
        self._channel_data[i] = self._reduced_call(**params_to_override)
    self._image = orig_image

    return self._channel_data
[ "def", "_process_channels", "(", "self", ",", "p", ",", "**", "params_to_override", ")", ":", "orig_image", "=", "self", ".", "_image", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_channel_data", ")", ")", ":", "self", ".", "_image", "=", "self", ".", "_original_channel_data", "[", "i", "]", "self", ".", "_channel_data", "[", "i", "]", "=", "self", ".", "_reduced_call", "(", "**", "params_to_override", ")", "self", ".", "_image", "=", "orig_image", "return", "self", ".", "_channel_data" ]
Add the channel information to the channel_data attribute.
[ "Add", "the", "channel", "information", "to", "the", "channel_data", "attribute", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L322-L332
train
pyviz/imagen
imagen/image.py
FileImage.set_matrix_dimensions
def set_matrix_dimensions(self, *args):
    """
    Subclassed to delete the cached image when matrix dimensions
    are changed.
    """
    self._image = None
    super(FileImage, self).set_matrix_dimensions(*args)
python
def set_matrix_dimensions(self, *args):
    """
    Subclassed to delete the cached image when matrix dimensions
    are changed.
    """
    self._image = None
    super(FileImage, self).set_matrix_dimensions(*args)
[ "def", "set_matrix_dimensions", "(", "self", ",", "*", "args", ")", ":", "self", ".", "_image", "=", "None", "super", "(", "FileImage", ",", "self", ")", ".", "set_matrix_dimensions", "(", "*", "args", ")" ]
Subclassed to delete the cached image when matrix dimensions are changed.
[ "Subclassed", "to", "delete", "the", "cached", "image", "when", "matrix", "dimensions", "are", "changed", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L425-L431
train
pyviz/imagen
imagen/image.py
FileImage._load_pil_image
def _load_pil_image(self, filename):
    """
    Load image using PIL.
    """
    self._channel_data = []
    self._original_channel_data = []

    im = Image.open(filename)
    self._image = ImageOps.grayscale(im)
    im.load()

    file_data = np.asarray(im, float)
    file_data = file_data / file_data.max()

    # if the image has more than one channel, load them
    if( len(file_data.shape) == 3 ):
        num_channels = file_data.shape[2]
        for i in range(num_channels):
            self._channel_data.append( file_data[:, :, i])
            self._original_channel_data.append( file_data[:, :, i] )
python
def _load_pil_image(self, filename):
    """
    Load image using PIL.
    """
    self._channel_data = []
    self._original_channel_data = []

    im = Image.open(filename)
    self._image = ImageOps.grayscale(im)
    im.load()

    file_data = np.asarray(im, float)
    file_data = file_data / file_data.max()

    # if the image has more than one channel, load them
    if( len(file_data.shape) == 3 ):
        num_channels = file_data.shape[2]
        for i in range(num_channels):
            self._channel_data.append( file_data[:, :, i])
            self._original_channel_data.append( file_data[:, :, i] )
[ "def", "_load_pil_image", "(", "self", ",", "filename", ")", ":", "self", ".", "_channel_data", "=", "[", "]", "self", ".", "_original_channel_data", "=", "[", "]", "im", "=", "Image", ".", "open", "(", "filename", ")", "self", ".", "_image", "=", "ImageOps", ".", "grayscale", "(", "im", ")", "im", ".", "load", "(", ")", "file_data", "=", "np", ".", "asarray", "(", "im", ",", "float", ")", "file_data", "=", "file_data", "/", "file_data", ".", "max", "(", ")", "if", "(", "len", "(", "file_data", ".", "shape", ")", "==", "3", ")", ":", "num_channels", "=", "file_data", ".", "shape", "[", "2", "]", "for", "i", "in", "range", "(", "num_channels", ")", ":", "self", ".", "_channel_data", ".", "append", "(", "file_data", "[", ":", ",", ":", ",", "i", "]", ")", "self", ".", "_original_channel_data", ".", "append", "(", "file_data", "[", ":", ",", ":", ",", "i", "]", ")" ]
Load image using PIL.
[ "Load", "image", "using", "PIL", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L450-L469
train
pyviz/imagen
imagen/image.py
FileImage._load_npy
def _load_npy(self, filename):
    """
    Load image using Numpy.
    """
    self._channel_data = []
    self._original_channel_data = []
    file_channel_data = np.load(filename)
    file_channel_data = file_channel_data / file_channel_data.max()
    for i in range(file_channel_data.shape[2]):
        self._channel_data.append(file_channel_data[:, :, i])
        self._original_channel_data.append(file_channel_data[:, :, i])

    self._image = file_channel_data.sum(2) / file_channel_data.shape[2]
python
def _load_npy(self, filename):
    """
    Load image using Numpy.
    """
    self._channel_data = []
    self._original_channel_data = []
    file_channel_data = np.load(filename)
    file_channel_data = file_channel_data / file_channel_data.max()
    for i in range(file_channel_data.shape[2]):
        self._channel_data.append(file_channel_data[:, :, i])
        self._original_channel_data.append(file_channel_data[:, :, i])

    self._image = file_channel_data.sum(2) / file_channel_data.shape[2]
[ "def", "_load_npy", "(", "self", ",", "filename", ")", ":", "self", ".", "_channel_data", "=", "[", "]", "self", ".", "_original_channel_data", "=", "[", "]", "file_channel_data", "=", "np", ".", "load", "(", "filename", ")", "file_channel_data", "=", "file_channel_data", "/", "file_channel_data", ".", "max", "(", ")", "for", "i", "in", "range", "(", "file_channel_data", ".", "shape", "[", "2", "]", ")", ":", "self", ".", "_channel_data", ".", "append", "(", "file_channel_data", "[", ":", ",", ":", ",", "i", "]", ")", "self", ".", "_original_channel_data", ".", "append", "(", "file_channel_data", "[", ":", ",", ":", ",", "i", "]", ")", "self", ".", "_image", "=", "file_channel_data", ".", "sum", "(", "2", ")", "/", "file_channel_data", ".", "shape", "[", "2", "]" ]
Load image using Numpy.
[ "Load", "image", "using", "Numpy", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L472-L485
train
bskinn/opan
opan/utils/decorate.py
kwargfetch.ok_kwarg
def ok_kwarg(val):
    """Helper method for screening keyword arguments"""

    import keyword

    try:
        return str.isidentifier(val) and not keyword.iskeyword(val)
    except TypeError:
        # Non-string values are never a valid keyword arg
        return False
python
def ok_kwarg(val):
    """Helper method for screening keyword arguments"""

    import keyword

    try:
        return str.isidentifier(val) and not keyword.iskeyword(val)
    except TypeError:
        # Non-string values are never a valid keyword arg
        return False
[ "def", "ok_kwarg", "(", "val", ")", ":", "import", "keyword", "try", ":", "return", "str", ".", "isidentifier", "(", "val", ")", "and", "not", "keyword", ".", "iskeyword", "(", "val", ")", "except", "TypeError", ":", "return", "False" ]
Helper method for screening keyword arguments
[ "Helper", "method", "for", "screening", "keyword", "arguments" ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/decorate.py#L186-L195
train
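A quick check of the screening behaviour on sample values, assuming ok_kwarg is defined as above:

assert ok_kwarg('color') is True       # plain identifier
assert ok_kwarg('lambda') is False     # valid identifier, but reserved keyword
assert ok_kwarg('2fast') is False      # not an identifier
assert ok_kwarg(42) is False           # non-string hits the TypeError branch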
inveniosoftware/invenio-indexer
invenio_indexer/cli.py
run
def run(delayed, concurrency, version_type=None, queue=None,
        raise_on_error=True):
    """Run bulk record indexing."""
    if delayed:
        celery_kwargs = {
            'kwargs': {
                'version_type': version_type,
                'es_bulk_kwargs': {'raise_on_error': raise_on_error},
            }
        }
        click.secho(
            'Starting {0} tasks for indexing records...'.format(concurrency),
            fg='green')
        if queue is not None:
            celery_kwargs.update({'queue': queue})
        for c in range(0, concurrency):
            process_bulk_queue.apply_async(**celery_kwargs)
    else:
        click.secho('Indexing records...', fg='green')
        RecordIndexer(version_type=version_type).process_bulk_queue(
            es_bulk_kwargs={'raise_on_error': raise_on_error})
python
def run(delayed, concurrency, version_type=None, queue=None,
        raise_on_error=True):
    """Run bulk record indexing."""
    if delayed:
        celery_kwargs = {
            'kwargs': {
                'version_type': version_type,
                'es_bulk_kwargs': {'raise_on_error': raise_on_error},
            }
        }
        click.secho(
            'Starting {0} tasks for indexing records...'.format(concurrency),
            fg='green')
        if queue is not None:
            celery_kwargs.update({'queue': queue})
        for c in range(0, concurrency):
            process_bulk_queue.apply_async(**celery_kwargs)
    else:
        click.secho('Indexing records...', fg='green')
        RecordIndexer(version_type=version_type).process_bulk_queue(
            es_bulk_kwargs={'raise_on_error': raise_on_error})
[ "def", "run", "(", "delayed", ",", "concurrency", ",", "version_type", "=", "None", ",", "queue", "=", "None", ",", "raise_on_error", "=", "True", ")", ":", "if", "delayed", ":", "celery_kwargs", "=", "{", "'kwargs'", ":", "{", "'version_type'", ":", "version_type", ",", "'es_bulk_kwargs'", ":", "{", "'raise_on_error'", ":", "raise_on_error", "}", ",", "}", "}", "click", ".", "secho", "(", "'Starting {0} tasks for indexing records...'", ".", "format", "(", "concurrency", ")", ",", "fg", "=", "'green'", ")", "if", "queue", "is", "not", "None", ":", "celery_kwargs", ".", "update", "(", "{", "'queue'", ":", "queue", "}", ")", "for", "c", "in", "range", "(", "0", ",", "concurrency", ")", ":", "process_bulk_queue", ".", "apply_async", "(", "**", "celery_kwargs", ")", "else", ":", "click", ".", "secho", "(", "'Indexing records...'", ",", "fg", "=", "'green'", ")", "RecordIndexer", "(", "version_type", "=", "version_type", ")", ".", "process_bulk_queue", "(", "es_bulk_kwargs", "=", "{", "'raise_on_error'", ":", "raise_on_error", "}", ")" ]
Run bulk record indexing.
[ "Run", "bulk", "record", "indexing", "." ]
1460aa8976b449d9a3a99d356322b158e9be6f80
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L43-L63
train
inveniosoftware/invenio-indexer
invenio_indexer/cli.py
reindex
def reindex(pid_type):
    """Reindex all records.

    :param pid_type: Pid type.
    """
    click.secho('Sending records to indexing queue ...', fg='green')

    query = (x[0] for x in PersistentIdentifier.query.filter_by(
        object_type='rec', status=PIDStatus.REGISTERED
    ).filter(
        PersistentIdentifier.pid_type.in_(pid_type)
    ).values(
        PersistentIdentifier.object_uuid
    ))
    RecordIndexer().bulk_index(query)
    click.secho('Execute "run" command to process the queue!',
                fg='yellow')
python
def reindex(pid_type):
    """Reindex all records.

    :param pid_type: Pid type.
    """
    click.secho('Sending records to indexing queue ...', fg='green')

    query = (x[0] for x in PersistentIdentifier.query.filter_by(
        object_type='rec', status=PIDStatus.REGISTERED
    ).filter(
        PersistentIdentifier.pid_type.in_(pid_type)
    ).values(
        PersistentIdentifier.object_uuid
    ))
    RecordIndexer().bulk_index(query)
    click.secho('Execute "run" command to process the queue!',
                fg='yellow')
[ "def", "reindex", "(", "pid_type", ")", ":", "click", ".", "secho", "(", "'Sending records to indexing queue ...'", ",", "fg", "=", "'green'", ")", "query", "=", "(", "x", "[", "0", "]", "for", "x", "in", "PersistentIdentifier", ".", "query", ".", "filter_by", "(", "object_type", "=", "'rec'", ",", "status", "=", "PIDStatus", ".", "REGISTERED", ")", ".", "filter", "(", "PersistentIdentifier", ".", "pid_type", ".", "in_", "(", "pid_type", ")", ")", ".", "values", "(", "PersistentIdentifier", ".", "object_uuid", ")", ")", "RecordIndexer", "(", ")", ".", "bulk_index", "(", "query", ")", "click", ".", "secho", "(", "'Execute \"run\" command to process the queue!'", ",", "fg", "=", "'yellow'", ")" ]
Reindex all records.

:param pid_type: Pid type.
[ "Reindex", "all", "records", "." ]
1460aa8976b449d9a3a99d356322b158e9be6f80
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L72-L88
train
inveniosoftware/invenio-indexer
invenio_indexer/cli.py
process_actions
def process_actions(actions):
    """Process queue actions."""
    queue = current_app.config['INDEXER_MQ_QUEUE']
    with establish_connection() as c:
        q = queue(c)
        for action in actions:
            q = action(q)
python
def process_actions(actions):
    """Process queue actions."""
    queue = current_app.config['INDEXER_MQ_QUEUE']
    with establish_connection() as c:
        q = queue(c)
        for action in actions:
            q = action(q)
[ "def", "process_actions", "(", "actions", ")", ":", "queue", "=", "current_app", ".", "config", "[", "'INDEXER_MQ_QUEUE'", "]", "with", "establish_connection", "(", ")", "as", "c", ":", "q", "=", "queue", "(", "c", ")", "for", "action", "in", "actions", ":", "q", "=", "action", "(", "q", ")" ]
Process queue actions.
[ "Process", "queue", "actions", "." ]
1460aa8976b449d9a3a99d356322b158e9be6f80
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L98-L104
train
inveniosoftware/invenio-indexer
invenio_indexer/cli.py
init_queue
def init_queue():
    """Initialize indexing queue."""
    def action(queue):
        queue.declare()
        click.secho('Indexing queue has been initialized.', fg='green')
        return queue
    return action
python
def init_queue():
    """Initialize indexing queue."""
    def action(queue):
        queue.declare()
        click.secho('Indexing queue has been initialized.', fg='green')
        return queue
    return action
[ "def", "init_queue", "(", ")", ":", "def", "action", "(", "queue", ")", ":", "queue", ".", "declare", "(", ")", "click", ".", "secho", "(", "'Indexing queue has been initialized.'", ",", "fg", "=", "'green'", ")", "return", "queue", "return", "action" ]
Initialize indexing queue.
[ "Initialize", "indexing", "queue", "." ]
1460aa8976b449d9a3a99d356322b158e9be6f80
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L108-L114
train
inveniosoftware/invenio-indexer
invenio_indexer/cli.py
purge_queue
def purge_queue():
    """Purge indexing queue."""
    def action(queue):
        queue.purge()
        click.secho('Indexing queue has been purged.', fg='green')
        return queue
    return action
python
def purge_queue():
    """Purge indexing queue."""
    def action(queue):
        queue.purge()
        click.secho('Indexing queue has been purged.', fg='green')
        return queue
    return action
[ "def", "purge_queue", "(", ")", ":", "def", "action", "(", "queue", ")", ":", "queue", ".", "purge", "(", ")", "click", ".", "secho", "(", "'Indexing queue has been purged.'", ",", "fg", "=", "'green'", ")", "return", "queue", "return", "action" ]
Purge indexing queue.
[ "Purge", "indexing", "queue", "." ]
1460aa8976b449d9a3a99d356322b158e9be6f80
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L118-L124
train
inveniosoftware/invenio-indexer
invenio_indexer/cli.py
delete_queue
def delete_queue():
    """Delete indexing queue."""
    def action(queue):
        queue.delete()
        click.secho('Indexing queue has been deleted.', fg='green')
        return queue
    return action
python
def delete_queue():
    """Delete indexing queue."""
    def action(queue):
        queue.delete()
        click.secho('Indexing queue has been deleted.', fg='green')
        return queue
    return action
[ "def", "delete_queue", "(", ")", ":", "def", "action", "(", "queue", ")", ":", "queue", ".", "delete", "(", ")", "click", ".", "secho", "(", "'Indexing queue has been deleted.'", ",", "fg", "=", "'green'", ")", "return", "queue", "return", "action" ]
Delete indexing queue.
[ "Delete", "indexing", "queue", "." ]
1460aa8976b449d9a3a99d356322b158e9be6f80
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/cli.py#L128-L134
train
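init_queue, purge_queue and delete_queue above share one shape: each factory returns an action that receives the bound queue and hands it back, so process_actions can chain them. A toy sketch of that flow; ToyQueue and the two actions are made up for illustration:

class ToyQueue:
    def declare(self): print('declared')
    def purge(self): print('purged')

def declare_action(queue):
    queue.declare()
    return queue

def purge_action(queue):
    queue.purge()
    return queue

q = ToyQueue()
for action in (declare_action, purge_action):
    q = action(q)   # the same folding loop process_actions uses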
openvax/isovar
isovar/reference_sequence_key.py
variant_matches_reference_sequence
def variant_matches_reference_sequence(variant, ref_seq_on_transcript, strand):
    """
    Make sure that reference nucleotides we expect to see on the reference
    transcript from a variant are the same ones we encounter.
    """
    if strand == "-":
        ref_seq_on_transcript = reverse_complement_dna(ref_seq_on_transcript)
    return ref_seq_on_transcript == variant.ref
python
def variant_matches_reference_sequence(variant, ref_seq_on_transcript, strand):
    """
    Make sure that reference nucleotides we expect to see on the reference
    transcript from a variant are the same ones we encounter.
    """
    if strand == "-":
        ref_seq_on_transcript = reverse_complement_dna(ref_seq_on_transcript)
    return ref_seq_on_transcript == variant.ref
[ "def", "variant_matches_reference_sequence", "(", "variant", ",", "ref_seq_on_transcript", ",", "strand", ")", ":", "if", "strand", "==", "\"-\"", ":", "ref_seq_on_transcript", "=", "reverse_complement_dna", "(", "ref_seq_on_transcript", ")", "return", "ref_seq_on_transcript", "==", "variant", ".", "ref" ]
Make sure that reference nucleotides we expect to see on the reference transcript from a variant are the same ones we encounter.
[ "Make", "sure", "that", "reference", "nucleotides", "we", "expect", "to", "see", "on", "the", "reference", "transcript", "from", "a", "variant", "are", "the", "same", "ones", "we", "encounter", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_sequence_key.py#L131-L138
train
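A sketch of the minus-strand logic, runnable if the function above is pasted alongside these stand-ins. The reverse_complement_dna stub and FakeVariant are hypothetical substitutes for the isovar imports, written only to exercise the check:

def reverse_complement_dna(seq):
    # Complement each base, then reverse; the real helper may differ.
    return seq.translate(str.maketrans('ACGT', 'TGCA'))[::-1]

class FakeVariant:
    ref = 'AC'   # only the .ref attribute is consulted

# On the minus strand the transcript shows 'GT', whose reverse
# complement is the genomic reference 'AC', so the check passes.
assert variant_matches_reference_sequence(FakeVariant(), 'GT', strand='-')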
openvax/isovar
isovar/reference_sequence_key.py
ReferenceSequenceKey.from_variant_and_transcript
def from_variant_and_transcript(
        cls,
        variant,
        transcript,
        context_size):
    """
    Extracts the reference sequence around a variant locus on a
    particular transcript.

    Parameters
    ----------
    variant : varcode.Variant

    transcript : pyensembl.Transcript

    context_size : int

    Returns SequenceKey object.
    Can also return None if Transcript lacks sufficiently long sequence
    """
    full_transcript_sequence = transcript.sequence
    if full_transcript_sequence is None:
        logger.warn(
            "Expected transcript %s (overlapping %s) to have sequence",
            transcript.name,
            variant)
        return None

    # get the interbase range of offsets which capture all reference
    # bases modified by the variant
    variant_start_offset, variant_end_offset = \
        interbase_range_affected_by_variant_on_transcript(
            variant=variant,
            transcript=transcript)

    reference_cdna_at_variant = full_transcript_sequence[
        variant_start_offset:variant_end_offset]

    if not variant_matches_reference_sequence(
            variant=variant,
            strand=transcript.strand,
            ref_seq_on_transcript=reference_cdna_at_variant):
        logger.warn(
            "Variant %s doesn't match reference sequence on transcript %s: "
            "may span splice junction",
            variant,
            transcript)
        return None

    if len(full_transcript_sequence) < 6:
        # need at least 6 nucleotides for a start and stop codon
        logger.warn(
            "Sequence of %s (overlapping %s) too short: %d",
            transcript,
            variant,
            len(full_transcript_sequence))
        return None

    logger.info(
        "Interbase offset range on %s for variant %s = %d:%d",
        transcript.name,
        variant,
        variant_start_offset,
        variant_end_offset)

    reference_cdna_before_variant = full_transcript_sequence[
        max(0, variant_start_offset - context_size):
        variant_start_offset]
    reference_cdna_after_variant = full_transcript_sequence[
        variant_end_offset:
        variant_end_offset + context_size]

    return ReferenceSequenceKey(
        strand=transcript.strand,
        sequence_before_variant_locus=reference_cdna_before_variant,
        sequence_at_variant_locus=reference_cdna_at_variant,
        sequence_after_variant_locus=reference_cdna_after_variant)
python
def from_variant_and_transcript(
        cls,
        variant,
        transcript,
        context_size):
    """
    Extracts the reference sequence around a variant locus on a
    particular transcript.

    Parameters
    ----------
    variant : varcode.Variant

    transcript : pyensembl.Transcript

    context_size : int

    Returns SequenceKey object.
    Can also return None if Transcript lacks sufficiently long sequence
    """
    full_transcript_sequence = transcript.sequence
    if full_transcript_sequence is None:
        logger.warn(
            "Expected transcript %s (overlapping %s) to have sequence",
            transcript.name,
            variant)
        return None

    # get the interbase range of offsets which capture all reference
    # bases modified by the variant
    variant_start_offset, variant_end_offset = \
        interbase_range_affected_by_variant_on_transcript(
            variant=variant,
            transcript=transcript)

    reference_cdna_at_variant = full_transcript_sequence[
        variant_start_offset:variant_end_offset]

    if not variant_matches_reference_sequence(
            variant=variant,
            strand=transcript.strand,
            ref_seq_on_transcript=reference_cdna_at_variant):
        logger.warn(
            "Variant %s doesn't match reference sequence on transcript %s: "
            "may span splice junction",
            variant,
            transcript)
        return None

    if len(full_transcript_sequence) < 6:
        # need at least 6 nucleotides for a start and stop codon
        logger.warn(
            "Sequence of %s (overlapping %s) too short: %d",
            transcript,
            variant,
            len(full_transcript_sequence))
        return None

    logger.info(
        "Interbase offset range on %s for variant %s = %d:%d",
        transcript.name,
        variant,
        variant_start_offset,
        variant_end_offset)

    reference_cdna_before_variant = full_transcript_sequence[
        max(0, variant_start_offset - context_size):
        variant_start_offset]
    reference_cdna_after_variant = full_transcript_sequence[
        variant_end_offset:
        variant_end_offset + context_size]

    return ReferenceSequenceKey(
        strand=transcript.strand,
        sequence_before_variant_locus=reference_cdna_before_variant,
        sequence_at_variant_locus=reference_cdna_at_variant,
        sequence_after_variant_locus=reference_cdna_after_variant)
[ "def", "from_variant_and_transcript", "(", "cls", ",", "variant", ",", "transcript", ",", "context_size", ")", ":", "full_transcript_sequence", "=", "transcript", ".", "sequence", "if", "full_transcript_sequence", "is", "None", ":", "logger", ".", "warn", "(", "\"Expected transcript %s (overlapping %s) to have sequence\"", ",", "transcript", ".", "name", ",", "variant", ")", "return", "None", "variant_start_offset", ",", "variant_end_offset", "=", "interbase_range_affected_by_variant_on_transcript", "(", "variant", "=", "variant", ",", "transcript", "=", "transcript", ")", "reference_cdna_at_variant", "=", "full_transcript_sequence", "[", "variant_start_offset", ":", "variant_end_offset", "]", "if", "not", "variant_matches_reference_sequence", "(", "variant", "=", "variant", ",", "strand", "=", "transcript", ".", "strand", ",", "ref_seq_on_transcript", "=", "reference_cdna_at_variant", ")", ":", "logger", ".", "warn", "(", "\"Variant %s doesn't match reference sequence on transcript %s: \"", "\"may span splice junction\"", ",", "variant", ",", "transcript", ")", "return", "None", "if", "len", "(", "full_transcript_sequence", ")", "<", "6", ":", "logger", ".", "warn", "(", "\"Sequence of %s (overlapping %s) too short: %d\"", ",", "transcript", ",", "variant", ",", "len", "(", "full_transcript_sequence", ")", ")", "return", "None", "logger", ".", "info", "(", "\"Interbase offset range on %s for variant %s = %d:%d\"", ",", "transcript", ".", "name", ",", "variant", ",", "variant_start_offset", ",", "variant_end_offset", ")", "reference_cdna_before_variant", "=", "full_transcript_sequence", "[", "max", "(", "0", ",", "variant_start_offset", "-", "context_size", ")", ":", "variant_start_offset", "]", "reference_cdna_after_variant", "=", "full_transcript_sequence", "[", "variant_end_offset", ":", "variant_end_offset", "+", "context_size", "]", "return", "ReferenceSequenceKey", "(", "strand", "=", "transcript", ".", "strand", ",", "sequence_before_variant_locus", "=", "reference_cdna_before_variant", ",", "sequence_at_variant_locus", "=", "reference_cdna_at_variant", ",", "sequence_after_variant_locus", "=", "reference_cdna_after_variant", ")" ]
Extracts the reference sequence around a variant locus on a
particular transcript.

Parameters
----------
variant : varcode.Variant

transcript : pyensembl.Transcript

context_size : int

Returns SequenceKey object.
Can also return None if Transcript lacks sufficiently long sequence
[ "Extracts", "the", "reference", "sequence", "around", "a", "variant", "locus", "on", "a", "particular", "transcript", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_sequence_key.py#L51-L128
train
pyviz/imagen
imagen/__init__.py
wrap
def wrap(lower, upper, x):
    """
    Circularly alias the numeric value x into the range [lower,upper).

    Valid for cyclic quantities like orientations or hues.
    """
    #I have no idea how I came up with this algorithm; it should be simplified.
    #
    # Note that Python's % operator works on floats and arrays;
    # usually one can simply use that instead. E.g. to wrap array or
    # scalar x into 0,2*pi, just use "x % (2*pi)".
    range_=upper-lower
    return lower + np.fmod(x-lower + 2*range_*(1-np.floor(x/(2*range_))), range_)
python
def wrap(lower, upper, x):
    """
    Circularly alias the numeric value x into the range [lower,upper).

    Valid for cyclic quantities like orientations or hues.
    """
    #I have no idea how I came up with this algorithm; it should be simplified.
    #
    # Note that Python's % operator works on floats and arrays;
    # usually one can simply use that instead. E.g. to wrap array or
    # scalar x into 0,2*pi, just use "x % (2*pi)".
    range_=upper-lower
    return lower + np.fmod(x-lower + 2*range_*(1-np.floor(x/(2*range_))), range_)
[ "def", "wrap", "(", "lower", ",", "upper", ",", "x", ")", ":", "range_", "=", "upper", "-", "lower", "return", "lower", "+", "np", ".", "fmod", "(", "x", "-", "lower", "+", "2", "*", "range_", "*", "(", "1", "-", "np", ".", "floor", "(", "x", "/", "(", "2", "*", "range_", ")", ")", ")", ",", "range_", ")" ]
Circularly alias the numeric value x into the range [lower,upper). Valid for cyclic quantities like orientations or hues.
[ "Circularly", "alias", "the", "numeric", "value", "x", "into", "the", "range", "[", "lower", "upper", ")", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L503-L515
train
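A check of the aliasing behaviour, assuming wrap is defined as above: for a [0, 2*pi) range it agrees with the simpler x % (2*pi) that the comment mentions, and shifted ranges work too.

import numpy as np

x = np.array([-1.0, 0.0, 7.0, 13.0])
two_pi = 2 * np.pi
assert np.allclose(wrap(0.0, two_pi, x), x % two_pi)

# A shifted range, e.g. orientations in [-pi/2, pi/2):
print(wrap(-np.pi/2, np.pi/2, np.pi))   # -> 0.0 (pi aliases to 0)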
pyviz/imagen
imagen/__init__.py
Line._pixelsize
def _pixelsize(self, p):
    """Calculate line width necessary to cover at least one pixel on all axes."""
    xpixelsize = 1./float(p.xdensity)
    ypixelsize = 1./float(p.ydensity)
    return max([xpixelsize,ypixelsize])
python
def _pixelsize(self, p):
    """Calculate line width necessary to cover at least one pixel on all axes."""
    xpixelsize = 1./float(p.xdensity)
    ypixelsize = 1./float(p.ydensity)
    return max([xpixelsize,ypixelsize])
[ "def", "_pixelsize", "(", "self", ",", "p", ")", ":", "xpixelsize", "=", "1.", "/", "float", "(", "p", ".", "xdensity", ")", "ypixelsize", "=", "1.", "/", "float", "(", "p", ".", "ydensity", ")", "return", "max", "(", "[", "xpixelsize", ",", "ypixelsize", "]", ")" ]
Calculate line width necessary to cover at least one pixel on all axes.
[ "Calculate", "line", "width", "necessary", "to", "cover", "at", "least", "one", "pixel", "on", "all", "axes", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L199-L203
train
pyviz/imagen
imagen/__init__.py
Line._count_pixels_on_line
def _count_pixels_on_line(self, y, p):
    """Count the number of pixels rendered on this line."""
    h = line(y, self._effective_thickness(p), 0.0)
    return h.sum()
python
def _count_pixels_on_line(self, y, p):
    """Count the number of pixels rendered on this line."""
    h = line(y, self._effective_thickness(p), 0.0)
    return h.sum()
[ "def", "_count_pixels_on_line", "(", "self", ",", "y", ",", "p", ")", ":", "h", "=", "line", "(", "y", ",", "self", ".", "_effective_thickness", "(", "p", ")", ",", "0.0", ")", "return", "h", ".", "sum", "(", ")" ]
Count the number of pixels rendered on this line.
[ "Count", "the", "number", "of", "pixels", "rendered", "on", "this", "line", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L209-L212
train
pyviz/imagen
imagen/__init__.py
Selector.num_channels
def num_channels(self):
    """
    Get the number of channels in the input generators.
    """
    if(self.inspect_value('index') is None):
        if(len(self.generators)>0):
            return self.generators[0].num_channels()
        return 0

    return self.get_current_generator().num_channels()
python
def num_channels(self):
    """
    Get the number of channels in the input generators.
    """
    if(self.inspect_value('index') is None):
        if(len(self.generators)>0):
            return self.generators[0].num_channels()
        return 0

    return self.get_current_generator().num_channels()
[ "def", "num_channels", "(", "self", ")", ":", "if", "(", "self", ".", "inspect_value", "(", "'index'", ")", "is", "None", ")", ":", "if", "(", "len", "(", "self", ".", "generators", ")", ">", "0", ")", ":", "return", "self", ".", "generators", "[", "0", "]", ".", "num_channels", "(", ")", "return", "0", "return", "self", ".", "get_current_generator", "(", ")", ".", "num_channels", "(", ")" ]
Get the number of channels in the input generators.
[ "Get", "the", "number", "of", "channels", "in", "the", "input", "generators", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L567-L576
train
pyviz/imagen
imagen/__init__.py
PowerSpectrum._set_frequency_spacing
def _set_frequency_spacing(self, min_freq, max_freq):
    """
    Frequency spacing to use, i.e. how to map the available
    frequency range to the discrete sheet rows.

    NOTE: We're calculating the spacing of a range between the
    highest and lowest frequencies, the actual segmentation and
    averaging of the frequencies to fit this spacing occurs in
    _getAmplitudes().

    This method is here solely to provide a minimal overload if
    custom spacing is required.
    """
    self.frequency_spacing = np.linspace(min_freq, max_freq,
                                         num=self._sheet_dimensions[0]+1,
                                         endpoint=True)
python
def _set_frequency_spacing(self, min_freq, max_freq):
    """
    Frequency spacing to use, i.e. how to map the available
    frequency range to the discrete sheet rows.

    NOTE: We're calculating the spacing of a range between the
    highest and lowest frequencies, the actual segmentation and
    averaging of the frequencies to fit this spacing occurs in
    _getAmplitudes().

    This method is here solely to provide a minimal overload if
    custom spacing is required.
    """
    self.frequency_spacing = np.linspace(min_freq, max_freq,
                                         num=self._sheet_dimensions[0]+1,
                                         endpoint=True)
[ "def", "_set_frequency_spacing", "(", "self", ",", "min_freq", ",", "max_freq", ")", ":", "self", ".", "frequency_spacing", "=", "np", ".", "linspace", "(", "min_freq", ",", "max_freq", ",", "num", "=", "self", ".", "_sheet_dimensions", "[", "0", "]", "+", "1", ",", "endpoint", "=", "True", ")" ]
Frequency spacing to use, i.e. how to map the available frequency
range to the discrete sheet rows.

NOTE: We're calculating the spacing of a range between the highest
and lowest frequencies, the actual segmentation and averaging of the
frequencies to fit this spacing occurs in _getAmplitudes().

This method is here solely to provide a minimal overload if custom
spacing is required.
[ "Frequency", "spacing", "to", "use", "i", ".", "e", ".", "how", "to", "map", "the", "available", "frequency", "range", "to", "the", "discrete", "sheet", "rows", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/__init__.py#L1401-L1415
train
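The key detail above is the num=rows+1: n sheet rows need n+1 bin edges. A standalone illustration with made-up numbers, 4 rows spanning 100 to 8000 Hz:

import numpy as np

sheet_rows = 4
edges = np.linspace(100, 8000, num=sheet_rows + 1, endpoint=True)
print(edges)  # [ 100. 2075. 4050. 6025. 8000.]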
portfoliome/postpy
postpy/pg_encodings.py
get_postgres_encoding
def get_postgres_encoding(python_encoding: str) -> str:
    """Python to postgres encoding map."""

    encoding = normalize_encoding(python_encoding.lower())
    encoding_ = aliases.aliases[encoding.replace('_', '', 1)].upper()
    pg_encoding = PG_ENCODING_MAP[encoding_.replace('_', '')]

    return pg_encoding
python
def get_postgres_encoding(python_encoding: str) -> str:
    """Python to postgres encoding map."""

    encoding = normalize_encoding(python_encoding.lower())
    encoding_ = aliases.aliases[encoding.replace('_', '', 1)].upper()
    pg_encoding = PG_ENCODING_MAP[encoding_.replace('_', '')]

    return pg_encoding
[ "def", "get_postgres_encoding", "(", "python_encoding", ":", "str", ")", "->", "str", ":", "encoding", "=", "normalize_encoding", "(", "python_encoding", ".", "lower", "(", ")", ")", "encoding_", "=", "aliases", ".", "aliases", "[", "encoding", ".", "replace", "(", "'_'", ",", "''", ",", "1", ")", "]", ".", "upper", "(", ")", "pg_encoding", "=", "PG_ENCODING_MAP", "[", "encoding_", ".", "replace", "(", "'_'", ",", "''", ")", "]", "return", "pg_encoding" ]
Python to postgres encoding map.
[ "Python", "to", "postgres", "encoding", "map", "." ]
fe26199131b15295fc5f669a0ad2a7f47bf490ee
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/pg_encodings.py#L15-L22
train
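The stdlib does most of the lifting here. A sketch of the normalization steps without PG_ENCODING_MAP (that mapping is not shown in this record, so the final postgres name is only implied):

from encodings import normalize_encoding, aliases

enc = normalize_encoding('UTF-8'.lower())        # 'utf_8'
print(aliases.aliases[enc.replace('_', '', 1)])  # 'utf_8' (alias key 'utf8')
# The function then uppercases and strips underscores, yielding 'UTF8',
# which PG_ENCODING_MAP presumably maps to the postgres encoding name.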
bskinn/opan
opan/output.py
OrcaOutput.en_last
def en_last(self):
    """ Report the energies from the last SCF present in the output.

    Returns a |dict| providing the various energy values from the
    last SCF cycle performed in the output. Keys are those of
    :attr:`~opan.output.OrcaOutput.p_en`. Any energy value
    not relevant to the parsed output is assigned as |None|.

    Returns
    -------
    last_ens
        |dict| of |npfloat_|--
        Energies from the last SCF present in the output.
    """

    # Initialize the return dict
    last_ens = dict()

    # Iterate and store
    for (k,l) in self.en.items():
        last_ens.update({ k : l[-1] if l != [] else None })
    ##next (k,l)

    # Should be ready to return?
    return last_ens
python
def en_last(self):
    """ Report the energies from the last SCF present in the output.

    Returns a |dict| providing the various energy values from the
    last SCF cycle performed in the output. Keys are those of
    :attr:`~opan.output.OrcaOutput.p_en`. Any energy value
    not relevant to the parsed output is assigned as |None|.

    Returns
    -------
    last_ens
        |dict| of |npfloat_|--
        Energies from the last SCF present in the output.
    """

    # Initialize the return dict
    last_ens = dict()

    # Iterate and store
    for (k,l) in self.en.items():
        last_ens.update({ k : l[-1] if l != [] else None })
    ##next (k,l)

    # Should be ready to return?
    return last_ens
[ "def", "en_last", "(", "self", ")", ":", "last_ens", "=", "dict", "(", ")", "for", "(", "k", ",", "l", ")", "in", "self", ".", "en", ".", "items", "(", ")", ":", "last_ens", ".", "update", "(", "{", "k", ":", "l", "[", "-", "1", "]", "if", "l", "!=", "[", "]", "else", "None", "}", ")", "return", "last_ens" ]
Report the energies from the last SCF present in the output.

Returns a |dict| providing the various energy values from the
last SCF cycle performed in the output. Keys are those of
:attr:`~opan.output.OrcaOutput.p_en`. Any energy value
not relevant to the parsed output is assigned as |None|.

Returns
-------
last_ens
    |dict| of |npfloat_|--
    Energies from the last SCF present in the output.
[ "Report", "the", "energies", "from", "the", "last", "SCF", "present", "in", "the", "output", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/output.py#L792-L818
train
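The pattern en_last applies is collapsing a dict of per-cycle lists to their final entries, with empty lists mapped to None. A standalone demonstration on toy data (the key names and values are made up):

en = {'SCF': [-150.1, -150.3], 'gibbs': []}
last_ens = {k: (l[-1] if l != [] else None) for k, l in en.items()}
print(last_ens)  # {'SCF': -150.3, 'gibbs': None}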
portfoliome/postpy
postpy/connections.py
connect
def connect(host=None, database=None, user=None, password=None, **kwargs):
    """Create a database connection."""

    host = host or os.environ['PGHOST']
    database = database or os.environ['PGDATABASE']
    user = user or os.environ['PGUSER']
    password = password or os.environ['PGPASSWORD']

    return psycopg2.connect(host=host,
                            database=database,
                            user=user,
                            password=password,
                            **kwargs)
python
def connect(host=None, database=None, user=None, password=None, **kwargs):
    """Create a database connection."""

    host = host or os.environ['PGHOST']
    database = database or os.environ['PGDATABASE']
    user = user or os.environ['PGUSER']
    password = password or os.environ['PGPASSWORD']

    return psycopg2.connect(host=host,
                            database=database,
                            user=user,
                            password=password,
                            **kwargs)
[ "def", "connect", "(", "host", "=", "None", ",", "database", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ",", "**", "kwargs", ")", ":", "host", "=", "host", "or", "os", ".", "environ", "[", "'PGHOST'", "]", "database", "=", "database", "or", "os", ".", "environ", "[", "'PGDATABASE'", "]", "user", "=", "user", "or", "os", ".", "environ", "[", "'PGUSER'", "]", "password", "=", "password", "or", "os", ".", "environ", "[", "'PGPASSWORD'", "]", "return", "psycopg2", ".", "connect", "(", "host", "=", "host", ",", "database", "=", "database", ",", "user", "=", "user", ",", "password", "=", "password", ",", "**", "kwargs", ")" ]
Create a database connection.
[ "Create", "a", "database", "connection", "." ]
fe26199131b15295fc5f669a0ad2a7f47bf490ee
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/connections.py#L8-L20
train
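A hedged usage sketch: explicit arguments win, and anything left as None falls back to the conventional libpq environment variables. The credentials below are placeholders and a reachable postgres server is assumed.

import os

os.environ.setdefault('PGHOST', 'localhost')
os.environ.setdefault('PGDATABASE', 'postgres')
os.environ.setdefault('PGUSER', 'postgres')
os.environ.setdefault('PGPASSWORD', 'secret')   # placeholder only

conn = connect(database='analytics')   # host/user/password come from env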
happyleavesaoc/python-orvibo
orvibo/s20.py
_setup
def _setup():
    """ Set up module.

    Open a UDP socket, and listen in a thread.
    """
    _SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    _SOCKET.bind(('', PORT))
    udp = threading.Thread(target=_listen, daemon=True)
    udp.start()
python
def _setup():
    """ Set up module.

    Open a UDP socket, and listen in a thread.
    """
    _SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    _SOCKET.bind(('', PORT))
    udp = threading.Thread(target=_listen, daemon=True)
    udp.start()
[ "def", "_setup", "(", ")", ":", "_SOCKET", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_BROADCAST", ",", "1", ")", "_SOCKET", ".", "bind", "(", "(", "''", ",", "PORT", ")", ")", "udp", "=", "threading", ".", "Thread", "(", "target", "=", "_listen", ",", "daemon", "=", "True", ")", "udp", ".", "start", "(", ")" ]
Set up module. Open a UDP socket, and listen in a thread.
[ "Set", "up", "module", "." ]
27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L50-L58
train
happyleavesaoc/python-orvibo
orvibo/s20.py
discover
def discover(timeout=DISCOVERY_TIMEOUT):
    """ Discover devices on the local network.

    :param timeout: Optional timeout in seconds.
    :returns: Set of discovered host addresses.
    """
    hosts = {}
    payload = MAGIC + DISCOVERY
    for _ in range(RETRIES):
        _SOCKET.sendto(bytearray(payload), ('255.255.255.255', PORT))
    start = time.time()
    while time.time() < start + timeout:
        for host, data in _BUFFER.copy().items():
            if not _is_discovery_response(data):
                continue
            if host not in hosts:
                _LOGGER.debug("Discovered device at %s", host)
                entry = {}
                entry['mac'] = data[7:13]
                entry['imac'] = data[19:25]
                entry['next'] = 0
                entry['st'] = int(data[-1])
                entry['time'] = _device_time(data[37:41])
                entry['serverTime'] = int(time.time())
                hosts[host] = entry
    return hosts
python
def discover(timeout=DISCOVERY_TIMEOUT):
    """ Discover devices on the local network.

    :param timeout: Optional timeout in seconds.
    :returns: Set of discovered host addresses.
    """
    hosts = {}
    payload = MAGIC + DISCOVERY
    for _ in range(RETRIES):
        _SOCKET.sendto(bytearray(payload), ('255.255.255.255', PORT))
    start = time.time()
    while time.time() < start + timeout:
        for host, data in _BUFFER.copy().items():
            if not _is_discovery_response(data):
                continue
            if host not in hosts:
                _LOGGER.debug("Discovered device at %s", host)
                entry = {}
                entry['mac'] = data[7:13]
                entry['imac'] = data[19:25]
                entry['next'] = 0
                entry['st'] = int(data[-1])
                entry['time'] = _device_time(data[37:41])
                entry['serverTime'] = int(time.time())
                hosts[host] = entry
    return hosts
[ "def", "discover", "(", "timeout", "=", "DISCOVERY_TIMEOUT", ")", ":", "hosts", "=", "{", "}", "payload", "=", "MAGIC", "+", "DISCOVERY", "for", "_", "in", "range", "(", "RETRIES", ")", ":", "_SOCKET", ".", "sendto", "(", "bytearray", "(", "payload", ")", ",", "(", "'255.255.255.255'", ",", "PORT", ")", ")", "start", "=", "time", ".", "time", "(", ")", "while", "time", ".", "time", "(", ")", "<", "start", "+", "timeout", ":", "for", "host", ",", "data", "in", "_BUFFER", ".", "copy", "(", ")", ".", "items", "(", ")", ":", "if", "not", "_is_discovery_response", "(", "data", ")", ":", "continue", "if", "host", "not", "in", "hosts", ":", "_LOGGER", ".", "debug", "(", "\"Discovered device at %s\"", ",", "host", ")", "entry", "=", "{", "}", "entry", "[", "'mac'", "]", "=", "data", "[", "7", ":", "13", "]", "entry", "[", "'imac'", "]", "=", "data", "[", "19", ":", "25", "]", "entry", "[", "'next'", "]", "=", "0", "entry", "[", "'st'", "]", "=", "int", "(", "data", "[", "-", "1", "]", ")", "entry", "[", "'time'", "]", "=", "_device_time", "(", "data", "[", "37", ":", "41", "]", ")", "entry", "[", "'serverTime'", "]", "=", "int", "(", "time", ".", "time", "(", ")", ")", "hosts", "[", "host", "]", "=", "entry", "return", "hosts" ]
Discover devices on the local network.

:param timeout: Optional timeout in seconds.
:returns: Set of discovered host addresses.
[ "Discover", "devices", "on", "the", "local", "network", "." ]
27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L66-L91
train
happyleavesaoc/python-orvibo
orvibo/s20.py
S20._discover_mac
def _discover_mac(self):
    """ Discovers MAC address of device.

    Discovery is done by sending a UDP broadcast.
    All configured devices reply. The response contains
    the MAC address in both needed formats.

    Discovery of multiple switches must be done synchronously.

    :returns: Tuple of MAC address and reversed MAC address.
    """
    mac = None
    mac_reversed = None
    cmd = MAGIC + DISCOVERY
    resp = self._udp_transact(cmd, self._discovery_resp,
                              broadcast=True,
                              timeout=DISCOVERY_TIMEOUT)
    if resp:
        (mac, mac_reversed) = resp
    if mac is None:
        raise S20Exception("Couldn't discover {}".format(self.host))
    return (mac, mac_reversed)
python
def _discover_mac(self):
    """ Discovers MAC address of device.

    Discovery is done by sending a UDP broadcast.
    All configured devices reply. The response contains
    the MAC address in both needed formats.

    Discovery of multiple switches must be done synchronously.

    :returns: Tuple of MAC address and reversed MAC address.
    """
    mac = None
    mac_reversed = None
    cmd = MAGIC + DISCOVERY
    resp = self._udp_transact(cmd, self._discovery_resp,
                              broadcast=True,
                              timeout=DISCOVERY_TIMEOUT)
    if resp:
        (mac, mac_reversed) = resp
    if mac is None:
        raise S20Exception("Couldn't discover {}".format(self.host))
    return (mac, mac_reversed)
[ "def", "_discover_mac", "(", "self", ")", ":", "mac", "=", "None", "mac_reversed", "=", "None", "cmd", "=", "MAGIC", "+", "DISCOVERY", "resp", "=", "self", ".", "_udp_transact", "(", "cmd", ",", "self", ".", "_discovery_resp", ",", "broadcast", "=", "True", ",", "timeout", "=", "DISCOVERY_TIMEOUT", ")", "if", "resp", ":", "(", "mac", ",", "mac_reversed", ")", "=", "resp", "if", "mac", "is", "None", ":", "raise", "S20Exception", "(", "\"Couldn't discover {}\"", ".", "format", "(", "self", ".", "host", ")", ")", "return", "(", "mac", ",", "mac_reversed", ")" ]
Discovers MAC address of device.

Discovery is done by sending a UDP broadcast.
All configured devices reply. The response contains
the MAC address in both needed formats.

Discovery of multiple switches must be done synchronously.

:returns: Tuple of MAC address and reversed MAC address.
[ "Discovers", "MAC", "address", "of", "device", "." ]
27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L167-L188
train
happyleavesaoc/python-orvibo
orvibo/s20.py
S20._subscribe
def _subscribe(self):
    """ Subscribe to the device.

    A subscription serves two purposes:
    - Returns state (on/off).
    - Enables state changes on the device
      for a short period of time.
    """
    cmd = MAGIC + SUBSCRIBE + self._mac \
        + PADDING_1 + self._mac_reversed + PADDING_1
    status = self._udp_transact(cmd, self._subscribe_resp)
    if status is not None:
        self.last_subscribed = time.time()
        return status == ON
    else:
        raise S20Exception(
            "No status could be found for {}".format(self.host))
python
def _subscribe(self):
    """ Subscribe to the device.

    A subscription serves two purposes:
    - Returns state (on/off).
    - Enables state changes on the device
      for a short period of time.
    """
    cmd = MAGIC + SUBSCRIBE + self._mac \
        + PADDING_1 + self._mac_reversed + PADDING_1
    status = self._udp_transact(cmd, self._subscribe_resp)
    if status is not None:
        self.last_subscribed = time.time()
        return status == ON
    else:
        raise S20Exception(
            "No status could be found for {}".format(self.host))
[ "def", "_subscribe", "(", "self", ")", ":", "cmd", "=", "MAGIC", "+", "SUBSCRIBE", "+", "self", ".", "_mac", "+", "PADDING_1", "+", "self", ".", "_mac_reversed", "+", "PADDING_1", "status", "=", "self", ".", "_udp_transact", "(", "cmd", ",", "self", ".", "_subscribe_resp", ")", "if", "status", "is", "not", "None", ":", "self", ".", "last_subscribed", "=", "time", ".", "time", "(", ")", "return", "status", "==", "ON", "else", ":", "raise", "S20Exception", "(", "\"No status could be found for {}\"", ".", "format", "(", "self", ".", "host", ")", ")" ]
Subscribe to the device.

A subscription serves two purposes:
- Returns state (on/off).
- Enables state changes on the device
  for a short period of time.
[ "Subscribe", "to", "the", "device", "." ]
27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L190-L206
train
happyleavesaoc/python-orvibo
orvibo/s20.py
S20._control
def _control(self, state):
    """ Control device state.

    Possible states are ON or OFF.

    :param state: Switch to this state.
    """
    # Renew subscription if necessary
    if not self._subscription_is_recent():
        self._subscribe()

    cmd = MAGIC + CONTROL + self._mac + PADDING_1 + PADDING_2 + state

    _LOGGER.debug("Sending new state to %s: %s", self.host, ord(state))
    ack_state = self._udp_transact(cmd, self._control_resp, state)
    if ack_state is None:
        raise S20Exception(
            "Device didn't acknowledge control request: {}".format(
                self.host))
python
def _control(self, state):
    """ Control device state.

    Possible states are ON or OFF.

    :param state: Switch to this state.
    """
    # Renew subscription if necessary
    if not self._subscription_is_recent():
        self._subscribe()

    cmd = MAGIC + CONTROL + self._mac + PADDING_1 + PADDING_2 + state

    _LOGGER.debug("Sending new state to %s: %s", self.host, ord(state))
    ack_state = self._udp_transact(cmd, self._control_resp, state)
    if ack_state is None:
        raise S20Exception(
            "Device didn't acknowledge control request: {}".format(
                self.host))
[ "def", "_control", "(", "self", ",", "state", ")", ":", "if", "not", "self", ".", "_subscription_is_recent", "(", ")", ":", "self", ".", "_subscribe", "(", ")", "cmd", "=", "MAGIC", "+", "CONTROL", "+", "self", ".", "_mac", "+", "PADDING_1", "+", "PADDING_2", "+", "state", "_LOGGER", ".", "debug", "(", "\"Sending new state to %s: %s\"", ",", "self", ".", "host", ",", "ord", "(", "state", ")", ")", "ack_state", "=", "self", ".", "_udp_transact", "(", "cmd", ",", "self", ".", "_control_resp", ",", "state", ")", "if", "ack_state", "is", "None", ":", "raise", "S20Exception", "(", "\"Device didn't acknowledge control request: {}\"", ".", "format", "(", "self", ".", "host", ")", ")" ]
Control device state.

Possible states are ON or OFF.

:param state: Switch to this state.
[ "Control", "device", "state", "." ]
27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L215-L233
train
happyleavesaoc/python-orvibo
orvibo/s20.py
S20._discovery_resp
def _discovery_resp(self, data): """ Handle a discovery response. :param data: Payload. :returns: MAC and reversed MAC. """ if _is_discovery_response(data): _LOGGER.debug("Discovered MAC of %s: %s", self.host, binascii.hexlify(data[7:13]).decode()) return (data[7:13], data[19:25])
python
def _discovery_resp(self, data): """ Handle a discovery response. :param data: Payload. :returns: MAC and reversed MAC. """ if _is_discovery_response(data): _LOGGER.debug("Discovered MAC of %s: %s", self.host, binascii.hexlify(data[7:13]).decode()) return (data[7:13], data[19:25])
[ "def", "_discovery_resp", "(", "self", ",", "data", ")", ":", "if", "_is_discovery_response", "(", "data", ")", ":", "_LOGGER", ".", "debug", "(", "\"Discovered MAC of %s: %s\"", ",", "self", ".", "host", ",", "binascii", ".", "hexlify", "(", "data", "[", "7", ":", "13", "]", ")", ".", "decode", "(", ")", ")", "return", "(", "data", "[", "7", ":", "13", "]", ",", "data", "[", "19", ":", "25", "]", ")" ]
Handle a discovery response. :param data: Payload. :returns: MAC and reversed MAC.
[ "Handle", "a", "discovery", "response", "." ]
27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L235-L245
train
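The `_is_discovery_response` predicate is not part of this record; a plausible sketch is a magic-prefix plus length check, which is all that is needed before slicing the MAC out of bytes 7:13 and its reverse out of 19:25. The exact validation the library performs is an assumption:

```python
MAGIC = b'\x68\x64'  # assumed two-byte prefix on every S20 datagram

def _is_discovery_response_sketch(data):
    """Accept payloads that carry the magic prefix and are long enough
    to contain both MAC slices used by _discovery_resp."""
    return data[:2] == MAGIC and len(data) >= 25
```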
happyleavesaoc/python-orvibo
orvibo/s20.py
S20._subscribe_resp
def _subscribe_resp(self, data): """ Handle a subscribe response. :param data: Payload. :returns: State (ON/OFF) """ if _is_subscribe_response(data): status = bytes([data[23]]) _LOGGER.debug("Successfully subscribed to %s, state: %s", self.host, ord(status)) return status
python
def _subscribe_resp(self, data): """ Handle a subscribe response. :param data: Payload. :returns: State (ON/OFF) """ if _is_subscribe_response(data): status = bytes([data[23]]) _LOGGER.debug("Successfully subscribed to %s, state: %s", self.host, ord(status)) return status
[ "def", "_subscribe_resp", "(", "self", ",", "data", ")", ":", "if", "_is_subscribe_response", "(", "data", ")", ":", "status", "=", "bytes", "(", "[", "data", "[", "23", "]", "]", ")", "_LOGGER", ".", "debug", "(", "\"Successfully subscribed to %s, state: %s\"", ",", "self", ".", "host", ",", "ord", "(", "status", ")", ")", "return", "status" ]
Handle a subscribe response. :param data: Payload. :returns: State (ON/OFF)
[ "Handle", "a", "subscribe", "response", "." ]
27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L247-L257
train
happyleavesaoc/python-orvibo
orvibo/s20.py
S20._control_resp
def _control_resp(self, data, state): """ Handle a control response. :param data: Payload. :param state: Requested state. :returns: Acknowledged state. """ if _is_control_response(data): ack_state = bytes([data[22]]) if state == ack_state: _LOGGER.debug("Received state ack from %s, state: %s", self.host, ord(ack_state)) return ack_state
python
def _control_resp(self, data, state): """ Handle a control response. :param data: Payload. :param state: Requested state. :returns: Acknowledged state. """ if _is_control_response(data): ack_state = bytes([data[22]]) if state == ack_state: _LOGGER.debug("Received state ack from %s, state: %s", self.host, ord(ack_state)) return ack_state
[ "def", "_control_resp", "(", "self", ",", "data", ",", "state", ")", ":", "if", "_is_control_response", "(", "data", ")", ":", "ack_state", "=", "bytes", "(", "[", "data", "[", "22", "]", "]", ")", "if", "state", "==", "ack_state", ":", "_LOGGER", ".", "debug", "(", "\"Received state ack from %s, state: %s\"", ",", "self", ".", "host", ",", "ord", "(", "ack_state", ")", ")", "return", "ack_state" ]
Handle a control response. :param data: Payload. :param state: Requested state. :returns: Acknowledged state.
[ "Handle", "a", "control", "response", "." ]
27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L259-L271
train
happyleavesaoc/python-orvibo
orvibo/s20.py
S20._udp_transact
def _udp_transact(self, payload, handler, *args, broadcast=False, timeout=TIMEOUT): """ Complete a UDP transaction. UDP is stateless and not guaranteed, so we have to take some mitigation steps: - Send payload multiple times. - Wait a while to receive a response. :param payload: Payload to send. :param handler: Response handler. :param args: Arguments to pass to response handler. :param broadcast: Send a broadcast instead. :param timeout: Timeout in seconds. """ if self.host in _BUFFER: del _BUFFER[self.host] host = self.host if broadcast: host = '255.255.255.255' retval = None for _ in range(RETRIES): _SOCKET.sendto(bytearray(payload), (host, PORT)) start = time.time() while time.time() < start + timeout: data = _BUFFER.get(self.host, None) if data: retval = handler(data, *args) # Return as soon as a response is received if retval: return retval
python
def _udp_transact(self, payload, handler, *args, broadcast=False, timeout=TIMEOUT): """ Complete a UDP transaction. UDP is stateless and not guaranteed, so we have to take some mitigation steps: - Send payload multiple times. - Wait a while to receive a response. :param payload: Payload to send. :param handler: Response handler. :param args: Arguments to pass to response handler. :param broadcast: Send a broadcast instead. :param timeout: Timeout in seconds. """ if self.host in _BUFFER: del _BUFFER[self.host] host = self.host if broadcast: host = '255.255.255.255' retval = None for _ in range(RETRIES): _SOCKET.sendto(bytearray(payload), (host, PORT)) start = time.time() while time.time() < start + timeout: data = _BUFFER.get(self.host, None) if data: retval = handler(data, *args) # Return as soon as a response is received if retval: return retval
[ "def", "_udp_transact", "(", "self", ",", "payload", ",", "handler", ",", "*", "args", ",", "broadcast", "=", "False", ",", "timeout", "=", "TIMEOUT", ")", ":", "if", "self", ".", "host", "in", "_BUFFER", ":", "del", "_BUFFER", "[", "self", ".", "host", "]", "host", "=", "self", ".", "host", "if", "broadcast", ":", "host", "=", "'255.255.255.255'", "retval", "=", "None", "for", "_", "in", "range", "(", "RETRIES", ")", ":", "_SOCKET", ".", "sendto", "(", "bytearray", "(", "payload", ")", ",", "(", "host", ",", "PORT", ")", ")", "start", "=", "time", ".", "time", "(", ")", "while", "time", ".", "time", "(", ")", "<", "start", "+", "timeout", ":", "data", "=", "_BUFFER", ".", "get", "(", "self", ".", "host", ",", "None", ")", "if", "data", ":", "retval", "=", "handler", "(", "data", ",", "*", "args", ")", "if", "retval", ":", "return", "retval" ]
Complete a UDP transaction. UDP is stateless and not guaranteed, so we have to take some mitigation steps: - Send payload multiple times. - Wait a while to receive a response. :param payload: Payload to send. :param handler: Response handler. :param args: Arguments to pass to response handler. :param broadcast: Send a broadcast instead. :param timeout: Timeout in seconds.
[ "Complete", "a", "UDP", "transaction", "." ]
27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9
https://github.com/happyleavesaoc/python-orvibo/blob/27210dfe0c44a9e4f2ef4edf2dac221977d7f5c9/orvibo/s20.py#L273-L303
train
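`_udp_transact` only polls `_BUFFER`; a separate receiver must be filing incoming datagrams by source IP for the polling loop to find. A sketch of that listener (the real module presumably starts one on a daemon thread; the port number and buffer size are assumptions):

```python
import socket
import threading

_BUFFER = {}  # host -> most recent raw payload, polled by _udp_transact

def _listen(sock):
    """File every received datagram under its sender's IP address."""
    while True:
        data, addr = sock.recvfrom(1024)
        _BUFFER[addr[0]] = data

def _start_listener(port=10000):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sock.bind(('', port))
    threading.Thread(target=_listen, args=(sock,), daemon=True).start()
    return sock
```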
kmike/opencorpora-tools
opencorpora/reader_lxml.py
load
def load(source): """ Load OpenCorpora corpus. The ``source`` can be any of the following: - a file name/path - a file object - a file-like object - a URL using the HTTP or FTP protocol """ parser = get_xml_parser() return etree.parse(source, parser=parser).getroot()
python
def load(source): """ Load OpenCorpora corpus. The ``source`` can be any of the following: - a file name/path - a file object - a file-like object - a URL using the HTTP or FTP protocol """ parser = get_xml_parser() return etree.parse(source, parser=parser).getroot()
[ "def", "load", "(", "source", ")", ":", "parser", "=", "get_xml_parser", "(", ")", "return", "etree", ".", "parse", "(", "source", ",", "parser", "=", "parser", ")", ".", "getroot", "(", ")" ]
Load OpenCorpora corpus. The ``source`` can be any of the following: - a file name/path - a file object - a file-like object - a URL using the HTTP or FTP protocol
[ "Load", "OpenCorpora", "corpus", "." ]
26fee106aea1180d2975b3825dcf9b3875e80db1
https://github.com/kmike/opencorpora-tools/blob/26fee106aea1180d2975b3825dcf9b3875e80db1/opencorpora/reader_lxml.py#L11-L24
train
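Typical use against a local OpenCorpora dump; the `token` element and its `text` attribute follow the OpenCorpora annotation schema, but treat the exact names as an assumption:

```python
root = load('annot.opcorpora.xml')

tokens = root.findall('.//token')
print(len(tokens))              # corpus size in tokens
for tok in tokens[:5]:
    print(tok.get('text'))      # first few surface forms
```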
openvax/isovar
isovar/translation.py
translation_generator
def translation_generator( variant_sequences, reference_contexts, min_transcript_prefix_length, max_transcript_mismatches, include_mismatches_after_variant, protein_sequence_length=None): """ Given all detected VariantSequence objects for a particular variant and all the ReferenceContext objects for that locus, translate multiple protein sequences, up to the number specified by the argument max_protein_sequences_per_variant. Parameters ---------- variant_sequences : list of VariantSequence objects Variant sequences overlapping a single original variant reference_contexts : list of ReferenceContext objects Reference sequence contexts from the same variant as the variant_sequences min_transcript_prefix_length : int Minimum number of nucleotides before the variant to test whether our variant sequence can use the reading frame from a reference transcript. max_transcript_mismatches : int Maximum number of mismatches between coding sequence before variant and reference transcript we're considering for determining the reading frame. include_mismatches_after_variant : bool If true, mismatches occurring after the variant locus will also count toward max_transcript_mismatches filtering. protein_sequence_length : int, optional Truncate protein to be at most this long. Yields a sequence of Translation objects. """ for reference_context in reference_contexts: for variant_sequence in variant_sequences: translation = Translation.from_variant_sequence_and_reference_context( variant_sequence=variant_sequence, reference_context=reference_context, min_transcript_prefix_length=min_transcript_prefix_length, max_transcript_mismatches=max_transcript_mismatches, include_mismatches_after_variant=include_mismatches_after_variant, protein_sequence_length=protein_sequence_length) if translation is not None: yield translation
python
def translation_generator( variant_sequences, reference_contexts, min_transcript_prefix_length, max_transcript_mismatches, include_mismatches_after_variant, protein_sequence_length=None): """ Given all detected VariantSequence objects for a particular variant and all the ReferenceContext objects for that locus, translate multiple protein sequences, up to the number specified by the argument max_protein_sequences_per_variant. Parameters ---------- variant_sequences : list of VariantSequence objects Variant sequences overlapping a single original variant reference_contexts : list of ReferenceContext objects Reference sequence contexts from the same variant as the variant_sequences min_transcript_prefix_length : int Minimum number of nucleotides before the variant to test whether our variant sequence can use the reading frame from a reference transcript. max_transcript_mismatches : int Maximum number of mismatches between coding sequence before variant and reference transcript we're considering for determining the reading frame. include_mismatches_after_variant : bool If true, mismatches occurring after the variant locus will also count toward max_transcript_mismatches filtering. protein_sequence_length : int, optional Truncate protein to be at most this long. Yields a sequence of Translation objects. """ for reference_context in reference_contexts: for variant_sequence in variant_sequences: translation = Translation.from_variant_sequence_and_reference_context( variant_sequence=variant_sequence, reference_context=reference_context, min_transcript_prefix_length=min_transcript_prefix_length, max_transcript_mismatches=max_transcript_mismatches, include_mismatches_after_variant=include_mismatches_after_variant, protein_sequence_length=protein_sequence_length) if translation is not None: yield translation
[ "def", "translation_generator", "(", "variant_sequences", ",", "reference_contexts", ",", "min_transcript_prefix_length", ",", "max_transcript_mismatches", ",", "include_mismatches_after_variant", ",", "protein_sequence_length", "=", "None", ")", ":", "for", "reference_context", "in", "reference_contexts", ":", "for", "variant_sequence", "in", "variant_sequences", ":", "translation", "=", "Translation", ".", "from_variant_sequence_and_reference_context", "(", "variant_sequence", "=", "variant_sequence", ",", "reference_context", "=", "reference_context", ",", "min_transcript_prefix_length", "=", "min_transcript_prefix_length", ",", "max_transcript_mismatches", "=", "max_transcript_mismatches", ",", "include_mismatches_after_variant", "=", "include_mismatches_after_variant", ",", "protein_sequence_length", "=", "protein_sequence_length", ")", "if", "translation", "is", "not", "None", ":", "yield", "translation" ]
Given all detected VariantSequence objects for a particular variant and all the ReferenceContext objects for that locus, translate multiple protein sequences, up to the number specified by the argument max_protein_sequences_per_variant. Parameters ---------- variant_sequences : list of VariantSequence objects Variant sequences overlapping a single original variant reference_contexts : list of ReferenceContext objects Reference sequence contexts from the same variant as the variant_sequences min_transcript_prefix_length : int Minimum number of nucleotides before the variant to test whether our variant sequence can use the reading frame from a reference transcript. max_transcript_mismatches : int Maximum number of mismatches between coding sequence before variant and reference transcript we're considering for determining the reading frame. include_mismatches_after_variant : bool If true, mismatches occurring after the variant locus will also count toward max_transcript_mismatches filtering. protein_sequence_length : int, optional Truncate protein to be at most this long. Yields a sequence of Translation objects.
[ "Given", "all", "detected", "VariantSequence", "objects", "for", "a", "particular", "variant", "and", "all", "the", "ReferenceContext", "objects", "for", "that", "locus", "translate", "multiple", "protein", "sequences", "up", "to", "the", "number", "specified", "by", "the", "argument", "max_protein_sequences_per_variant", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/translation.py#L355-L405
train
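A sketch of how the generator is consumed downstream, keeping only translations that terminated at a stop codon; the two input lists would come from the read-collection and reference-context steps elsewhere in isovar, so the variable names here are placeholders:

```python
translations = list(translation_generator(
    variant_sequences=variant_sequences,    # e.g. from reads_to_variant_sequences
    reference_contexts=reference_contexts,  # e.g. from reference_contexts_for_variant
    min_transcript_prefix_length=10,
    max_transcript_mismatches=2,
    include_mismatches_after_variant=False,
    protein_sequence_length=25))

complete = [t for t in translations if t.ends_with_stop_codon]
```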
openvax/isovar
isovar/translation.py
translate_variant_reads
def translate_variant_reads( variant, variant_reads, protein_sequence_length, transcript_id_whitelist=None, min_alt_rna_reads=MIN_ALT_RNA_READS, min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE, min_transcript_prefix_length=MIN_TRANSCRIPT_PREFIX_LENGTH, max_transcript_mismatches=MAX_REFERENCE_TRANSCRIPT_MISMATCHES, include_mismatches_after_variant=INCLUDE_MISMATCHES_AFTER_VARIANT, variant_sequence_assembly=VARIANT_SEQUENCE_ASSEMBLY): """ Given a variant and its associated alt reads, construct variant sequences and translate them into Translation objects. Returns 0 or more Translation objects. Parameters ---------- variant : varcode.Variant variant_reads : sequence or generator AlleleRead objects supporting the variant protein_sequence_length : int Try to translate protein sequences of this length, though sometimes we'll have to return something shorter (depending on the RNAseq data, and presence of stop codons). transcript_id_whitelist : set, optional If given, expected to be a set of transcript IDs which we should use for determining the reading frame around a variant. If omitted, then try to use all overlapping reference transcripts. min_alt_rna_reads : int Drop variant sequences from loci with fewer than this number of RNA reads supporting the alt allele. min_variant_sequence_coverage : int Trim variant sequences to nucleotides covered by at least this many reads. min_transcript_prefix_length : int Minimum number of bases we need to try matching between the reference context and variant sequence. max_transcript_mismatches : int Don't try to determine the reading frame for a transcript if more than this number of bases differ. include_mismatches_after_variant : bool Include mismatches after the variant locus in the count compared against max_transcript_mismatches. variant_sequence_assembly : bool Use overlap assembly to construct longer variant cDNA sequences. """ if len(variant_reads) == 0: logger.info("No supporting reads for variant %s", variant) return [] # Adding an extra codon to the desired RNA sequence length in case we # need to clip nucleotides at the start/end of the sequence cdna_sequence_length = (protein_sequence_length + 1) * 3 variant_sequences = reads_to_variant_sequences( variant=variant, reads=variant_reads, preferred_sequence_length=cdna_sequence_length, min_alt_rna_reads=min_alt_rna_reads, min_variant_sequence_coverage=min_variant_sequence_coverage, variant_sequence_assembly=variant_sequence_assembly) if not variant_sequences: logger.info("No spanning cDNA sequences for variant %s", variant) return [] # try translating the variant sequences from the same set of # ReferenceContext objects, which requires using the longest # context_size to be compatible with all of the sequences. Some # sequences may have fewer nucleotides than this before the variant # and will thus have to be trimmed. context_size = max( len(variant_sequence.prefix) for variant_sequence in variant_sequences) reference_contexts = reference_contexts_for_variant( variant, context_size=context_size, transcript_id_whitelist=transcript_id_whitelist) return list(translation_generator( variant_sequences=variant_sequences, reference_contexts=reference_contexts, min_transcript_prefix_length=min_transcript_prefix_length, max_transcript_mismatches=max_transcript_mismatches, include_mismatches_after_variant=include_mismatches_after_variant, protein_sequence_length=protein_sequence_length))
python
def translate_variant_reads( variant, variant_reads, protein_sequence_length, transcript_id_whitelist=None, min_alt_rna_reads=MIN_ALT_RNA_READS, min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE, min_transcript_prefix_length=MIN_TRANSCRIPT_PREFIX_LENGTH, max_transcript_mismatches=MAX_REFERENCE_TRANSCRIPT_MISMATCHES, include_mismatches_after_variant=INCLUDE_MISMATCHES_AFTER_VARIANT, variant_sequence_assembly=VARIANT_SEQUENCE_ASSEMBLY): """ Given a variant and its associated alt reads, construct variant sequences and translate them into Translation objects. Returns 0 or more Translation objects. Parameters ---------- variant : varcode.Variant variant_reads : sequence or generator AlleleRead objects supporting the variant protein_sequence_length : int Try to translate protein sequences of this length, though sometimes we'll have to return something shorter (depending on the RNAseq data, and presence of stop codons). transcript_id_whitelist : set, optional If given, expected to be a set of transcript IDs which we should use for determining the reading frame around a variant. If omitted, then try to use all overlapping reference transcripts. min_alt_rna_reads : int Drop variant sequences from loci with fewer than this number of RNA reads supporting the alt allele. min_variant_sequence_coverage : int Trim variant sequences to nucleotides covered by at least this many reads. min_transcript_prefix_length : int Minimum number of bases we need to try matching between the reference context and variant sequence. max_transcript_mismatches : int Don't try to determine the reading frame for a transcript if more than this number of bases differ. include_mismatches_after_variant : bool Include mismatches after the variant locus in the count compared against max_transcript_mismatches. variant_sequence_assembly : bool Use overlap assembly to construct longer variant cDNA sequences. """ if len(variant_reads) == 0: logger.info("No supporting reads for variant %s", variant) return [] # Adding an extra codon to the desired RNA sequence length in case we # need to clip nucleotides at the start/end of the sequence cdna_sequence_length = (protein_sequence_length + 1) * 3 variant_sequences = reads_to_variant_sequences( variant=variant, reads=variant_reads, preferred_sequence_length=cdna_sequence_length, min_alt_rna_reads=min_alt_rna_reads, min_variant_sequence_coverage=min_variant_sequence_coverage, variant_sequence_assembly=variant_sequence_assembly) if not variant_sequences: logger.info("No spanning cDNA sequences for variant %s", variant) return [] # try translating the variant sequences from the same set of # ReferenceContext objects, which requires using the longest # context_size to be compatible with all of the sequences. Some # sequences may have fewer nucleotides than this before the variant # and will thus have to be trimmed. context_size = max( len(variant_sequence.prefix) for variant_sequence in variant_sequences) reference_contexts = reference_contexts_for_variant( variant, context_size=context_size, transcript_id_whitelist=transcript_id_whitelist) return list(translation_generator( variant_sequences=variant_sequences, reference_contexts=reference_contexts, min_transcript_prefix_length=min_transcript_prefix_length, max_transcript_mismatches=max_transcript_mismatches, include_mismatches_after_variant=include_mismatches_after_variant, protein_sequence_length=protein_sequence_length))
[ "def", "translate_variant_reads", "(", "variant", ",", "variant_reads", ",", "protein_sequence_length", ",", "transcript_id_whitelist", "=", "None", ",", "min_alt_rna_reads", "=", "MIN_ALT_RNA_READS", ",", "min_variant_sequence_coverage", "=", "MIN_VARIANT_SEQUENCE_COVERAGE", ",", "min_transcript_prefix_length", "=", "MIN_TRANSCRIPT_PREFIX_LENGTH", ",", "max_transcript_mismatches", "=", "MAX_REFERENCE_TRANSCRIPT_MISMATCHES", ",", "include_mismatches_after_variant", "=", "INCLUDE_MISMATCHES_AFTER_VARIANT", ",", "variant_sequence_assembly", "=", "VARIANT_SEQUENCE_ASSEMBLY", ")", ":", "if", "len", "(", "variant_reads", ")", "==", "0", ":", "logger", ".", "info", "(", "\"No supporting reads for variant %s\"", ",", "variant", ")", "return", "[", "]", "cdna_sequence_length", "=", "(", "protein_sequence_length", "+", "1", ")", "*", "3", "variant_sequences", "=", "reads_to_variant_sequences", "(", "variant", "=", "variant", ",", "reads", "=", "variant_reads", ",", "preferred_sequence_length", "=", "cdna_sequence_length", ",", "min_alt_rna_reads", "=", "min_alt_rna_reads", ",", "min_variant_sequence_coverage", "=", "min_variant_sequence_coverage", ",", "variant_sequence_assembly", "=", "variant_sequence_assembly", ")", "if", "not", "variant_sequences", ":", "logger", ".", "info", "(", "\"No spanning cDNA sequences for variant %s\"", ",", "variant", ")", "return", "[", "]", "context_size", "=", "max", "(", "len", "(", "variant_sequence", ".", "prefix", ")", "for", "variant_sequence", "in", "variant_sequences", ")", "reference_contexts", "=", "reference_contexts_for_variant", "(", "variant", ",", "context_size", "=", "context_size", ",", "transcript_id_whitelist", "=", "transcript_id_whitelist", ")", "return", "list", "(", "translation_generator", "(", "variant_sequences", "=", "variant_sequences", ",", "reference_contexts", "=", "reference_contexts", ",", "min_transcript_prefix_length", "=", "min_transcript_prefix_length", ",", "max_transcript_mismatches", "=", "max_transcript_mismatches", ",", "include_mismatches_after_variant", "=", "include_mismatches_after_variant", ",", "protein_sequence_length", "=", "protein_sequence_length", ")", ")" ]
Given a variant and its associated alt reads, construct variant sequences and translate them into Translation objects. Returns 0 or more Translation objects. Parameters ---------- variant : varcode.Variant variant_reads : sequence or generator AlleleRead objects supporting the variant protein_sequence_length : int Try to translate protein sequences of this length, though sometimes we'll have to return something shorter (depending on the RNAseq data, and presence of stop codons). transcript_id_whitelist : set, optional If given, expected to be a set of transcript IDs which we should use for determining the reading frame around a variant. If omitted, then try to use all overlapping reference transcripts. min_alt_rna_reads : int Drop variant sequences from loci with fewer than this number of RNA reads supporting the alt allele. min_variant_sequence_coverage : int Trim variant sequences to nucleotides covered by at least this many reads. min_transcript_prefix_length : int Minimum number of bases we need to try matching between the reference context and variant sequence. max_transcript_mismatches : int Don't try to determine the reading frame for a transcript if more than this number of bases differ. include_mismatches_after_variant : bool Include mismatches after the variant locus in the count compared against max_transcript_mismatches. variant_sequence_assembly : bool Use overlap assembly to construct longer variant cDNA sequences.
[ "Given", "a", "variant", "and", "its", "associated", "alt", "reads", "construct", "variant", "sequences", "and", "translate", "them", "into", "Translation", "objects", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/translation.py#L408-L505
train
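The `(protein_sequence_length + 1) * 3` padding deserves a concrete check: requesting one spare codon absorbs the at most two nucleotides clipped when aligning to the reading frame, so the target protein length survives.

```python
protein_sequence_length = 25
cdna_sequence_length = (protein_sequence_length + 1) * 3   # 78 nt requested

# Worst case, 2 nt are clipped to reach the first complete codon:
# 76 nt still contains 25 whole codons (75 nt).
assert (cdna_sequence_length - 2) // 3 >= protein_sequence_length
```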
openvax/isovar
isovar/translation.py
Translation.as_translation_key
def as_translation_key(self): """ Project Translation object or any other derived class into just a TranslationKey, which has fewer fields and can be used as a dictionary key. """ return TranslationKey(**{ name: getattr(self, name) for name in TranslationKey._fields})
python
def as_translation_key(self): """ Project Translation object or any other derived class into just a TranslationKey, which has fewer fields and can be used as a dictionary key. """ return TranslationKey(**{ name: getattr(self, name) for name in TranslationKey._fields})
[ "def", "as_translation_key", "(", "self", ")", ":", "return", "TranslationKey", "(", "**", "{", "name", ":", "getattr", "(", "self", ",", "name", ")", "for", "name", "in", "TranslationKey", ".", "_fields", "}", ")" ]
Project Translation object or any other derived class into just a TranslationKey, which has fewer fields and can be used as a dictionary key.
[ "Project", "Translation", "object", "or", "any", "other", "derived", "class", "into", "just", "a", "TranslationKey", "which", "has", "fewer", "fields", "and", "can", "be", "used", "as", "a", "dictionary", "key", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/translation.py#L150-L158
train
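The point of projecting down to `TranslationKey` is hashability: equivalent translations supported by different reads can be pooled under one key. A sketch of that grouping pattern, assuming `translations` is a list of Translation objects:

```python
from collections import defaultdict

groups = defaultdict(list)
for translation in translations:
    groups[translation.as_translation_key()].append(translation)

# Each bucket now holds Translation objects that agree on every
# TranslationKey field (amino acid sequence, variant interval, etc.).
```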
openvax/isovar
isovar/translation.py
Translation.from_variant_sequence_and_reference_context
def from_variant_sequence_and_reference_context( cls, variant_sequence, reference_context, min_transcript_prefix_length, max_transcript_mismatches, include_mismatches_after_variant, protein_sequence_length=None): """ Attempt to translate a single VariantSequence using the reading frame from a single ReferenceContext. Parameters ---------- variant_sequence : VariantSequence reference_context : ReferenceContext min_transcript_prefix_length : int Minimum number of nucleotides before the variant to test whether our variant sequence can use the reading frame from a reference transcript. max_transcript_mismatches : int Don't use the reading frame from a context where the cDNA variant sequence disagrees at more than this number of positions before the variant nucleotides. include_mismatches_after_variant : bool If true, mismatches after the variant nucleotides will also count against max_transcript_mismatches filtering. protein_sequence_length : int, optional Truncate protein to be at most this long Returns either a Translation object or None if the number of mismatches between the RNA and reference transcript sequences exceeds the given threshold. """ variant_sequence_in_reading_frame = match_variant_sequence_to_reference_context( variant_sequence, reference_context, min_transcript_prefix_length=min_transcript_prefix_length, max_transcript_mismatches=max_transcript_mismatches, include_mismatches_after_variant=include_mismatches_after_variant) if variant_sequence_in_reading_frame is None: logger.info("Unable to determine reading frame for %s", variant_sequence) return None cdna_sequence = variant_sequence_in_reading_frame.cdna_sequence cdna_codon_offset = variant_sequence_in_reading_frame.offset_to_first_complete_codon # get the offsets into the cDNA sequence which pick out the variant nucleotides cdna_variant_start_offset = variant_sequence_in_reading_frame.variant_cdna_interval_start cdna_variant_end_offset = variant_sequence_in_reading_frame.variant_cdna_interval_end # TODO: determine if the first codon is the start codon of a # transcript, for now any of the unusual start codons like CTG # will translate to leucine instead of methionine.
variant_amino_acids, ends_with_stop_codon = translate_cdna( cdna_sequence[cdna_codon_offset:], first_codon_is_start=False, mitochondrial=reference_context.mitochondrial) variant_aa_interval_start, variant_aa_interval_end, frameshift = \ find_mutant_amino_acid_interval( cdna_sequence=cdna_sequence, cdna_first_codon_offset=cdna_codon_offset, cdna_variant_start_offset=cdna_variant_start_offset, cdna_variant_end_offset=cdna_variant_end_offset, n_ref=len(reference_context.sequence_at_variant_locus), n_amino_acids=len(variant_amino_acids)) if protein_sequence_length and len(variant_amino_acids) > protein_sequence_length: if protein_sequence_length <= variant_aa_interval_start: logger.warn( ("Truncating amino acid sequence %s " "to only %d elements loses all variant residues"), variant_amino_acids, protein_sequence_length) return None # if the protein is too long then shorten it, which implies # we're no longer stopping due to a stop codon and that the variant # amino acids might need a new stop index variant_amino_acids = variant_amino_acids[:protein_sequence_length] variant_aa_interval_end = min(variant_aa_interval_end, protein_sequence_length) ends_with_stop_codon = False return Translation( amino_acids=variant_amino_acids, frameshift=frameshift, ends_with_stop_codon=ends_with_stop_codon, variant_aa_interval_start=variant_aa_interval_start, variant_aa_interval_end=variant_aa_interval_end, untrimmed_variant_sequence=variant_sequence, reference_context=reference_context, variant_sequence_in_reading_frame=variant_sequence_in_reading_frame)
python
def from_variant_sequence_and_reference_context( cls, variant_sequence, reference_context, min_transcript_prefix_length, max_transcript_mismatches, include_mismatches_after_variant, protein_sequence_length=None): """ Attempt to translate a single VariantSequence using the reading frame from a single ReferenceContext. Parameters ---------- variant_sequence : VariantSequence reference_context : ReferenceContext min_transcript_prefix_length : int Minimum number of nucleotides before the variant to test whether our variant sequence can use the reading frame from a reference transcript. max_transcript_mismatches : int Don't use the reading frame from a context where the cDNA variant sequence disagrees at more than this number of positions before the variant nucleotides. include_mismatches_after_variant : bool If true, mismatches after the variant nucleotides will also count against max_transcript_mismatches filtering. protein_sequence_length : int, optional Truncate protein to be at most this long Returns either a Translation object or None if the number of mismatches between the RNA and reference transcript sequences exceeds the given threshold. """ variant_sequence_in_reading_frame = match_variant_sequence_to_reference_context( variant_sequence, reference_context, min_transcript_prefix_length=min_transcript_prefix_length, max_transcript_mismatches=max_transcript_mismatches, include_mismatches_after_variant=include_mismatches_after_variant) if variant_sequence_in_reading_frame is None: logger.info("Unable to determine reading frame for %s", variant_sequence) return None cdna_sequence = variant_sequence_in_reading_frame.cdna_sequence cdna_codon_offset = variant_sequence_in_reading_frame.offset_to_first_complete_codon # get the offsets into the cDNA sequence which pick out the variant nucleotides cdna_variant_start_offset = variant_sequence_in_reading_frame.variant_cdna_interval_start cdna_variant_end_offset = variant_sequence_in_reading_frame.variant_cdna_interval_end # TODO: determine if the first codon is the start codon of a # transcript, for now any of the unusual start codons like CTG # will translate to leucine instead of methionine.
variant_amino_acids, ends_with_stop_codon = translate_cdna( cdna_sequence[cdna_codon_offset:], first_codon_is_start=False, mitochondrial=reference_context.mitochondrial) variant_aa_interval_start, variant_aa_interval_end, frameshift = \ find_mutant_amino_acid_interval( cdna_sequence=cdna_sequence, cdna_first_codon_offset=cdna_codon_offset, cdna_variant_start_offset=cdna_variant_start_offset, cdna_variant_end_offset=cdna_variant_end_offset, n_ref=len(reference_context.sequence_at_variant_locus), n_amino_acids=len(variant_amino_acids)) if protein_sequence_length and len(variant_amino_acids) > protein_sequence_length: if protein_sequence_length <= variant_aa_interval_start: logger.warn( ("Truncating amino acid sequence %s " "to only %d elements loses all variant residues"), variant_amino_acids, protein_sequence_length) return None # if the protein is too long then shorten it, which implies # we're no longer stopping due to a stop codon and that the variant # amino acids might need a new stop index variant_amino_acids = variant_amino_acids[:protein_sequence_length] variant_aa_interval_end = min(variant_aa_interval_end, protein_sequence_length) ends_with_stop_codon = False return Translation( amino_acids=variant_amino_acids, frameshift=frameshift, ends_with_stop_codon=ends_with_stop_codon, variant_aa_interval_start=variant_aa_interval_start, variant_aa_interval_end=variant_aa_interval_end, untrimmed_variant_sequence=variant_sequence, reference_context=reference_context, variant_sequence_in_reading_frame=variant_sequence_in_reading_frame)
[ "def", "from_variant_sequence_and_reference_context", "(", "cls", ",", "variant_sequence", ",", "reference_context", ",", "min_transcript_prefix_length", ",", "max_transcript_mismatches", ",", "include_mismatches_after_variant", ",", "protein_sequence_length", "=", "None", ")", ":", "variant_sequence_in_reading_frame", "=", "match_variant_sequence_to_reference_context", "(", "variant_sequence", ",", "reference_context", ",", "min_transcript_prefix_length", "=", "min_transcript_prefix_length", ",", "max_transcript_mismatches", "=", "max_transcript_mismatches", ",", "include_mismatches_after_variant", "=", "include_mismatches_after_variant", ")", "if", "variant_sequence_in_reading_frame", "is", "None", ":", "logger", ".", "info", "(", "\"Unable to determine reading frame for %s\"", ",", "variant_sequence", ")", "return", "None", "cdna_sequence", "=", "variant_sequence_in_reading_frame", ".", "cdna_sequence", "cdna_codon_offset", "=", "variant_sequence_in_reading_frame", ".", "offset_to_first_complete_codon", "cdna_variant_start_offset", "=", "variant_sequence_in_reading_frame", ".", "variant_cdna_interval_start", "cdna_variant_end_offset", "=", "variant_sequence_in_reading_frame", ".", "variant_cdna_interval_end", "variant_amino_acids", ",", "ends_with_stop_codon", "=", "translate_cdna", "(", "cdna_sequence", "[", "cdna_codon_offset", ":", "]", ",", "first_codon_is_start", "=", "False", ",", "mitochondrial", "=", "reference_context", ".", "mitochondrial", ")", "variant_aa_interval_start", ",", "variant_aa_interval_end", ",", "frameshift", "=", "find_mutant_amino_acid_interval", "(", "cdna_sequence", "=", "cdna_sequence", ",", "cdna_first_codon_offset", "=", "cdna_codon_offset", ",", "cdna_variant_start_offset", "=", "cdna_variant_start_offset", ",", "cdna_variant_end_offset", "=", "cdna_variant_end_offset", ",", "n_ref", "=", "len", "(", "reference_context", ".", "sequence_at_variant_locus", ")", ",", "n_amino_acids", "=", "len", "(", "variant_amino_acids", ")", ")", "if", "protein_sequence_length", "and", "len", "(", "variant_amino_acids", ")", ">", "protein_sequence_length", ":", "if", "protein_sequence_length", "<=", "variant_aa_interval_start", ":", "logger", ".", "warn", "(", "(", "\"Truncating amino acid sequence %s \"", "\"to only %d elements loses all variant residues\"", ")", ",", "variant_amino_acids", ",", "protein_sequence_length", ")", "return", "None", "variant_amino_acids", "=", "variant_amino_acids", "[", ":", "protein_sequence_length", "]", "variant_aa_interval_end", "=", "min", "(", "variant_aa_interval_end", ",", "protein_sequence_length", ")", "ends_with_stop_codon", "=", "False", "return", "Translation", "(", "amino_acids", "=", "variant_amino_acids", ",", "frameshift", "=", "frameshift", ",", "ends_with_stop_codon", "=", "ends_with_stop_codon", ",", "variant_aa_interval_start", "=", "variant_aa_interval_start", ",", "variant_aa_interval_end", "=", "variant_aa_interval_end", ",", "untrimmed_variant_sequence", "=", "variant_sequence", ",", "reference_context", "=", "reference_context", ",", "variant_sequence_in_reading_frame", "=", "variant_sequence_in_reading_frame", ")" ]
Attempt to translate a single VariantSequence using the reading frame from a single ReferenceContext. Parameters ---------- variant_sequence : VariantSequence reference_context : ReferenceContext min_transcript_prefix_length : int Minimum number of nucleotides before the variant to test whether our variant sequence can use the reading frame from a reference transcript. max_transcript_mismatches : int Don't use the reading frame from a context where the cDNA variant sequence disagrees at more than this number of positions before the variant nucleotides. include_mismatches_after_variant : bool If true, mismatches after the variant nucleotides will also count against max_transcript_mismatches filtering. protein_sequence_length : int, optional Truncate protein to be at most this long Returns either a Translation object or None if the number of mismatches between the RNA and reference transcript sequences exceeds the given threshold.
[ "Attempt", "to", "translate", "a", "single", "VariantSequence", "using", "the", "reading", "frame", "from", "a", "single", "ReferenceContext", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/translation.py#L161-L258
train
imlonghao/cachet.python
cachet.py
Cachet.postComponents
def postComponents(self, name, status, **kwargs): '''Create a new component. :param name: Name of the component :param status: Status of the component; 1-4 :param description: (optional) Description of the component :param link: (optional) A hyperlink to the component :param order: (optional) Order of the component :param group_id: (optional) The group id that the component is within :param enabled: (optional) :return: :class:`Response <Response>` object :rtype: requests.Response ''' kwargs['name'] = name kwargs['status'] = status return self.__postRequest('/components', kwargs)
python
def postComponents(self, name, status, **kwargs): '''Create a new component. :param name: Name of the component :param status: Status of the component; 1-4 :param description: (optional) Description of the component :param link: (optional) A hyperlink to the component :param order: (optional) Order of the component :param group_id: (optional) The group id that the component is within :param enabled: (optional) :return: :class:`Response <Response>` object :rtype: requests.Response ''' kwargs['name'] = name kwargs['status'] = status return self.__postRequest('/components', kwargs)
[ "def", "postComponents", "(", "self", ",", "name", ",", "status", ",", "**", "kwargs", ")", ":", "kwargs", "[", "'name'", "]", "=", "name", "kwargs", "[", "'status'", "]", "=", "status", "return", "self", ".", "__postRequest", "(", "'/components'", ",", "kwargs", ")" ]
Create a new component. :param name: Name of the component :param status: Status of the component; 1-4 :param description: (optional) Description of the component :param link: (optional) A hyperlink to the component :param order: (optional) Order of the component :param group_id: (optional) The group id that the component is within :param enabled: (optional) :return: :class:`Response <Response>` object :rtype: requests.Response
[ "Create", "a", "new", "component", "." ]
624b0d8e09b551a3be45dec207da6aa89f1e56e8
https://github.com/imlonghao/cachet.python/blob/624b0d8e09b551a3be45dec207da6aa89f1e56e8/cachet.py#L51-L67
train
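Usage sketch; the constructor arguments (API base URL and token) are assumptions about this wrapper's interface, and the URLs are placeholders:

```python
cachet = Cachet('https://status.example.com/api/v1', 'MY_API_TOKEN')

# Create a component that starts out operational (status 1).
resp = cachet.postComponents('Public API', 1,
                             description='Customer-facing REST endpoints',
                             link='https://api.example.com')
print(resp.json()['data']['id'])
```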
imlonghao/cachet.python
cachet.py
Cachet.postIncidents
def postIncidents(self, name, message, status, visible, **kwargs): '''Create a new incident. :param name: Name of the incident :param message: A message (supporting Markdown) to explain more. :param status: Status of the incident. :param visible: Whether the incident is publicly visible. :param component_id: (optional) Component to update. :param component_status: (optional) The status to update the given component with. :param notify: (optional) Whether to notify subscribers. :return: :class:`Response <Response>` object :rtype: requests.Response ''' kwargs['name'] = name kwargs['message'] = message kwargs['status'] = status kwargs['visible'] = visible return self.__postRequest('/incidents', kwargs)
python
def postIncidents(self, name, message, status, visible, **kwargs): '''Create a new incident. :param name: Name of the incident :param message: A message (supporting Markdown) to explain more. :param status: Status of the incident. :param visible: Whether the incident is publicly visible. :param component_id: (optional) Component to update. :param component_status: (optional) The status to update the given component with. :param notify: (optional) Whether to notify subscribers. :return: :class:`Response <Response>` object :rtype: requests.Response ''' kwargs['name'] = name kwargs['message'] = message kwargs['status'] = status kwargs['visible'] = visible return self.__postRequest('/incidents', kwargs)
[ "def", "postIncidents", "(", "self", ",", "name", ",", "message", ",", "status", ",", "visible", ",", "**", "kwargs", ")", ":", "kwargs", "[", "'name'", "]", "=", "name", "kwargs", "[", "'message'", "]", "=", "message", "kwargs", "[", "'status'", "]", "=", "status", "kwargs", "[", "'visible'", "]", "=", "visible", "return", "self", ".", "__postRequest", "(", "'/incidents'", ",", "kwargs", ")" ]
Create a new incident. :param name: Name of the incident :param message: A message (supporting Markdown) to explain more. :param status: Status of the incident. :param visible: Whether the incident is publicly visible. :param component_id: (optional) Component to update. :param component_status: (optional) The status to update the given component with. :param notify: (optional) Whether to notify subscribers. :return: :class:`Response <Response>` object :rtype: requests.Response
[ "Create", "a", "new", "incident", "." ]
624b0d8e09b551a3be45dec207da6aa89f1e56e8
https://github.com/imlonghao/cachet.python/blob/624b0d8e09b551a3be45dec207da6aa89f1e56e8/cachet.py#L168-L186
train
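Continuing the sketch above: opening an incident against that component. The numeric codes follow Cachet's documented enums (incident status 1 = Investigating, component status 3 = Partial Outage); treat those server-side meanings as an assumption, since the wrapper just forwards them:

```python
cachet.postIncidents(
    'Elevated API error rate',
    'We are investigating elevated 5xx responses.',
    1,          # incident status: Investigating
    1,          # visible on the public status page
    component_id=resp.json()['data']['id'],
    component_status=3,   # Partial Outage
    notify=True)
```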
imlonghao/cachet.python
cachet.py
Cachet.postMetrics
def postMetrics(self, name, suffix, description, default_value, **kwargs): '''Create a new metric. :param name: Name of metric :param suffix: Measurements in :param description: Description of what the metric is measuring :param default_value: The default value to use when a point is added :param display_chart: (optional) Whether to display the chart on the status page :return: :class:`Response <Response>` object :rtype: requests.Response ''' kwargs['name'] = name kwargs['suffix'] = suffix kwargs['description'] = description kwargs['default_value'] = default_value return self.__postRequest('/metrics', kwargs)
python
def postMetrics(self, name, suffix, description, default_value, **kwargs): '''Create a new metric. :param name: Name of metric :param suffix: Measurements in :param description: Description of what the metric is measuring :param default_value: The default value to use when a point is added :param display_chart: (optional) Whether to display the chart on the status page :return: :class:`Response <Response>` object :rtype: requests.Response ''' kwargs['name'] = name kwargs['suffix'] = suffix kwargs['description'] = description kwargs['default_value'] = default_value return self.__postRequest('/metrics', kwargs)
[ "def", "postMetrics", "(", "self", ",", "name", ",", "suffix", ",", "description", ",", "default_value", ",", "**", "kwargs", ")", ":", "kwargs", "[", "'name'", "]", "=", "name", "kwargs", "[", "'suffix'", "]", "=", "suffix", "kwargs", "[", "'description'", "]", "=", "description", "kwargs", "[", "'default_value'", "]", "=", "default_value", "return", "self", ".", "__postRequest", "(", "'/metrics'", ",", "kwargs", ")" ]
Create a new metric. :param name: Name of metric :param suffix: Measurements in :param description: Description of what the metric is measuring :param default_value: The default value to use when a point is added :param display_chart: (optional) Whether to display the chart on the status page :return: :class:`Response <Response>` object :rtype: requests.Response
[ "Create", "a", "new", "metric", "." ]
624b0d8e09b551a3be45dec207da6aa89f1e56e8
https://github.com/imlonghao/cachet.python/blob/624b0d8e09b551a3be45dec207da6aa89f1e56e8/cachet.py#L223-L239
train
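And a metric to chart alongside the incident sketch above; `display_chart` is passed through as an optional field per the docstring:

```python
metric = cachet.postMetrics(
    'Response time', 'ms',
    'Mean API response time per minute',
    0,                      # default value when no point exists
    display_chart=True).json()['data']
```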
imlonghao/cachet.python
cachet.py
Cachet.postMetricsPointsByID
def postMetricsPointsByID(self, id, value, **kwargs): '''Add a metric point to a given metric. :param id: Metric ID :param value: Value to plot on the metric graph :param timestamp: Unix timestamp of when the point was measured :return: :class:`Response <Response>` object :rtype: requests.Response ''' kwargs['value'] = value return self.__postRequest('/metrics/%s/points' % id, kwargs)
python
def postMetricsPointsByID(self, id, value, **kwargs): '''Add a metric point to a given metric. :param id: Metric ID :param value: Value to plot on the metric graph :param timestamp: Unix timestamp of when the point was measured :return: :class:`Response <Response>` object :rtype: requests.Response ''' kwargs['value'] = value return self.__postRequest('/metrics/%s/points' % id, kwargs)
[ "def", "postMetricsPointsByID", "(", "self", ",", "id", ",", "value", ",", "**", "kwargs", ")", ":", "kwargs", "[", "'value'", "]", "=", "value", "return", "self", ".", "__postRequest", "(", "'/metrics/%s/points'", "%", "id", ",", "kwargs", ")" ]
Add a metric point to a given metric. :param id: Metric ID :param value: Value to plot on the metric graph :param timestamp: Unix timestamp of when the point was measured :return: :class:`Response <Response>` object :rtype: requests.Response
[ "Add", "a", "metric", "point", "to", "a", "given", "metric", "." ]
624b0d8e09b551a3be45dec207da6aa89f1e56e8
https://github.com/imlonghao/cachet.python/blob/624b0d8e09b551a3be45dec207da6aa89f1e56e8/cachet.py#L271-L282
train
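Feeding points into that metric; the cadence is only illustrative:

```python
import time

for value in (123, 131, 118):
    cachet.postMetricsPointsByID(metric['id'], value,
                                 timestamp=int(time.time()))
    time.sleep(60)   # one point per minute
```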
bskinn/opan
opan/utils/inertia.py
ctr_mass
def ctr_mass(geom, masses): """Calculate the center of mass of the indicated geometry. Take a geometry and atom masses and compute the location of the center of mass. Parameters ---------- geom length-3N |npfloat_| -- Coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- ctr length-3 |npfloat_| -- Vector location of center of mass Raises ------ ~exceptions.ValueError If `geom` & `masses` shapes are inconsistent """ # Imports import numpy as np from .base import safe_cast as scast # Shape check if len(geom.shape) != 1: raise ValueError("Geometry is not a vector") ## end if if len(masses.shape) != 1: raise ValueError("Masses cannot be parsed as a vector") ## end if if not geom.shape[0] % 3 == 0: raise ValueError("Geometry is not length-3N") ## end if if geom.shape[0] != 3*masses.shape[0] and geom.shape[0] != masses.shape[0]: raise ValueError("Inconsistent geometry and masses vector lengths") ## end if # If N masses are provided, expand to 3N; if 3N, retain. if geom.shape[0] == 3*masses.shape[0]: masses = masses.repeat(3) ## end if # Calculate the mass-weighted coordinates, reshape to group by coordinate # column-wise, sum each column, then divide by the sum of masses, which # must further be divided by three because there are three replicates # (possibly perturbed) of the mass of each atom. ctr = np.multiply(geom, masses).reshape((geom.shape[0]//3, 3)) \ .sum(axis=0).squeeze() / (masses.sum() / 3) # Return the vector return ctr
python
def ctr_mass(geom, masses): """Calculate the center of mass of the indicated geometry. Take a geometry and atom masses and compute the location of the center of mass. Parameters ---------- geom length-3N |npfloat_| -- Coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- ctr length-3 |npfloat_| -- Vector location of center of mass Raises ------ ~exceptions.ValueError If `geom` & `masses` shapes are inconsistent """ # Imports import numpy as np from .base import safe_cast as scast # Shape check if len(geom.shape) != 1: raise ValueError("Geometry is not a vector") ## end if if len(masses.shape) != 1: raise ValueError("Masses cannot be parsed as a vector") ## end if if not geom.shape[0] % 3 == 0: raise ValueError("Geometry is not length-3N") ## end if if geom.shape[0] != 3*masses.shape[0] and geom.shape[0] != masses.shape[0]: raise ValueError("Inconsistent geometry and masses vector lengths") ## end if # If N masses are provided, expand to 3N; if 3N, retain. if geom.shape[0] == 3*masses.shape[0]: masses = masses.repeat(3) ## end if # Calculate the mass-weighted coordinates, reshape to group by coordinate # column-wise, sum each column, then divide by the sum of masses, which # must further be divided by three because there are three replicates # (possibly perturbed) of the mass of each atom. ctr = np.multiply(geom, masses).reshape((geom.shape[0]//3, 3)) \ .sum(axis=0).squeeze() / (masses.sum() / 3) # Return the vector return ctr
[ "def", "ctr_mass", "(", "geom", ",", "masses", ")", ":", "import", "numpy", "as", "np", "from", ".", "base", "import", "safe_cast", "as", "scast", "if", "len", "(", "geom", ".", "shape", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Geometry is not a vector\"", ")", "if", "len", "(", "masses", ".", "shape", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Masses cannot be parsed as a vector\"", ")", "if", "not", "geom", ".", "shape", "[", "0", "]", "%", "3", "==", "0", ":", "raise", "ValueError", "(", "\"Geometry is not length-3N\"", ")", "if", "geom", ".", "shape", "[", "0", "]", "!=", "3", "*", "masses", ".", "shape", "[", "0", "]", "and", "geom", ".", "shape", "[", "0", "]", "!=", "masses", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "\"Inconsistent geometry and masses vector lengths\"", ")", "if", "geom", ".", "shape", "[", "0", "]", "==", "3", "*", "masses", ".", "shape", "[", "0", "]", ":", "masses", "=", "masses", ".", "repeat", "(", "3", ")", "ctr", "=", "np", ".", "multiply", "(", "geom", ",", "masses", ")", ".", "reshape", "(", "(", "geom", ".", "shape", "[", "0", "]", "//", "3", ",", "3", ")", ")", ".", "sum", "(", "axis", "=", "0", ")", ".", "squeeze", "(", ")", "/", "(", "masses", ".", "sum", "(", ")", "/", "3", ")", "return", "ctr" ]
Calculate the center of mass of the indicated geometry. Take a geometry and atom masses and compute the location of the center of mass. Parameters ---------- geom length-3N |npfloat_| -- Coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- ctr length-3 |npfloat_| -- Vector location of center of mass Raises ------ ~exceptions.ValueError If `geom` & `masses` shapes are inconsistent
[ "Calculate", "the", "center", "of", "mass", "of", "the", "indicated", "geometry", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L52-L113
train
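A quick numeric check of the mass-weighted mean, reproducing the function's arithmetic with plain NumPy: masses 2 and 1 at x = 0 and x = 3 should put the center of mass at x = 1.

```python
import numpy as np

geom = np.array([0., 0., 0.,   3., 0., 0.])   # two atoms on the x-axis
masses = np.array([2., 1.])

com = (geom.reshape(-1, 3) * masses[:, None]).sum(axis=0) / masses.sum()
print(com)   # -> [1. 0. 0.], matching ctr_mass(geom, masses)
```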
bskinn/opan
opan/utils/inertia.py
ctr_geom
def ctr_geom(geom, masses): """ Returns geometry shifted to center of mass. Helper function to automate / encapsulate translation of a geometry to its center of mass. Parameters ---------- geom length-3N |npfloat_| -- Original coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- ctr_geom length-3N |npfloat_| -- Atomic coordinates after shift to center of mass Raises ------ ~exceptions.ValueError If shapes of `geom` & `masses` are inconsistent """ # Imports import numpy as np # Calculate the shift vector. Possible bad shape of geom or masses is # addressed internally by the ctr_mass call. shift = np.tile(ctr_mass(geom, masses), geom.shape[0] // 3) # Shift the geometry and return ctr_geom = geom - shift return ctr_geom
python
def ctr_geom(geom, masses): """ Returns geometry shifted to center of mass. Helper function to automate / encapsulate translation of a geometry to its center of mass. Parameters ---------- geom length-3N |npfloat_| -- Original coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- ctr_geom length-3N |npfloat_| -- Atomic coordinates after shift to center of mass Raises ------ ~exceptions.ValueError If shapes of `geom` & `masses` are inconsistent """ # Imports import numpy as np # Calculate the shift vector. Possible bad shape of geom or masses is # addressed internally by the ctr_mass call. shift = np.tile(ctr_mass(geom, masses), geom.shape[0] // 3) # Shift the geometry and return ctr_geom = geom - shift return ctr_geom
[ "def", "ctr_geom", "(", "geom", ",", "masses", ")", ":", "import", "numpy", "as", "np", "shift", "=", "np", ".", "tile", "(", "ctr_mass", "(", "geom", ",", "masses", ")", ",", "geom", ".", "shape", "[", "0", "]", "/", "3", ")", "ctr_geom", "=", "geom", "-", "shift", "return", "ctr_geom" ]
Returns geometry shifted to center of mass. Helper function to automate / encapsulate translation of a geometry to its center of mass. Parameters ---------- geom length-3N |npfloat_| -- Original coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- ctr_geom length-3N |npfloat_| -- Atomic coordinates after shift to center of mass Raises ------ ~exceptions.ValueError If shapes of `geom` & `masses` are inconsistent
[ "Returns", "geometry", "shifted", "to", "center", "of", "mass", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L119-L158
train
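Continuing the NumPy check above, the shifted geometry should have a mass-weighted mean of zero, which is exactly what `ctr_geom` guarantees:

```python
shifted = geom - np.tile(com, 2)   # what ctr_geom computes for this input
residual = (shifted.reshape(-1, 3) * masses[:, None]).sum(axis=0)
assert np.allclose(residual, 0.0)
```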
bskinn/opan
opan/utils/inertia.py
inertia_tensor
def inertia_tensor(geom, masses): """Generate the 3x3 moment-of-inertia tensor. Compute the 3x3 moment-of-inertia tensor for the provided geometry and atomic masses. Always recenters the geometry to the center of mass as the first step. Reference for inertia tensor: [Kro92]_, Eq. (2.26) .. todo:: Replace cite eventually with link to exposition in user guide. Parameters ---------- geom length-3N |npfloat_| -- Coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- tensor 3 x 3 |npfloat_| -- Moment of inertia tensor for the system Raises ------ ~exceptions.ValueError If shapes of `geom` & `masses` are inconsistent """ # Imports import numpy as np # Center the geometry. Takes care of any improper shapes of geom or # masses via the internal call to 'ctr_mass' within the call to 'ctr_geom' geom = ctr_geom(geom, masses) # Expand the masses if required. Shape should only ever be (N,) or (3N,), # else would raise an exception within the above 'ctr_geom' call if geom.shape[0] == 3*masses.shape[0]: masses = masses.repeat(3) ## end if # Initialize the tensor matrix tensor = np.zeros((3,3)) # Fill the matrix for i in range(3): for j in range(i,3): if i == j: # On-diagonal element; calculate indices to include ind = np.concatenate([np.array(list(map(lambda v: v % 3, range(i+1, i+3)))) + o for o in range(0,geom.shape[0],3)]) # Calculate the tensor element tensor[i,i] = np.multiply(np.square(geom[ind]), masses[ind]).sum() else: # Off-diagonal element; calculate the indices ind_i = np.array(range(i,geom.shape[0]+i,3)) ind_j = np.array(range(j,geom.shape[0]+j,3)) # Calculate the tensor element and its symmetric partner tensor[i,j] = np.multiply( np.sqrt(np.multiply(masses[ind_i], masses[ind_j])) , np.multiply(geom[ind_i], geom[ind_j]) ).sum() * -1 tensor[j,i] = tensor[i,j] ## end if ## next j ## next i # Return the tensor return tensor
python
def inertia_tensor(geom, masses): """Generate the 3x3 moment-of-inertia tensor. Compute the 3x3 moment-of-inertia tensor for the provided geometry and atomic masses. Always recenters the geometry to the center of mass as the first step. Reference for inertia tensor: [Kro92]_, Eq. (2.26) .. todo:: Replace cite eventually with link to exposition in user guide. Parameters ---------- geom length-3N |npfloat_| -- Coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- tensor 3 x 3 |npfloat_| -- Moment of inertia tensor for the system Raises ------ ~exceptions.ValueError If shapes of `geom` & `masses` are inconsistent """ # Imports import numpy as np # Center the geometry. Takes care of any improper shapes of geom or # masses via the internal call to 'ctr_mass' within the call to 'ctr_geom' geom = ctr_geom(geom, masses) # Expand the masses if required. Shape should only ever be (N,) or (3N,), # else would raise an exception within the above 'ctr_geom' call if geom.shape[0] == 3*masses.shape[0]: masses = masses.repeat(3) ## end if # Initialize the tensor matrix tensor = np.zeros((3,3)) # Fill the matrix for i in range(3): for j in range(i,3): if i == j: # On-diagonal element; calculate indices to include ind = np.concatenate([np.array(list(map(lambda v: v % 3, range(i+1, i+3)))) + o for o in range(0,geom.shape[0],3)]) # Calculate the tensor element tensor[i,i] = np.multiply(np.square(geom[ind]), masses[ind]).sum() else: # Off-diagonal element; calculate the indices ind_i = np.array(range(i,geom.shape[0]+i,3)) ind_j = np.array(range(j,geom.shape[0]+j,3)) # Calculate the tensor element and its symmetric partner tensor[i,j] = np.multiply( np.sqrt(np.multiply(masses[ind_i], masses[ind_j])) , np.multiply(geom[ind_i], geom[ind_j]) ).sum() * -1 tensor[j,i] = tensor[i,j] ## end if ## next j ## next i # Return the tensor return tensor
[ "def", "inertia_tensor", "(", "geom", ",", "masses", ")", ":", "import", "numpy", "as", "np", "geom", "=", "ctr_geom", "(", "geom", ",", "masses", ")", "if", "geom", ".", "shape", "[", "0", "]", "==", "3", "*", "masses", ".", "shape", "[", "0", "]", ":", "masses", "=", "masses", ".", "repeat", "(", "3", ")", "tensor", "=", "np", ".", "zeros", "(", "(", "3", ",", "3", ")", ")", "for", "i", "in", "range", "(", "3", ")", ":", "for", "j", "in", "range", "(", "i", ",", "3", ")", ":", "if", "i", "==", "j", ":", "ind", "=", "np", ".", "concatenate", "(", "[", "np", ".", "array", "(", "list", "(", "map", "(", "lambda", "v", ":", "v", "%", "3", ",", "range", "(", "i", "+", "1", ",", "i", "+", "3", ")", ")", ")", ")", "+", "o", "for", "o", "in", "range", "(", "0", ",", "geom", ".", "shape", "[", "0", "]", ",", "3", ")", "]", ")", "tensor", "[", "i", ",", "i", "]", "=", "np", ".", "multiply", "(", "np", ".", "square", "(", "geom", "[", "ind", "]", ")", ",", "masses", "[", "ind", "]", ")", ".", "sum", "(", ")", "else", ":", "ind_i", "=", "np", ".", "array", "(", "range", "(", "i", ",", "geom", ".", "shape", "[", "0", "]", "+", "i", ",", "3", ")", ")", "ind_j", "=", "np", ".", "array", "(", "range", "(", "j", ",", "geom", ".", "shape", "[", "0", "]", "+", "j", ",", "3", ")", ")", "tensor", "[", "i", ",", "j", "]", "=", "np", ".", "multiply", "(", "np", ".", "sqrt", "(", "np", ".", "multiply", "(", "masses", "[", "ind_i", "]", ",", "masses", "[", "ind_j", "]", ")", ")", ",", "np", ".", "multiply", "(", "geom", "[", "ind_i", "]", ",", "geom", "[", "ind_j", "]", ")", ")", ".", "sum", "(", ")", "*", "-", "1", "tensor", "[", "j", ",", "i", "]", "=", "tensor", "[", "i", ",", "j", "]", "return", "tensor" ]
Generate the 3x3 moment-of-inertia tensor. Compute the 3x3 moment-of-inertia tensor for the provided geometry and atomic masses. Always recenters the geometry to the center of mass as the first step. Reference for inertia tensor: [Kro92]_, Eq. (2.26) .. todo:: Replace cite eventually with link to exposition in user guide. Parameters ---------- geom length-3N |npfloat_| -- Coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- tensor 3 x 3 |npfloat_| -- Moment of inertia tensor for the system Raises ------ ~exceptions.ValueError If shapes of `geom` & `masses` are inconsistent
[ "Generate", "the", "3x3", "moment", "-", "of", "-", "inertia", "tensor", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L164-L242
train
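A hedged sketch of inertia_tensor on a diatomic, under the same import assumption (and the same Python 2 division caveat, since the function re-centers via ctr_geom). Two unit masses at +/-1 bohr on the x axis carry no inertia about the bond axis, so the tensor should come out as diag(0, 2, 2).

import numpy as np
from opan.utils.inertia import inertia_tensor

geom = np.array([-1.0, 0.0, 0.0,   # atom 1 on the x axis
                 1.0, 0.0, 0.0])   # atom 2 opposite
masses = np.array([1.0, 1.0])

print(inertia_tensor(geom, masses))  # expected: diag(0, 2, 2)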
bskinn/opan
opan/utils/inertia.py
rot_consts
def rot_consts(geom, masses, units=_EURC.INV_INERTIA, on_tol=_DEF.ORTHONORM_TOL): """Rotational constants for a given molecular system. Calculates the rotational constants for the provided system with numerical value given in the units provided in `units`. The orthonormality tolerance `on_tol` is required in order to be passed through to the :func:`principals` function. If the system is linear or a single atom, the effectively-zero principal moments of inertia will be assigned values of :data:`opan.const.PRM.ZERO_MOMENT_TOL` before transformation into the appropriate rotational constant units. The moments of inertia are always sorted in increasing order as :math:`0 \\leq I_A \\leq I_B \\leq I_C`; the rotational constants calculated from these will thus always be in **decreasing** order as :math:`B_A \\geq B_B \\geq B_C`, retaining the ordering and association with the three principal ``axes[:,i]`` generated by :func:`principals`. Parameters ---------- geom length-3N |npfloat_| -- Coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. units :class:`~opan.const.EnumUnitsRotConst`, optional -- Enum value indicating the desired units of the output rotational constants. Default is :data:`~opan.const.EnumUnitsRotConst.INV_INERTIA` :math:`\\left(1\\over \\mathrm{uB^2}\\right)` on_tol |npfloat_|, optional -- Tolerance for deviation from unity/zero for principal axis dot products, within which axes are considered orthonormal. Default is :data:`opan.const.DEF.ORTHONORM_TOL` Returns ------- rc length-3 |npfloat_| -- Vector of rotational constants in the indicated units """ # Imports import numpy as np from ..const import EnumTopType as ETT, EnumUnitsRotConst as EURC, PRM, PHYS # Ensure units are valid if not units in EURC: raise ValueError("'{0}' is not a valid units value".format(units)) ## end if # Retrieve the moments, axes and top type. Geom and masses are proofed # internally in this call. mom, ax, top = principals(geom, masses, on_tol) # Check for special cases if top == ETT.ATOM: # All moments are zero; set to zero-moment threshold mom = np.repeat(PRM.ZERO_MOMENT_TOL, 3) elif top == ETT.LINEAR: # First moment is zero; set to zero-moment threshold mom[0] = PRM.ZERO_MOMENT_TOL ## end if # Calculate the values in the indicated units if units == EURC.INV_INERTIA: # 1/(amu*B^2) rc = 1.0 / (2.0 * mom) elif units == EURC.ANGFREQ_ATOMIC: # 1/Ta rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) elif units == EURC.ANGFREQ_SECS: # 1/s rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) / PHYS.SEC_PER_TA elif units == EURC.CYCFREQ_ATOMIC: # cyc/Ta rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) elif units == EURC.CYCFREQ_HZ: # cyc/s rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \ PHYS.SEC_PER_TA elif units == EURC.CYCFREQ_MHZ: # Mcyc/s rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \ PHYS.SEC_PER_TA / 1.0e6 elif units == EURC.WAVENUM_ATOMIC: # cyc/B rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \ (8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED) elif units == EURC.WAVENUM_CM: # cyc/cm rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \ (8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED * PHYS.ANG_PER_BOHR) * 1.0e8 else: # pragma: no cover -- Valid units; not implemented raise NotImplementedError("Units conversion not yet implemented.") ## end if # Return the result return rc
python
def rot_consts(geom, masses, units=_EURC.INV_INERTIA, on_tol=_DEF.ORTHONORM_TOL): """Rotational constants for a given molecular system. Calculates the rotational constants for the provided system with numerical value given in the units provided in `units`. The orthonormality tolerance `on_tol` is required in order to be passed through to the :func:`principals` function. If the system is linear or a single atom, the effectively-zero principal moments of inertia will be assigned values of :data:`opan.const.PRM.ZERO_MOMENT_TOL` before transformation into the appropriate rotational constant units. The moments of inertia are always sorted in increasing order as :math:`0 \\leq I_A \\leq I_B \\leq I_C`; the rotational constants calculated from these will thus always be in **decreasing** order as :math:`B_A \\geq B_B \\geq B_C`, retaining the ordering and association with the three principal ``axes[:,i]`` generated by :func:`principals`. Parameters ---------- geom length-3N |npfloat_| -- Coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. units :class:`~opan.const.EnumUnitsRotConst`, optional -- Enum value indicating the desired units of the output rotational constants. Default is :data:`~opan.const.EnumUnitsRotConst.INV_INERTIA` :math:`\\left(1\\over \\mathrm{uB^2}\\right)` on_tol |npfloat_|, optional -- Tolerance for deviation from unity/zero for principal axis dot products, within which axes are considered orthonormal. Default is :data:`opan.const.DEF.ORTHONORM_TOL` Returns ------- rc length-3 |npfloat_| -- Vector of rotational constants in the indicated units """ # Imports import numpy as np from ..const import EnumTopType as ETT, EnumUnitsRotConst as EURC, PRM, PHYS # Ensure units are valid if not units in EURC: raise ValueError("'{0}' is not a valid units value".format(units)) ## end if # Retrieve the moments, axes and top type. Geom and masses are proofed # internally in this call. mom, ax, top = principals(geom, masses, on_tol) # Check for special cases if top == ETT.ATOM: # All moments are zero; set to zero-moment threshold mom = np.repeat(PRM.ZERO_MOMENT_TOL, 3) elif top == ETT.LINEAR: # First moment is zero; set to zero-moment threshold mom[0] = PRM.ZERO_MOMENT_TOL ## end if # Calculate the values in the indicated units if units == EURC.INV_INERTIA: # 1/(amu*B^2) rc = 1.0 / (2.0 * mom) elif units == EURC.ANGFREQ_ATOMIC: # 1/Ta rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) elif units == EURC.ANGFREQ_SECS: # 1/s rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) / PHYS.SEC_PER_TA elif units == EURC.CYCFREQ_ATOMIC: # cyc/Ta rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) elif units == EURC.CYCFREQ_HZ: # cyc/s rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \ PHYS.SEC_PER_TA elif units == EURC.CYCFREQ_MHZ: # Mcyc/s rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \ PHYS.SEC_PER_TA / 1.0e6 elif units == EURC.WAVENUM_ATOMIC: # cyc/B rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \ (8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED) elif units == EURC.WAVENUM_CM: # cyc/cm rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \ (8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED * PHYS.ANG_PER_BOHR) * 1.0e8 else: # pragma: no cover -- Valid units; not implemented raise NotImplementedError("Units conversion not yet implemented.") ## end if # Return the result return rc
[ "def", "rot_consts", "(", "geom", ",", "masses", ",", "units", "=", "_EURC", ".", "INV_INERTIA", ",", "on_tol", "=", "_DEF", ".", "ORTHONORM_TOL", ")", ":", "import", "numpy", "as", "np", "from", ".", ".", "const", "import", "EnumTopType", "as", "ETT", ",", "EnumUnitsRotConst", "as", "EURC", ",", "PRM", ",", "PHYS", "if", "not", "units", "in", "EURC", ":", "raise", "ValueError", "(", "\"'{0}' is not a valid units value\"", ".", "format", "(", "units", ")", ")", "mom", ",", "ax", ",", "top", "=", "principals", "(", "geom", ",", "masses", ",", "on_tol", ")", "if", "top", "==", "ETT", ".", "ATOM", ":", "mom", "=", "np", ".", "repeat", "(", "PRM", ".", "ZERO_MOMENT_TOL", ",", "3", ")", "elif", "top", "==", "ETT", ".", "LINEAR", ":", "mom", "[", "0", "]", "=", "PRM", ".", "ZERO_MOMENT_TOL", "if", "units", "==", "EURC", ".", "INV_INERTIA", ":", "rc", "=", "1.0", "/", "(", "2.0", "*", "mom", ")", "elif", "units", "==", "EURC", ".", "ANGFREQ_ATOMIC", ":", "rc", "=", "PHYS", ".", "PLANCK_BAR", "/", "(", "2.0", "*", "mom", "*", "PHYS", ".", "ME_PER_AMU", ")", "elif", "units", "==", "EURC", ".", "ANGFREQ_SECS", ":", "rc", "=", "PHYS", ".", "PLANCK_BAR", "/", "(", "2.0", "*", "mom", "*", "PHYS", ".", "ME_PER_AMU", ")", "/", "PHYS", ".", "SEC_PER_TA", "elif", "units", "==", "EURC", ".", "CYCFREQ_ATOMIC", ":", "rc", "=", "PHYS", ".", "PLANCK_BAR", "/", "(", "4.0", "*", "np", ".", "pi", "*", "mom", "*", "PHYS", ".", "ME_PER_AMU", ")", "elif", "units", "==", "EURC", ".", "CYCFREQ_HZ", ":", "rc", "=", "PHYS", ".", "PLANCK_BAR", "/", "(", "4.0", "*", "np", ".", "pi", "*", "mom", "*", "PHYS", ".", "ME_PER_AMU", ")", "/", "PHYS", ".", "SEC_PER_TA", "elif", "units", "==", "EURC", ".", "CYCFREQ_MHZ", ":", "rc", "=", "PHYS", ".", "PLANCK_BAR", "/", "(", "4.0", "*", "np", ".", "pi", "*", "mom", "*", "PHYS", ".", "ME_PER_AMU", ")", "/", "PHYS", ".", "SEC_PER_TA", "/", "1.0e6", "elif", "units", "==", "EURC", ".", "WAVENUM_ATOMIC", ":", "rc", "=", "PHYS", ".", "PLANCK", "/", "(", "mom", "*", "PHYS", ".", "ME_PER_AMU", ")", "/", "(", "8.0", "*", "np", ".", "pi", "**", "2.0", "*", "PHYS", ".", "LIGHT_SPEED", ")", "elif", "units", "==", "EURC", ".", "WAVENUM_CM", ":", "rc", "=", "PHYS", ".", "PLANCK", "/", "(", "mom", "*", "PHYS", ".", "ME_PER_AMU", ")", "/", "(", "8.0", "*", "np", ".", "pi", "**", "2.0", "*", "PHYS", ".", "LIGHT_SPEED", "*", "PHYS", ".", "ANG_PER_BOHR", ")", "*", "1.0e8", "else", ":", "raise", "NotImplementedError", "(", "\"Units conversion not yet implemented.\"", ")", "return", "rc" ]
Rotational constants for a given molecular system. Calculates the rotational constants for the provided system with numerical value given in the units provided in `units`. The orthonormality tolerance `on_tol` is required in order to be passed through to the :func:`principals` function. If the system is linear or a single atom, the effectively-zero principal moments of inertia will be assigned values of :data:`opan.const.PRM.ZERO_MOMENT_TOL` before transformation into the appropriate rotational constant units. The moments of inertia are always sorted in increasing order as :math:`0 \\leq I_A \\leq I_B \\leq I_C`; the rotational constants calculated from these will thus always be in **decreasing** order as :math:`B_A \\geq B_B \\geq B_C`, retaining the ordering and association with the three principal ``axes[:,i]`` generated by :func:`principals`. Parameters ---------- geom length-3N |npfloat_| -- Coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. units :class:`~opan.const.EnumUnitsRotConst`, optional -- Enum value indicating the desired units of the output rotational constants. Default is :data:`~opan.const.EnumUnitsRotConst.INV_INERTIA` :math:`\\left(1\\over \\mathrm{uB^2}\\right)` on_tol |npfloat_|, optional -- Tolerance for deviation from unity/zero for principal axis dot products, within which axes are considered orthonormal. Default is :data:`opan.const.DEF.ORTHONORM_TOL` Returns ------- rc length-3 |npfloat_| -- Vector of rotational constants in the indicated units
[ "Rotational", "constants", "for", "a", "given", "molecular", "system", "." ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L499-L598
train
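A usage sketch for rot_consts with its default inverse-inertia units, assuming the module-level _EURC/_DEF aliases resolve as in the source. For the linear diatomic below, principals should report a LINEAR top, so the near-zero first moment is clamped to ZERO_MOMENT_TOL and the returned constants decrease: a very large first value, then 1/(2*2) = 0.25 twice.

import numpy as np
from opan.utils.inertia import rot_consts

geom = np.array([-1.0, 0.0, 0.0, 1.0, 0.0, 0.0])  # linear diatomic
masses = np.array([1.0, 1.0])

rc = rot_consts(geom, masses)
print(rc)  # ~[1/(2*ZERO_MOMENT_TOL), 0.25, 0.25], in decreasing order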
bskinn/opan
opan/utils/inertia.py
_fadn_orth
def _fadn_orth(vec, geom): """First non-zero Atomic Displacement Non-Orthogonal to Vec Utility function to identify the first atomic displacement in a geometry that is (a) not the zero vector; and (b) not normal to the reference vector. Parameters ---------- vec length-3 |npfloat_| -- Reference vector. Does not need to be normalized. geom length-3N |npfloat_| -- *CENTERED* molecular geometry Returns ------- out_vec length-3 |npfloat_| -- Normalized non-zero atomic displacement not orthogonal to vec """ # Imports import numpy as np from scipy import linalg as spla from ..const import PRM from ..error import InertiaError from .vector import orthonorm_check as onchk # Geom and vec must both be the right shape if not (len(geom.shape) == 1 and geom.shape[0] % 3 == 0): raise ValueError("Geometry is not length 3N") ## end if if not vec.shape == (3,): raise ValueError("Reference vector is not length 3") ## end if # vec must not be the zero vector if spla.norm(vec) < PRM.ZERO_VEC_TOL: raise ValueError("Reference vector norm is too small") ## end if # Normalize the ref vec vec = vec / spla.norm(vec) # Iterate over reshaped geometry for disp in geom.reshape((geom.shape[0]//3, 3)): # See if the displacement is nonzero and not orthonormal. Trailing # [0] index is to retrieve only the success/fail bool. if spla.norm(disp) >= PRM.ZERO_VEC_TOL and not onchk( np.column_stack((disp / spla.norm(disp), vec / spla.norm(vec))))[0]: # This is the displacement you are looking for out_vec = disp / spla.norm(disp) return out_vec ## end if ## end if ## next disp else: # Nothing fit the bill - must be atom, linear, or planar raise InertiaError(InertiaError.BAD_GEOM, "No suitable atomic displacement found", "")
python
def _fadn_orth(vec, geom): """First non-zero Atomic Displacement Non-Orthogonal to Vec Utility function to identify the first atomic displacement in a geometry that is (a) not the zero vector; and (b) not normal to the reference vector. Parameters ---------- vec length-3 |npfloat_| -- Reference vector. Does not need to be normalized. geom length-3N |npfloat_| -- *CENTERED* molecular geometry Returns ------- out_vec length-3 |npfloat_| -- Normalized non-zero atomic displacement not orthogonal to vec """ # Imports import numpy as np from scipy import linalg as spla from ..const import PRM from ..error import InertiaError from .vector import orthonorm_check as onchk # Geom and vec must both be the right shape if not (len(geom.shape) == 1 and geom.shape[0] % 3 == 0): raise ValueError("Geometry is not length 3N") ## end if if not vec.shape == (3,): raise ValueError("Reference vector is not length 3") ## end if # vec must not be the zero vector if spla.norm(vec) < PRM.ZERO_VEC_TOL: raise ValueError("Reference vector norm is too small") ## end if # Normalize the ref vec vec = vec / spla.norm(vec) # Iterate over reshaped geometry for disp in geom.reshape((geom.shape[0]//3, 3)): # See if the displacement is nonzero and not orthonormal. Trailing # [0] index is to retrieve only the success/fail bool. if spla.norm(disp) >= PRM.ZERO_VEC_TOL and not onchk( np.column_stack((disp / spla.norm(disp), vec / spla.norm(vec))))[0]: # This is the displacement you are looking for out_vec = disp / spla.norm(disp) return out_vec ## end if ## end if ## next disp else: # Nothing fit the bill - must be atom, linear, or planar raise InertiaError(InertiaError.BAD_GEOM, "No suitable atomic displacement found", "")
[ "def", "_fadn_orth", "(", "vec", ",", "geom", ")", ":", "import", "numpy", "as", "np", "from", "scipy", "import", "linalg", "as", "spla", "from", ".", ".", "const", "import", "PRM", "from", ".", ".", "error", "import", "InertiaError", "from", ".", "vector", "import", "orthonorm_check", "as", "onchk", "if", "not", "(", "len", "(", "geom", ".", "shape", ")", "==", "1", "and", "geom", ".", "shape", "[", "0", "]", "%", "3", "==", "0", ")", ":", "raise", "ValueError", "(", "\"Geometry is not length 3N\"", ")", "if", "not", "vec", ".", "shape", "==", "(", "3", ",", ")", ":", "raise", "ValueError", "(", "\"Reference vector is not length 3\"", ")", "if", "spla", ".", "norm", "(", "vec", ")", "<", "PRM", ".", "ZERO_VEC_TOL", ":", "raise", "ValueError", "(", "\"Reference vector norm is too small\"", ")", "vec", "=", "vec", "/", "spla", ".", "norm", "(", "vec", ")", "for", "disp", "in", "geom", ".", "reshape", "(", "(", "geom", ".", "shape", "[", "0", "]", "//", "3", ",", "3", ")", ")", ":", "if", "spla", ".", "norm", "(", "disp", ")", ">=", "PRM", ".", "ZERO_VEC_TOL", "and", "not", "onchk", "(", "np", ".", "column_stack", "(", "(", "disp", "/", "spla", ".", "norm", "(", "disp", ")", ",", "vec", "/", "spla", ".", "norm", "(", "vec", ")", ")", ")", ")", "[", "0", "]", ":", "out_vec", "=", "disp", "/", "spla", ".", "norm", "(", "disp", ")", "return", "out_vec", "else", ":", "raise", "InertiaError", "(", "InertiaError", ".", "BAD_GEOM", ",", "\"No suitable atomic displacement found\"", ",", "\"\"", ")" ]
First non-zero Atomic Displacement Non-Orthogonal to Vec Utility function to identify the first atomic displacement in a geometry that is (a) not the zero vector; and (b) not normal to the reference vector. Parameters ---------- vec length-3 |npfloat_| -- Reference vector. Does not need to be normalized. geom length-3N |npfloat_| -- *CENTERED* molecular geometry Returns ------- out_vec length-3 |npfloat_| -- Normalized non-zero atomic displacement not orthogonal to vec
[ "First", "non", "-", "zero", "Atomic", "Displacement", "Non", "-", "Orthogonal", "to", "Vec" ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L604-L667
train
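An illustrative call to this private helper (the leading underscore marks it as internal, so treat the import as an assumption). The geometry is a made-up, already-centered pair of atoms; the first displacement, (1, 0, -1), has a nonzero projection on the z reference vector and is returned normalized.

import numpy as np
from opan.utils.inertia import _fadn_orth

vec = np.array([0.0, 0.0, 1.0])
geom = np.array([1.0, 0.0, -1.0,   # not orthogonal to vec -> returned
                 -1.0, 0.0, 1.0])  # centered partner atom

print(_fadn_orth(vec, geom))  # ~[0.7071, 0., -0.7071]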
bskinn/opan
opan/utils/inertia.py
_fadn_par
def _fadn_par(vec, geom): """First non-zero Atomic Displacement that is Non-Parallel with Vec Utility function to identify the first atomic displacement in a geometry that is both (a) not the zero vector and (b) non-(anti-)parallel with a reference vector. Parameters ---------- vec length-3 |npfloat_| -- Reference vector. Does not need to be normalized. geom length-3N |npfloat_| -- *CENTERED* molecular geometry. Returns ------- out_vec length-3 |npfloat_| -- Normalized non-zero atomic displacement not (anti-)parallel to vec. """ # Imports import numpy as np from scipy import linalg as spla from ..const import PRM from ..error import InertiaError from .vector import parallel_check as parchk # Geom and vec must both be the right shape if not (len(geom.shape) == 1 and geom.shape[0] % 3 == 0): raise ValueError("Geometry is not length 3N") ## end if if not vec.shape == (3,): raise ValueError("Reference vector is not length 3") ## end if # vec must not be the zero vector if spla.norm(vec) < PRM.ZERO_VEC_TOL: raise ValueError("Reference vector norm is too small") ## end if # Normalize the ref vec vec = vec / spla.norm(vec) # Iterate over reshaped geometry for disp in geom.reshape((geom.shape[0]//3, 3)): # See if the displacement is nonzero and nonparallel to the ref vec if spla.norm(disp) >= PRM.ZERO_VEC_TOL and \ not parchk(disp.reshape(3), vec): # This is the displacement you are looking for out_vec = disp / spla.norm(disp) break ## end if ## end if ## next disp else: # Nothing fit the bill - must be a linear molecule? raise InertiaError(InertiaError.BAD_GEOM, "Linear molecule, no non-parallel displacement", "") ## end for disp # Return the resulting vector return out_vec
python
def _fadn_par(vec, geom): """First non-zero Atomic Displacement that is Non-Parallel with Vec Utility function to identify the first atomic displacement in a geometry that is both (a) not the zero vector and (b) non-(anti-)parallel with a reference vector. Parameters ---------- vec length-3 |npfloat_| -- Reference vector. Does not need to be normalized. geom length-3N |npfloat_| -- *CENTERED* molecular geometry. Returns ------- out_vec length-3 |npfloat_| -- Normalized non-zero atomic displacement not (anti-)parallel to vec. """ # Imports import numpy as np from scipy import linalg as spla from ..const import PRM from ..error import InertiaError from .vector import parallel_check as parchk # Geom and vec must both be the right shape if not (len(geom.shape) == 1 and geom.shape[0] % 3 == 0): raise ValueError("Geometry is not length 3N") ## end if if not vec.shape == (3,): raise ValueError("Reference vector is not length 3") ## end if # vec must not be the zero vector if spla.norm(vec) < PRM.ZERO_VEC_TOL: raise ValueError("Reference vector norm is too small") ## end if # Normalize the ref vec vec = vec / spla.norm(vec) # Iterate over reshaped geometry for disp in geom.reshape((geom.shape[0]//3, 3)): # See if the displacement is nonzero and nonparallel to the ref vec if spla.norm(disp) >= PRM.ZERO_VEC_TOL and \ not parchk(disp.reshape(3), vec): # This is the displacement you are looking for out_vec = disp / spla.norm(disp) break ## end if ## end if ## next disp else: # Nothing fit the bill - must be a linear molecule? raise InertiaError(InertiaError.BAD_GEOM, "Linear molecule, no non-parallel displacement", "") ## end for disp # Return the resulting vector return out_vec
[ "def", "_fadn_par", "(", "vec", ",", "geom", ")", ":", "import", "numpy", "as", "np", "from", "scipy", "import", "linalg", "as", "spla", "from", ".", ".", "const", "import", "PRM", "from", ".", ".", "error", "import", "InertiaError", "from", ".", "vector", "import", "parallel_check", "as", "parchk", "if", "not", "(", "len", "(", "geom", ".", "shape", ")", "==", "1", "and", "geom", ".", "shape", "[", "0", "]", "%", "3", "==", "0", ")", ":", "raise", "ValueError", "(", "\"Geometry is not length 3N\"", ")", "if", "not", "vec", ".", "shape", "==", "(", "3", ",", ")", ":", "raise", "ValueError", "(", "\"Reference vector is not length 3\"", ")", "if", "spla", ".", "norm", "(", "vec", ")", "<", "PRM", ".", "ZERO_VEC_TOL", ":", "raise", "ValueError", "(", "\"Reference vector norm is too small\"", ")", "vec", "=", "vec", "/", "spla", ".", "norm", "(", "vec", ")", "for", "disp", "in", "geom", ".", "reshape", "(", "(", "geom", ".", "shape", "[", "0", "]", "//", "3", ",", "3", ")", ")", ":", "if", "spla", ".", "norm", "(", "disp", ")", ">=", "PRM", ".", "ZERO_VEC_TOL", "and", "not", "parchk", "(", "disp", ".", "reshape", "(", "3", ")", ",", "vec", ")", ":", "out_vec", "=", "disp", "/", "spla", ".", "norm", "(", "disp", ")", "break", "else", ":", "raise", "InertiaError", "(", "InertiaError", ".", "BAD_GEOM", ",", "\"Linear molecule, no non-parallel displacement\"", ",", "\"\"", ")", "return", "out_vec" ]
First non-zero Atomic Displacement that is Non-Parallel with Vec Utility function to identify the first atomic displacement in a geometry that is both (a) not the zero vector and (b) non-(anti-)parallel with a reference vector. Parameters ---------- vec length-3 |npfloat_| -- Reference vector. Does not need to be normalized. geom length-3N |npfloat_| -- *CENTERED* molecular geometry. Returns ------- out_vec length-3 |npfloat_| -- Normalized non-zero atomic displacement not (anti-)parallel to vec.
[ "First", "non", "-", "zero", "Atomic", "Displacement", "that", "is", "Non", "-", "Parallel", "with", "Vec" ]
0b1b21662df6abc971407a9386db21a8796fbfe5
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/inertia.py#L674-L740
train
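A matching sketch for _fadn_par under the same internal-API caveat, with made-up centered coordinates. The first two displacements are (anti-)parallel to the reference vector and are skipped; the third, along x, is returned normalized.

import numpy as np
from opan.utils.inertia import _fadn_par

vec = np.array([0.0, 0.0, 1.0])
geom = np.array([0.0, 0.0, 1.0,    # parallel to vec -> skipped
                 0.0, 0.0, -1.0,   # anti-parallel -> skipped
                 1.0, 0.0, 0.0,    # non-parallel -> returned
                 -1.0, 0.0, 0.0])

print(_fadn_par(vec, geom))  # ~[1., 0., 0.]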
openvax/isovar
isovar/reference_context.py
reference_contexts_for_variants
def reference_contexts_for_variants( variants, context_size, transcript_id_whitelist=None): """ Extract a set of reference contexts for each variant in the collection. Parameters ---------- variants : varcode.VariantCollection context_size : int Max number of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns a dictionary from variants to lists of ReferenceContext objects, sorted by max coding sequence length of any transcript. """ result = OrderedDict() for variant in variants: result[variant] = reference_contexts_for_variant( variant=variant, context_size=context_size, transcript_id_whitelist=transcript_id_whitelist) return result
python
def reference_contexts_for_variants( variants, context_size, transcript_id_whitelist=None): """ Extract a set of reference contexts for each variant in the collection. Parameters ---------- variants : varcode.VariantCollection context_size : int Max number of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns a dictionary from variants to lists of ReferenceContext objects, sorted by max coding sequence length of any transcript. """ result = OrderedDict() for variant in variants: result[variant] = reference_contexts_for_variant( variant=variant, context_size=context_size, transcript_id_whitelist=transcript_id_whitelist) return result
[ "def", "reference_contexts_for_variants", "(", "variants", ",", "context_size", ",", "transcript_id_whitelist", "=", "None", ")", ":", "result", "=", "OrderedDict", "(", ")", "for", "variant", "in", "variants", ":", "result", "[", "variant", "]", "=", "reference_contexts_for_variant", "(", "variant", "=", "variant", ",", "context_size", "=", "context_size", ",", "transcript_id_whitelist", "=", "transcript_id_whitelist", ")", "return", "result" ]
Extract a set of reference contexts for each variant in the collection. Parameters ---------- variants : varcode.VariantCollection context_size : int Max number of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns a dictionary from variants to lists of ReferenceContext objects, sorted by max coding sequence length of any transcript.
[ "Extract", "a", "set", "of", "reference", "contexts", "for", "each", "variant", "in", "the", "collection", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_context.py#L141-L168
train
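A hedged sketch of driving reference_contexts_for_variants end to end; it assumes varcode is installed with Ensembl release 75 annotation data available via pyensembl, and the TP53-region variant is purely illustrative.

from varcode import Variant, VariantCollection
from isovar.reference_context import reference_contexts_for_variants

variants = VariantCollection([
    Variant(contig="17", start=7577120, ref="C", alt="T", ensembl=75)])

contexts = reference_contexts_for_variants(variants, context_size=45)
for variant, context_list in contexts.items():
    # One ReferenceContext per distinct sequence context around the variant
    print(variant, len(context_list))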
openvax/isovar
isovar/reference_context.py
variants_to_reference_contexts_dataframe
def variants_to_reference_contexts_dataframe( variants, context_size, transcript_id_whitelist=None): """ Given a collection of variants, find all reference sequence contexts around each variant. Parameters ---------- variants : varcode.VariantCollection context_size : int Max number of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns a DataFrame with {"chr", "pos", "ref", "alt"} columns for variants, as well as all the fields of ReferenceContext. """ df_builder = DataFrameBuilder( ReferenceContext, exclude=["variant"], converters=dict(transcripts=lambda ts: ";".join(t.name for t in ts)), extra_column_fns={ "gene": lambda variant, _: ";".join(variant.gene_names), }) for variant, reference_contexts in reference_contexts_for_variants( variants=variants, context_size=context_size, transcript_id_whitelist=transcript_id_whitelist).items(): df_builder.add_many(variant, reference_contexts) return df_builder.to_dataframe()
python
def variants_to_reference_contexts_dataframe( variants, context_size, transcript_id_whitelist=None): """ Given a collection of variants, find all reference sequence contexts around each variant. Parameters ---------- variants : varcode.VariantCollection context_size : int Max number of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns a DataFrame with {"chr", "pos", "ref", "alt"} columns for variants, as well as all the fields of ReferenceContext. """ df_builder = DataFrameBuilder( ReferenceContext, exclude=["variant"], converters=dict(transcripts=lambda ts: ";".join(t.name for t in ts)), extra_column_fns={ "gene": lambda variant, _: ";".join(variant.gene_names), }) for variant, reference_contexts in reference_contexts_for_variants( variants=variants, context_size=context_size, transcript_id_whitelist=transcript_id_whitelist).items(): df_builder.add_many(variant, reference_contexts) return df_builder.to_dataframe()
[ "def", "variants_to_reference_contexts_dataframe", "(", "variants", ",", "context_size", ",", "transcript_id_whitelist", "=", "None", ")", ":", "df_builder", "=", "DataFrameBuilder", "(", "ReferenceContext", ",", "exclude", "=", "[", "\"variant\"", "]", ",", "converters", "=", "dict", "(", "transcripts", "=", "lambda", "ts", ":", "\";\"", ".", "join", "(", "t", ".", "name", "for", "t", "in", "ts", ")", ")", ",", "extra_column_fns", "=", "{", "\"gene\"", ":", "lambda", "variant", ",", "_", ":", "\";\"", ".", "join", "(", "variant", ".", "gene_names", ")", ",", "}", ")", "for", "variant", ",", "reference_contexts", "in", "reference_contexts_for_variants", "(", "variants", "=", "variants", ",", "context_size", "=", "context_size", ",", "transcript_id_whitelist", "=", "transcript_id_whitelist", ")", ".", "items", "(", ")", ":", "df_builder", ".", "add_many", "(", "variant", ",", "reference_contexts", ")", "return", "df_builder", ".", "to_dataframe", "(", ")" ]
Given a collection of variants, find all reference sequence contexts around each variant. Parameters ---------- variants : varcode.VariantCollection context_size : int Max number of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns a DataFrame with {"chr", "pos", "ref", "alt"} columns for variants, as well as all the fields of ReferenceContext.
[ "Given", "a", "collection", "of", "variants", "find", "all", "reference", "sequence", "contexts", "around", "each", "variant", "." ]
b39b684920e3f6b344851d6598a1a1c67bce913b
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_context.py#L170-L205
train
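Continuing the sketch above, the DataFrame variant flattens the same contexts into rows; per the docstring, the frame carries the {"chr", "pos", "ref", "alt"} variant columns plus the ReferenceContext fields, and the builder adds a "gene" column.

from isovar.reference_context import variants_to_reference_contexts_dataframe

df = variants_to_reference_contexts_dataframe(variants, context_size=45)
print(df[["chr", "pos", "ref", "alt", "gene"]].head())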
pyviz/imagen
imagen/patternfn.py
exponential
def exponential(x, y, xscale, yscale): """ Two-dimensional oriented exponential decay pattern. """ if xscale==0.0 or yscale==0.0: return x*0.0 with float_error_ignore(): x_w = np.divide(x,xscale) y_h = np.divide(y,yscale) return np.exp(-np.sqrt(x_w*x_w+y_h*y_h))
python
def exponential(x, y, xscale, yscale): """ Two-dimensional oriented exponential decay pattern. """ if xscale==0.0 or yscale==0.0: return x*0.0 with float_error_ignore(): x_w = np.divide(x,xscale) y_h = np.divide(y,yscale) return np.exp(-np.sqrt(x_w*x_w+y_h*y_h))
[ "def", "exponential", "(", "x", ",", "y", ",", "xscale", ",", "yscale", ")", ":", "if", "xscale", "==", "0.0", "or", "yscale", "==", "0.0", ":", "return", "x", "*", "0.0", "with", "float_error_ignore", "(", ")", ":", "x_w", "=", "np", ".", "divide", "(", "x", ",", "xscale", ")", "y_h", "=", "np", ".", "divide", "(", "y", ",", "yscale", ")", "return", "np", ".", "exp", "(", "-", "np", ".", "sqrt", "(", "x_w", "*", "x_w", "+", "y_h", "*", "y_h", ")", ")" ]
Two-dimensional oriented exponential decay pattern.
[ "Two", "-", "dimensional", "oriented", "exponential", "decay", "pattern", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patternfn.py#L82-L92
train
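A quick evaluation of the exponential pattern on a coordinate grid; the grid extents and scale values are arbitrary illustration choices.

import numpy as np
from imagen.patternfn import exponential

xs = np.linspace(-1.0, 1.0, 5)
x, y = np.meshgrid(xs, xs)

pattern = exponential(x, y, xscale=0.5, yscale=0.5)
print(pattern[2, 2])  # 1.0 at the origin, decaying toward the grid edges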
pyviz/imagen
imagen/patternfn.py
line
def line(y, thickness, gaussian_width): """ Infinite-length line with a solid central region, then Gaussian fall-off at the edges. """ distance_from_line = abs(y) gaussian_y_coord = distance_from_line - thickness/2.0 sigmasq = gaussian_width*gaussian_width if sigmasq==0.0: falloff = y*0.0 else: with float_error_ignore(): falloff = np.exp(np.divide(-gaussian_y_coord*gaussian_y_coord,2*sigmasq)) return np.where(gaussian_y_coord<=0, 1.0, falloff)
python
def line(y, thickness, gaussian_width): """ Infinite-length line with a solid central region, then Gaussian fall-off at the edges. """ distance_from_line = abs(y) gaussian_y_coord = distance_from_line - thickness/2.0 sigmasq = gaussian_width*gaussian_width if sigmasq==0.0: falloff = y*0.0 else: with float_error_ignore(): falloff = np.exp(np.divide(-gaussian_y_coord*gaussian_y_coord,2*sigmasq)) return np.where(gaussian_y_coord<=0, 1.0, falloff)
[ "def", "line", "(", "y", ",", "thickness", ",", "gaussian_width", ")", ":", "distance_from_line", "=", "abs", "(", "y", ")", "gaussian_y_coord", "=", "distance_from_line", "-", "thickness", "/", "2.0", "sigmasq", "=", "gaussian_width", "*", "gaussian_width", "if", "sigmasq", "==", "0.0", ":", "falloff", "=", "y", "*", "0.0", "else", ":", "with", "float_error_ignore", "(", ")", ":", "falloff", "=", "np", ".", "exp", "(", "np", ".", "divide", "(", "-", "gaussian_y_coord", "*", "gaussian_y_coord", ",", "2", "*", "sigmasq", ")", ")", "return", "np", ".", "where", "(", "gaussian_y_coord", "<=", "0", ",", "1.0", ",", "falloff", ")" ]
Infinite-length line with a solid central region, then Gaussian fall-off at the edges.
[ "Infinite", "-", "length", "line", "with", "a", "solid", "central", "region", "then", "Gaussian", "fall", "-", "off", "at", "the", "edges", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patternfn.py#L114-L128
train
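The same kind of spot check for the line profile: with thickness 0.5 the solid band covers |y| <= 0.25, and the Gaussian fall-off takes over outside it (parameter values are illustrative).

import numpy as np
from imagen.patternfn import line

y = np.linspace(-1.0, 1.0, 9)
print(line(y, thickness=0.5, gaussian_width=0.2))
# 1.0 at y in {-0.25, 0.0, 0.25}; Gaussian fall-off elsewhere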
pyviz/imagen
imagen/patternfn.py
disk
def disk(x, y, height, gaussian_width): """ Circular disk with Gaussian fall-off after the solid central region. """ disk_radius = height/2.0 distance_from_origin = np.sqrt(x**2+y**2) distance_outside_disk = distance_from_origin - disk_radius sigmasq = gaussian_width*gaussian_width if sigmasq==0.0: falloff = x*0.0 else: with float_error_ignore(): falloff = np.exp(np.divide(-distance_outside_disk*distance_outside_disk, 2*sigmasq)) return np.where(distance_outside_disk<=0,1.0,falloff)
python
def disk(x, y, height, gaussian_width): """ Circular disk with Gaussian fall-off after the solid central region. """ disk_radius = height/2.0 distance_from_origin = np.sqrt(x**2+y**2) distance_outside_disk = distance_from_origin - disk_radius sigmasq = gaussian_width*gaussian_width if sigmasq==0.0: falloff = x*0.0 else: with float_error_ignore(): falloff = np.exp(np.divide(-distance_outside_disk*distance_outside_disk, 2*sigmasq)) return np.where(distance_outside_disk<=0,1.0,falloff)
[ "def", "disk", "(", "x", ",", "y", ",", "height", ",", "gaussian_width", ")", ":", "disk_radius", "=", "height", "/", "2.0", "distance_from_origin", "=", "np", ".", "sqrt", "(", "x", "**", "2", "+", "y", "**", "2", ")", "distance_outside_disk", "=", "distance_from_origin", "-", "disk_radius", "sigmasq", "=", "gaussian_width", "*", "gaussian_width", "if", "sigmasq", "==", "0.0", ":", "falloff", "=", "x", "*", "0.0", "else", ":", "with", "float_error_ignore", "(", ")", ":", "falloff", "=", "np", ".", "exp", "(", "np", ".", "divide", "(", "-", "distance_outside_disk", "*", "distance_outside_disk", ",", "2", "*", "sigmasq", ")", ")", "return", "np", ".", "where", "(", "distance_outside_disk", "<=", "0", ",", "1.0", ",", "falloff", ")" ]
Circular disk with Gaussian fall-off after the solid central region.
[ "Circular", "disk", "with", "Gaussian", "fall", "-", "off", "after", "the", "solid", "central", "region", "." ]
53c5685c880f54b42795964d8db50b02e8590e88
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patternfn.py#L131-L148
train
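And for the disk: height 1.0 gives a solid disk of radius 0.5 centered at the origin, with Gaussian fall-off beyond it (again, illustrative parameter values).

import numpy as np
from imagen.patternfn import disk

xs = np.linspace(-1.0, 1.0, 7)
x, y = np.meshgrid(xs, xs)

img = disk(x, y, height=1.0, gaussian_width=0.1)
print(img[3, 3])  # 1.0 at the origin, inside the radius-0.5 solid region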