Dataset schema (for string columns, min and max are string lengths; "documentation" is a nested dict; "language" has a single distinct value, Python, in every row shown below):

column           dtype    min    max
complexity       int64    1      56
n_identifiers    int64    1      114
code             string   19     12.7k
path             string   8      134
n_ast_nodes      int64    12     2.35k
ast_errors       string   0      4.01k
repo             string   3      28
documentation    dict     -      -
n_words          int64    2      866
language         string   -      -
vocab_size       int64    2      323
commit_id        string   40     40
file_name        string   5      79
id               int64    243    338k
nloc             int64    1      228
token_counts     int64    5      1.4k
fun_name         string   1      77
url              string   31     60
commit_message   string   3      15.3k
n_whitespaces    int64    1      3.23k
n_ast_errors     int64    0      20
d_id             int64    74     121k
ast_levels       int64    4      29
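Each record below pairs one extracted function with its docstring and commit metadata. As a minimal sketch of how a record of this shape might be read, assuming the table is published as a Hugging Face-style dataset (the dataset id below is a hypothetical placeholder, not taken from this dump):

from datasets import load_dataset  # assumes the `datasets` library is available

ds = load_dataset("org/commit-functions", split="train")  # hypothetical dataset id
row = ds[0]
# Numeric columns describe the function's size and AST statistics;
# string columns carry the source snippet and its commit context.
print(row["fun_name"], row["repo"], row["commit_id"])
print(row["nloc"], row["complexity"], row["ast_levels"])
print(row["documentation"]["docstring"])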
1
20
def paths(self, request, pk):
    obj = get_object_or_404(self.queryset, pk=pk)
    cablepaths = CablePath.objects.filter(_nodes__contains=obj).prefetch_related('origin', 'destination')
    serializer = serializers.CablePathSerializer(cablepaths, context={'request': request}, many=True)
    return Response(serializer.data)


#
# Regions
#
netbox/dcim/api/views.py
113
netbox
{ "docstring": "\n Return all CablePaths which traverse a given pass-through port.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
24
Python
20
3a461d02793e6f9d41c2b1a92647e691de1abaac
views.py
264,877
5
68
paths
https://github.com/netbox-community/netbox.git
Update Cable instantiations to match new signature
56
0
77,892
12
1
19
def test_read_only_buffer():
    rng = np.random.RandomState(0)
    clf = ElasticNet(alpha=0.1, copy_X=True, random_state=rng)

    X = np.asfortranarray(rng.uniform(size=(100, 10)))
    X.setflags(write=False)
    y = rng.rand(100)
    clf.fit(X, y)
sklearn/linear_model/tests/test_coordinate_descent.py
118
scikit-learn
{ "docstring": "Test that sparse coordinate descent works for read-only buffers", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
20
Python
17
3bb4bad1425ee7add6001a32f0d83cb459ffa30c
test_coordinate_descent.py
260,476
7
76
test_read_only_buffer
https://github.com/scikit-learn/scikit-learn.git
MNT Replaced `np.ndarray` with memview where applicable in `linear_model/_cd_fast.pyx` (#23147) Co-authored-by: Thomas J. Fan <[email protected]>
41
0
76,274
12
3
20
def test_cases(self) -> Dict[str, Type[unittest.TestCase]]:
    test_cases = {}
    for category, items_map in self._filtered_test_items.items():
        test_case_name = str('OnnxBackend{}Test').format(category)
        test_case = self._get_test_case(test_case_name)
        for name, item in sorted(items_map.items()):
            setattr(test_case, name, item.func)
        test_cases[test_case_name] = test_case
    return test_cases
onnx/backend/test/runner/__init__.py
137
onnx
{ "docstring": "\n List of test cases to be applied on the parent scope\n Example usage:\n globals().update(BackendTest(backend).test_cases)\n ", "language": "en", "n_whitespaces": 47, "n_words": 14, "vocab_size": 14 }
32
Python
24
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
__init__.py
255,147
14
86
test_cases
https://github.com/onnx/onnx.git
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <[email protected]> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <[email protected]> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <[email protected]> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <[email protected]> * fixes Signed-off-by: Gary Miguel <[email protected]> * remove extra blank line Signed-off-by: Gary Miguel <[email protected]> * fix type annotations Signed-off-by: Gary Miguel <[email protected]> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <[email protected]> * fix Operators.md Signed-off-by: Gary Miguel <[email protected]> * fix TestCoverage.md Signed-off-by: Gary Miguel <[email protected]> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <[email protected]>
119
0
74,735
13
1
5
def _may_have_unstable_default(self) -> bool: return callable(self._default)
bokeh/core/property/bases.py
27
bokeh
{ "docstring": " False if we have a default that is immutable, and will be the\n same every time (some defaults are generated on demand by a function\n to be called).\n\n ", "language": "en", "n_whitespaces": 50, "n_words": 28, "vocab_size": 26 }
6
Python
6
1b3e6acd6eebd352106cc5ecf5e12dbf90e0607c
bases.py
212,147
7
15
_may_have_unstable_default
https://github.com/bokeh/bokeh.git
Add Init signatures to Bokeh models (#12035) * Add signatures to Bokeh Model initializers * use explicit type for override default * move InstanceDefault to bokeh.core.properties * enable assertions
20
0
53,177
8
6
17
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
    bboxes1 = get_box_tensor(bboxes1)
    bboxes2 = get_box_tensor(bboxes2)
    assert bboxes1.size(-1) in [0, 4, 5]
    assert bboxes2.size(-1) in [0, 4, 5]
    if bboxes2.size(-1) == 5:
        bboxes2 = bboxes2[..., :4]
    if bboxes1.size(-1) == 5:
        bboxes1 = bboxes1[..., :4]

    if self.dtype == 'fp16':
        # change tensor type to save cpu and cuda memory and keep speed
        bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
        bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
        overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
        if not overlaps.is_cuda and overlaps.dtype == torch.float16:
            # resume cpu float32
            overlaps = overlaps.float()
        return overlaps

    return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
mmdet/models/task_modules/assigners/iou2d_calculator.py
279
mmdetection
{ "docstring": "Calculate IoU between 2D bboxes.\n\n Args:\n bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n in <x1, y1, x2, y2> format, or shape (m, 5) in <x1, y1, x2,\n y2, score> format.\n bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n in <x1, y1, x2, y2> format, shape (m, 5) in <x1, y1, x2, y2,\n score> format, or be empty. If ``is_aligned `` is ``True``,\n then m and n must be equal.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n over foreground), or \"giou\" (generalized intersection over\n union).\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)\n ", "language": "en", "n_whitespaces": 311, "n_words": 115, "vocab_size": 64 }
94
Python
54
d915740fa8228cf57741b27d9e5d66e358456b8e
iou2d_calculator.py
245,712
17
183
__call__
https://github.com/open-mmlab/mmdetection.git
[Refactor] Refactor anchor head and base head with boxlist (#8625) * Refactor anchor head * Update * Update * Update * Add a series of boxes tools * Fix box type to support n x box_dim boxes * revert box type changes * Add docstring * refactor retina_head * Update * Update * Fix comments * modify docstring of coder and ioucalculator * Replace with_boxlist with use_box_type
275
0
70,858
12
4
11
def to_key_val_list(value):
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")

    if isinstance(value, Mapping):
        value = value.items()

    return list(value)


# From mitsuhiko/werkzeug (used with permission).
pipenv/patched/pip/_vendor/requests/utils.py
88
pipenv
{ "docstring": "Take an object and test to see if it can be represented as a\n dictionary. If it can be, return a list of tuples, e.g.,\n\n ::\n\n >>> to_key_val_list([('key', 'val')])\n [('key', 'val')]\n >>> to_key_val_list({'key': 'val'})\n [('key', 'val')]\n >>> to_key_val_list('string')\n Traceback (most recent call last):\n ...\n ValueError: cannot encode objects that are not 2-tuples\n\n :rtype: list\n ", "language": "en", "n_whitespaces": 122, "n_words": 54, "vocab_size": 46 }
36
Python
31
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
utils.py
22,153
8
54
to_key_val_list
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
71
0
4,224
10
2
11
def _set_gradient_checkpointing(self, module, value=False):
    if isinstance(module, ViltEncoder):
        module.gradient_checkpointing = value


VILT_START_DOCSTRING = r

VILT_INPUTS_DOCSTRING = r

VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r


@add_start_docstrings(
    "The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.",
    VILT_START_DOCSTRING,
)
src/transformers/models/vilt/modeling_vilt.py
71
@add_start_docstrings( "The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.", VILT_START_DOCSTRING, )
transformers
{ "docstring": "\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ subclass. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ViltConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See\n [`ViltFeatureExtractor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See\n [`ViltFeatureExtractor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 1685, "n_words": 802, "vocab_size": 200 }
36
Python
31
ac227093e41cecb07c7e0f2fc9a504850907bd06
modeling_vilt.py
34,307
3
24
_set_gradient_checkpointing
https://github.com/huggingface/transformers.git
Add ViLT (#14895) * First commit * Add conversion script * Make conversion script work for base model * More improvements * Update conversion script, works for vqa * Add indexing argument to meshgrid * Make conversion script work for ViltForPreTraining * Add ViltForPreTraining to docs * Fix device issue * Add processor * Add MinMaxResize to feature extractor * Implement call method of ViltProcessor * Fix tests * Add integration test * Add loss calculation for VQA * Improve tests * Improve some more tests * Debug tests * Small improvements * Add support for attention_mask * Remove mask_it * Add pixel_mask * Add tests for ViltFeatureExtractor * Improve tests * Add ViltForNaturalLanguageVisualReasoning * Add ViltForNaturalLanguageVisualReasoning to conversion script * Minor fixes * Add support for image_embeds, update docstrings to markdown * Update docs to markdown * Improve conversion script * Rename ViltForPreTraining to ViltForMaskedLM * Improve conversion script * Convert docstrings to markdown * Fix code example of retrieval model * Properly convert masked language model * Add integration test for nlvr * Fix code quality * Apply suggestions from code review * Add copied from statements * Fix pretrained_config_archive_map * Fix docs * Add model to README * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply more suggestions from code review * Make code more readable * Add ViltForNaturalLanguageVisualReasoning to the tests * Rename ViltForVisualQuestionAnswering to ViltForQuestionAnswering * Replace pixel_values_2 by single tensor * Add hidden_states and attentions * Fix one more test * Fix all tests * Update year * Fix rebase issues * Fix another rebase issue * Remove ViltForPreTraining from auto mapping * Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval * Make it possible to use BertTokenizerFast in the processor * Use BertTokenizerFast by default * Rename ViltForNaturalLanguageVisualReasoning, define custom model output Co-authored-by: Sylvain Gugger <[email protected]>
54
1
6,254
9
1
7
def expunge(self):
    name = 'EXPUNGE'
    typ, dat = self._simple_command(name)
    return self._untagged_response(typ, dat, name)
python3.10.4/Lib/imaplib.py
51
XX-Net
{ "docstring": "Permanently remove deleted items from selected mailbox.\n\n Generates 'EXPUNGE' response for each deleted message.\n\n (typ, [data]) = <instance>.expunge()\n\n 'data' is list of 'EXPUNGE'd message numbers in order received.\n ", "language": "en", "n_whitespaces": 56, "n_words": 28, "vocab_size": 27 }
13
Python
12
8198943edd73a363c266633e1aa5b2a9e9c9f526
imaplib.py
217,981
4
30
expunge
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
41
0
55,053
8
3
15
def test_next_dagrun_after_auto_align(self):
    dag = DAG(
        dag_id='test_scheduler_auto_align_1',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="4 5 * * *",
    )
    EmptyOperator(task_id='dummy', dag=dag, owner='airflow')

    next_info = dag.next_dagrun_info(None)
    assert next_info and next_info.logical_date == timezone.datetime(2016, 1, 2, 5, 4)

    dag = DAG(
        dag_id='test_scheduler_auto_align_2',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="10 10 * * *",
    )
    EmptyOperator(task_id='dummy', dag=dag, owner='airflow')

    next_info = dag.next_dagrun_info(None)
    assert next_info and next_info.logical_date == timezone.datetime(2016, 1, 1, 10, 10)
tests/models/test_dag.py
235
airflow
{ "docstring": "\n Test if the schedule_interval will be auto aligned with the start_date\n such that if the start_date coincides with the schedule the first\n execution_date will be start_date, otherwise it will be start_date +\n interval.\n ", "language": "en", "n_whitespaces": 69, "n_words": 33, "vocab_size": 21 }
66
Python
32
49e336ae0302b386a2f47269a6d13988382d975f
test_dag.py
47,565
17
156
test_next_dagrun_after_auto_align
https://github.com/apache/airflow.git
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
209
0
9,160
11
1
2
def tickwidth(self): return self["tickwidth"]
packages/python/plotly/plotly/graph_objs/_ohlc.py
22
plotly.py
{ "docstring": "\n Sets the width of the open/close tick marks relative to the \"x\"\n minimal interval.\n\n The 'tickwidth' property is a number and may be specified as:\n - An int or float in the interval [0, 0.5]\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 97, "n_words": 38, "vocab_size": 35 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_ohlc.py
227,484
2
11
tickwidth
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,157
7
1
5
def median_approximate(self, method="default"): return self.quantile(q=0.5, method=method)
dask/dataframe/core.py
39
dask
{ "docstring": "Return the approximate median of the values over the requested axis.\n\n Parameters\n ----------\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use Dask's internal custom\n algorithm (``\"dask\"``). If set to ``\"tdigest\"`` will use tdigest\n for floats and ints and fallback to the ``\"dask\"`` otherwise.\n ", "language": "en", "n_whitespaces": 111, "n_words": 49, "vocab_size": 40 }
6
Python
6
142de2608df2494bf11e08038aadddb544b4500c
core.py
156,986
2
25
median_approximate
https://github.com/dask/dask.git
Add `DataFrame` and `Series` `median` method (#9483)
20
0
36,823
8
1
15
def add_metadata_summerizer():
    docs = [
        Document(
            content=,
            meta={
                "sub_content": "Pegasus Example",
                "topic": "California's Electricity",
                "context": "Dummy - PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires.",
            },
        ),
        Document(
            content=,
            meta={"sub_content": "Paris best tour best tour", "topic": "Eiffel tower"},
        ),
    ]
    # Original input is overwrote after the "predict". So adding the same input as check_output to assess the output
    check_output = deepcopy(docs)

    summarizer = TransformersSummarizer(model_name_or_path="google/pegasus-xsum")
    summary = summarizer.predict(documents=docs)

    assert len(summary[0].meta) == len(check_output[0].meta)
    assert len(summary[1].meta) - 1 == len(check_output[1].meta)
    assert (
        summary[0].meta["context"]
        ==
    )

    summary = summarizer.predict(documents=docs, generate_single_summary=True)

    assert len(summary) == 1
    assert not summary[0].meta  # Metadata is not returned in case of a single summary
test/nodes/test_summarizer.py
273
haystack
{ "docstring": "PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", "language": "en", "n_whitespaces": 221, "n_words": 222, "vocab_size": 117 }
122
Python
88
4d8f40425bc4e7346359b7609720a50ac10b8af9
test_summarizer.py
257,543
27
162
add_metadata_summerizer
https://github.com/deepset-ai/haystack.git
Passing the meta-data in the summerizer response (#2179) * Passing the all the meta-data in the summerizer * Disable metadata forwarding if `generate_single_summary` is `True` * Update Documentation & Code Style * simplify tests * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
304
0
75,094
13
2
14
def dump_full(self) -> None:
    styles = {}
    if term.supports_colors(sys.stdout.fileno()):
        styles[self] = term.Style16(color='magenta', bold=True)
    print(self.root.pdebugformat(styles=styles))
edb/ir/scopetree.py
94
edgedb
{ "docstring": "Do a debug dump of the root but hilight the current node.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
14
Python
13
e59a77b36afa41b93518b8bc4128e6e90da08fda
scopetree.py
176,140
6
56
dump_full
https://github.com/edgedb/edgedb.git
Add a scopetree method to dump the root but to highlight the current node (#3330)
53
0
41,716
12
1
8
def local_devices_fixture():
    return json.loads(load_fixture("awair/local_devices.json"))


@pytest.fixture(name="gen1_data", scope="session")
tests/components/awair/conftest.py
54
@pytest.fixture(name="gen1_data", scope="session")
core
{ "docstring": "Fixture representing devices returned by Awair local API.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
6
Python
6
ebbff7b60e43f17d65ead811d314602b9daddfc4
conftest.py
303,763
2
15
local_devices_fixture
https://github.com/home-assistant/core.git
Add Awair Local API support (#75535)
11
1
102,572
10
1
2
def root(self): return self["root"]
packages/python/plotly/plotly/graph_objs/_icicle.py
22
plotly.py
{ "docstring": "\n The 'root' property is an instance of Root\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.icicle.Root`\n - A dict of string/value properties that will be passed\n to the Root constructor\n\n Supported dict properties:\n\n color\n sets the color of the root node for a\n sunburst/treemap/icicle trace. this has no\n effect when a colorscale is used to set the\n markers.\n\n Returns\n -------\n plotly.graph_objs.icicle.Root\n ", "language": "en", "n_whitespaces": 237, "n_words": 63, "vocab_size": 47 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_icicle.py
227,179
2
11
root
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,852
7
1
31
def test_stream_slices_no_state_close_to_now(self, api, async_manager_mock, recent_start_date):
    start_date = recent_start_date
    end_date = pendulum.now()
    stream = AdsInsights(api=api, start_date=start_date, end_date=end_date)
    async_manager_mock.completed_jobs.return_value = [1, 2, 3]

    slices = list(stream.stream_slices(stream_state=None, sync_mode=SyncMode.incremental))

    assert slices == [{"insight_job": 1}, {"insight_job": 2}, {"insight_job": 3}]
    async_manager_mock.assert_called_once()
    args, kwargs = async_manager_mock.call_args
    generated_jobs = list(kwargs["jobs"])
    assert len(generated_jobs) == (end_date - start_date).days + 1
    assert generated_jobs[0].interval.start == start_date.date()
    assert generated_jobs[1].interval.start == start_date.date() + duration(days=1)
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py
259
airbyte
{ "docstring": "Stream will use start_date when there is not state and start_date within 28d from now", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
60
Python
44
a3aae8017a0a40ff2006e2567f71dccb04c997a5
test_base_insight_streams.py
3,829
13
165
test_stream_slices_no_state_close_to_now
https://github.com/airbytehq/airbyte.git
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805) * Facebook Marketing performance improvement * add comments and little refactoring * fix integration tests with the new config * improve job status handling, limit concurrency to 10 * fix campaign jobs, refactor manager * big refactoring of async jobs, support random order of slices * update source _read_incremental to hook new state logic * fix issues with timeout * remove debugging and clean up, improve retry logic * merge changes from #8234 * fix call super _read_increment * generalize batch execution, add use_batch flag * improve coverage, do some refactoring of spec * update test, remove overrides of source * add split by AdSet * add smaller insights * fix end_date < start_date case * add account_id to PK * add notes * fix new streams * fix reversed incremental stream * update spec.json for SAT * upgrade CDK and bump version Co-authored-by: Dmytro Rezchykov <[email protected]> Co-authored-by: Eugene Kulak <[email protected]>
151
0
574
12
2
19
def execute():
    frappe.reload_doc("stock", "doctype", "purchase_receipt")
    frappe.reload_doc("stock", "doctype", "purchase_receipt_item")
    frappe.reload_doc("stock", "doctype", "delivery_note")
    frappe.reload_doc("stock", "doctype", "delivery_note_item")
    frappe.reload_doc("stock", "doctype", "stock_settings")

    def update_from_return_docs(doctype):
        for return_doc in frappe.get_all(
            doctype, filters={"is_return": 1, "docstatus": 1, "return_against": ("!=", "")}
        ):
            # Update original receipt/delivery document from return
            return_doc = frappe.get_cached_doc(doctype, return_doc.name)
            try:
                return_doc.update_prevdoc_status()
            except OverAllowanceError:
                frappe.db.rollback()
                continue

            return_against = frappe.get_doc(doctype, return_doc.return_against)
            return_against.update_billing_status()
            frappe.db.commit()

    # Set received qty in stock uom in PR, as returned qty is checked against it
    frappe.db.sql(
    )

    for doctype in ("Purchase Receipt", "Delivery Note"):
        update_from_return_docs(doctype)
erpnext/patches/v13_0/update_returned_qty_in_pr_dn.py
297
erpnext
{ "docstring": " update `tabPurchase Receipt Item`\n\t\tset received_stock_qty = received_qty * conversion_factor\n\t\twhere docstatus = 1 ", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
81
Python
63
494bd9ef78313436f0424b918f200dab8fc7c20b
update_returned_qty_in_pr_dn.py
66,831
14
77
execute
https://github.com/frappe/erpnext.git
style: format code with black
56
0
14,353
15
6
32
def geometric_edges(G, radius, p):
    nodes_pos = G.nodes(data="pos")
    try:
        import scipy as sp
        import scipy.spatial  # call as sp.spatial
    except ImportError:
        # no scipy KDTree so compute by for-loop
        radius_p = radius**p
        edges = [
            (u, v)
            for (u, pu), (v, pv) in combinations(nodes_pos, 2)
            if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= radius_p
        ]
        return edges
    # scipy KDTree is available
    nodes, coords = list(zip(*nodes_pos))
    kdtree = sp.spatial.cKDTree(coords)  # Cannot provide generator.
    edge_indexes = kdtree.query_pairs(radius, p)
    edges = [(nodes[u], nodes[v]) for u, v in sorted(edge_indexes)]
    return edges


@py_random_state(5)
@nodes_or_number(0)
networkx/generators/geometric.py
252
@py_random_state(5) @nodes_or_number(0)
networkx
{ "docstring": "Returns edge list of node pairs within `radius` of each other.\n\n Parameters\n ----------\n G : networkx graph\n The graph from which to generate the edge list. The nodes in `G` should\n have an attribute ``pos`` corresponding to the node position, which is\n used to compute the distance to other nodes.\n radius : scalar\n The distance threshold. Edges are included in the edge list if the\n distance between the two nodes is less than `radius`.\n p : scalar\n The `Minkowski distance metric\n <https://en.wikipedia.org/wiki/Minkowski_distance>`_ use to compute\n distances.\n\n Returns\n -------\n edges : list\n List of edges whose distances are less than `radius`\n\n Notes\n -----\n Radius uses Minkowski distance metric `p`.\n If scipy is available, `scipy.spatial.cKDTree` is used to speed computation.\n\n Examples\n --------\n Create a graph with nodes that have a \"pos\" attribute representing 2D\n coordinates.\n\n >>> G = nx.Graph()\n >>> G.add_nodes_from([\n ... (0, {\"pos\": (0, 0)}),\n ... (1, {\"pos\": (3, 0)}),\n ... (2, {\"pos\": (8, 0)}),\n ... ])\n >>> p = 2 # Euclidean distance\n >>> nx.geometric_edges(G, radius=1, p=p)\n []\n >>> nx.geometric_edges(G, radius=4, p=p)\n [(0, 1)]\n >>> nx.geometric_edges(G, radius=6, p=p)\n [(0, 1), (1, 2)]\n >>> nx.geometric_edges(G, radius=9, p=p)\n [(0, 1), (0, 2), (1, 2)]\n ", "language": "en", "n_whitespaces": 364, "n_words": 192, "vocab_size": 112 }
94
Python
70
f6755ffa00211b523c6c0bec5398bc6c3c43c8b1
geometric.py
176,489
18
151
geometric_edges
https://github.com/networkx/networkx.git
Update black (#5438) * CI: sync up black dev requirements version with precommit * Run black Co-authored-by: Jarrod Millman <[email protected]>
206
1
41,932
18
4
22
def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True):
    if daemonize and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()

        salt.utils.process.daemonize()

        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()

    # pack a few things into low
    low["__jid__"] = jid
    low["__user__"] = user
    low["__tag__"] = tag

    try:
        return self.cmd_sync(low)
    except salt.exceptions.EauthAuthenticationError as exc:
        log.error(exc)
salt/client/mixins.py
175
salt
{ "docstring": "\n Run this method in a multiprocess target to execute the function on the\n master and fire the return data on the event bus\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 19 }
53
Python
47
c78f1ee4f49df35ab04e921a45de0878716d8bf5
mixins.py
216,481
12
105
_proc_function_remote
https://github.com/saltstack/salt.git
Implement ``__getstate__`` and ``__setstate__`` instead of using ``classmethod`` Signed-off-by: Pedro Algarvio <[email protected]>
186
0
54,603
11
1
14
def test_text_qtest(self, qtest_key, qtbot, key_tester):
    with qtbot.wait_signal(key_tester.got_text):
        qtbot.keyPress(key_tester, qtest_key.member)

    info = keyutils.KeyInfo(qtest_key.member)
    assert info.text() == key_tester.text.lower()
tests/unit/keyinput/test_keyutils.py
91
qutebrowser
{ "docstring": "Make sure KeyInfo.text() lines up with QTest::keyToAscii.\n\n See key_data.py for inputs and expected values.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
16
Python
16
623b06bc3dabfd53f637e611ec8d3e4feb521189
test_keyutils.py
321,569
5
56
test_text_qtest
https://github.com/qutebrowser/qutebrowser.git
Fix remaining enum/flag issues
55
0
117,802
10
1
11
def mixin_head_parser(parser):
    gp = add_arg_group(parser, title='Head')

    gp.add_argument(
        '--uses-before-address',
        type=str,
        help='The address of the uses-before runtime',
    )

    gp.add_argument(
        '--uses-after-address',
        type=str,
        help='The address of the uses-before runtime',
    )

    gp.add_argument(
        '--connection-list',
        type=str,
        help='dictionary JSON with a list of connections to configure',
    )

    gp.add_argument(
        '--disable-reduce',
        action='store_true',
        default=False,
        help='Disable the built-in reduce mechanism, set this if the reduction is to be handled by the Executor connected to this Head',
    )
jina/parsers/orchestrate/runtimes/head.py
137
jina
{ "docstring": "Mixing in arguments required by head pods and runtimes into the given parser.\n :param parser: the parser instance to which we add arguments\n ", "language": "en", "n_whitespaces": 29, "n_words": 23, "vocab_size": 21 }
65
Python
44
c7ad27e5614dfb2b1684f4718c5508840cd55de0
head.py
11,483
23
80
mixin_head_parser
https://github.com/jina-ai/jina.git
refactor: add disable_reduce args (#4424)
186
0
2,054
10
2
20
def test_run_clm_no_trainer(self):
    tmp_dir = self.get_auto_remove_tmp_dir()
    testargs = f.split()

    if torch.cuda.device_count() > 1:
        # Skipping because there are not enough batches to train the model + would need a drop_last to work.
        return

    run_command(self._launch_args + testargs)
    result = get_results(tmp_dir)
    self.assertLess(result["perplexity"], 100)
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
examples/pytorch/test_accelerate_examples.py
180
transformers
{ "docstring": "\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ", "language": "en", "n_whitespaces": 149, "n_words": 20, "vocab_size": 18 }
44
Python
39
99eb9b523f9b9ea6096323ce5610ce6633acc88a
test_accelerate_examples.py
32,329
22
101
test_run_clm_no_trainer
https://github.com/huggingface/transformers.git
Fix `no_trainer` CI (#18242) * Fix all tests
121
0
5,903
12
2
20
def bokeh_chart(self, figure, use_container_width=False):
    import bokeh

    if bokeh.__version__ != ST_BOKEH_VERSION:
        raise StreamlitAPIException(
            f"Streamlit only supports Bokeh version {ST_BOKEH_VERSION}, "
            f"but you have version {bokeh.__version__} installed. Please "
            f"run `pip install --force-reinstall --no-deps bokeh=="
            f"{ST_BOKEH_VERSION}` to install the correct version."
        )

    # Generate element ID from delta path
    delta_path = self.dg._get_delta_path_str()
    element_id = hashlib.md5(delta_path.encode()).hexdigest()

    bokeh_chart_proto = BokehChartProto()
    marshall(bokeh_chart_proto, figure, use_container_width, element_id)
    return self.dg._enqueue("bokeh_chart", bokeh_chart_proto)
lib/streamlit/elements/bokeh_chart.py
153
streamlit
{ "docstring": "Display an interactive Bokeh chart.\n\n Bokeh is a charting library for Python. The arguments to this function\n closely follow the ones for Bokeh's `show` function. You can find\n more about Bokeh at https://bokeh.pydata.org.\n\n Parameters\n ----------\n figure : bokeh.plotting.figure.Figure\n A Bokeh figure to plot.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over Bokeh's native `width` value.\n\n To show Bokeh charts in Streamlit, call `st.bokeh_chart`\n wherever you would call Bokeh's `show`.\n\n Example\n -------\n >>> import streamlit as st\n >>> from bokeh.plotting import figure\n >>>\n >>> x = [1, 2, 3, 4, 5]\n >>> y = [6, 7, 2, 4, 5]\n >>>\n >>> p = figure(\n ... title='simple line example',\n ... x_axis_label='x',\n ... y_axis_label='y')\n ...\n >>> p.line(x, y, legend_label='Trend', line_width=2)\n >>>\n >>> st.bokeh_chart(p, use_container_width=True)\n\n .. output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/charts.bokeh_chart.py\n height: 700px\n\n ", "language": "en", "n_whitespaces": 389, "n_words": 135, "vocab_size": 102 }
63
Python
57
72703b38029f9358a0ec7ca5ed875a6b438ece19
bokeh_chart.py
118,727
14
84
bokeh_chart
https://github.com/streamlit/streamlit.git
Replace static apps with live Cloud apps (#4317) Co-authored-by: kajarenc <[email protected]>
208
0
26,384
13
3
30
def matthews_corrcoef(y_true, y_pred, *, sample_weight=None):
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    check_consistent_length(y_true, y_pred, sample_weight)
    if y_type not in {"binary", "multiclass"}:
        raise ValueError("%s is not supported" % y_type)

    lb = LabelEncoder()
    lb.fit(np.hstack([y_true, y_pred]))
    y_true = lb.transform(y_true)
    y_pred = lb.transform(y_pred)

    C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    t_sum = C.sum(axis=1, dtype=np.float64)
    p_sum = C.sum(axis=0, dtype=np.float64)
    n_correct = np.trace(C, dtype=np.float64)
    n_samples = p_sum.sum()
    cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)
    cov_ypyp = n_samples**2 - np.dot(p_sum, p_sum)
    cov_ytyt = n_samples**2 - np.dot(t_sum, t_sum)

    if cov_ypyp * cov_ytyt == 0:
        return 0.0
    else:
        return cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
sklearn/metrics/_classification.py
336
scikit-learn
{ "docstring": "Compute the Matthews correlation coefficient (MCC).\n\n The Matthews correlation coefficient is used in machine learning as a\n measure of the quality of binary and multiclass classifications. It takes\n into account true and false positives and negatives and is generally\n regarded as a balanced measure which can be used even if the classes are of\n very different sizes. The MCC is in essence a correlation coefficient value\n between -1 and +1. A coefficient of +1 represents a perfect prediction, 0\n an average random prediction and -1 an inverse prediction. The statistic\n is also known as the phi coefficient. [source: Wikipedia]\n\n Binary and multiclass labels are supported. Only in the binary case does\n this relate to information about true and false positives and negatives.\n See references below.\n\n Read more in the :ref:`User Guide <matthews_corrcoef>`.\n\n Parameters\n ----------\n y_true : array, shape = [n_samples]\n Ground truth (correct) target values.\n\n y_pred : array, shape = [n_samples]\n Estimated targets as returned by a classifier.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n .. versionadded:: 0.18\n\n Returns\n -------\n mcc : float\n The Matthews correlation coefficient (+1 represents a perfect\n prediction, 0 an average random prediction and -1 and inverse\n prediction).\n\n References\n ----------\n .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the\n accuracy of prediction algorithms for classification: an overview\n <https://doi.org/10.1093/bioinformatics/16.5.412>`_.\n\n .. [2] `Wikipedia entry for the Matthews Correlation Coefficient\n <https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_.\n\n .. [3] `Gorodkin, (2004). Comparing two K-category assignments by a\n K-category correlation coefficient\n <https://www.sciencedirect.com/science/article/pii/S1476927104000799>`_.\n\n .. [4] `Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC and CEN\n Error Measures in MultiClass Prediction\n <https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0041882>`_.\n\n Examples\n --------\n >>> from sklearn.metrics import matthews_corrcoef\n >>> y_true = [+1, +1, +1, -1]\n >>> y_pred = [+1, -1, +1, +1]\n >>> matthews_corrcoef(y_true, y_pred)\n -0.33...\n ", "language": "en", "n_whitespaces": 482, "n_words": 283, "vocab_size": 177 }
93
Python
62
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
_classification.py
258,915
21
218
matthews_corrcoef
https://github.com/scikit-learn/scikit-learn.git
MNT Update black to stable version (#22474)
168
0
75,481
12
2
33
def test_fed_filtering(self): fed_hostname = self.hs.hostname + "2" subspace = "#subspace:" + fed_hostname # Create a few rooms which will have different properties. public_room = "#public:" + fed_hostname knock_room = "#knock:" + fed_hostname not_invited_room = "#not_invited:" + fed_hostname invited_room = "#invited:" + fed_hostname restricted_room = "#restricted:" + fed_hostname restricted_accessible_room = "#restricted_accessible:" + fed_hostname world_readable_room = "#world_readable:" + fed_hostname joined_room = self.helper.create_room_as(self.user, tok=self.token) # Poke an invite over federation into the database. self._poke_fed_invite(invited_room, "@remote:" + fed_hostname) # Note that these entries are brief, but should contain enough info. children_rooms = ( ( public_room, { "room_id": public_room, "world_readable": False, "join_rules": JoinRules.PUBLIC, }, ), ( knock_room, { "room_id": knock_room, "world_readable": False, "join_rules": JoinRules.KNOCK, }, ), ( not_invited_room, { "room_id": not_invited_room, "world_readable": False, "join_rules": JoinRules.INVITE, }, ), ( invited_room, { "room_id": invited_room, "world_readable": False, "join_rules": JoinRules.INVITE, }, ), ( restricted_room, { "room_id": restricted_room, "world_readable": False, "join_rules": JoinRules.RESTRICTED, "allowed_room_ids": [], }, ), ( restricted_accessible_room, { "room_id": restricted_accessible_room, "world_readable": False, "join_rules": JoinRules.RESTRICTED, "allowed_room_ids": [self.room], }, ), ( world_readable_room, { "room_id": world_readable_room, "world_readable": True, "join_rules": JoinRules.INVITE, }, ), ( joined_room, { "room_id": joined_room, "world_readable": False, "join_rules": JoinRules.INVITE, }, ), ) subspace_room_entry = _RoomEntry( subspace, { "room_id": subspace, "world_readable": True, }, # Place each room in the sub-space. [ { "type": EventTypes.SpaceChild, "room_id": subspace, "state_key": room_id, "content": {"via": [fed_hostname]}, } for room_id, _ in children_rooms ], )
tests/handlers/test_room_summary.py
544
synapse
{ "docstring": "\n Rooms returned over federation should be properly filtered to only include\n rooms the user has access to.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
218
Python
104
7754af24ab163a3666bc04c7df409e59ace0d763
test_room_summary.py
247,085
129
484
test_fed_filtering
https://github.com/matrix-org/synapse.git
Remove the unstable `/spaces` endpoint. (#12073) ...and various code supporting it. The /spaces endpoint was from an old version of MSC2946 and included both a Client-Server and Server-Server API. Note that the unstable /hierarchy endpoint (from the final version of MSC2946) is not yet removed.
1,598
0
71,495
14
1
25
def test_tweedie_log_identity_consistency(p):
    half_tweedie_log = HalfTweedieLoss(power=p)
    half_tweedie_identity = HalfTweedieLossIdentity(power=p)
    n_samples = 10
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=half_tweedie_log, n_samples=n_samples, seed=42
    )
    y_pred = half_tweedie_log.link.inverse(raw_prediction)  # exp(raw_prediction)

    # Let's compare the loss values, up to some constant term that is dropped
    # in HalfTweedieLoss but not in HalfTweedieLossIdentity.
    loss_log = half_tweedie_log.loss(
        y_true=y_true, raw_prediction=raw_prediction
    ) + half_tweedie_log.constant_to_optimal_zero(y_true)
    loss_identity = half_tweedie_identity.loss(
        y_true=y_true, raw_prediction=y_pred
    ) + half_tweedie_identity.constant_to_optimal_zero(y_true)
    # Note that HalfTweedieLoss ignores different constant terms than
    # HalfTweedieLossIdentity. Constant terms means terms not depending on
    # raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses
    # give the same values.
    assert_allclose(loss_log, loss_identity)

    # For gradients and hessians, the constant terms do not matter. We have, however,
    # to account for the chain rule, i.e. with x=raw_prediction
    #     gradient_log(x) = d/dx loss_log(x)
    #                     = d/dx loss_identity(exp(x))
    #                     = exp(x) * gradient_identity(exp(x))
    # Similarly,
    #     hessian_log(x) = exp(x) * gradient_identity(exp(x))
    #                    + exp(x)**2 * hessian_identity(x)
    gradient_log, hessian_log = half_tweedie_log.gradient_hessian(
        y_true=y_true, raw_prediction=raw_prediction
    )
    gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(
        y_true=y_true, raw_prediction=y_pred
    )
    assert_allclose(gradient_log, y_pred * gradient_identity)
    assert_allclose(
        hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity
    )
sklearn/_loss/tests/test_loss.py
255
scikit-learn
{ "docstring": "Test for identical losses when only the link function is different.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
174
Python
109
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
test_loss.py
259,434
25
155
test_tweedie_log_identity_consistency
https://github.com/scikit-learn/scikit-learn.git
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
383
0
75,768
10
1
6
def restart_subscription(name):
    subscription = frappe.get_doc("Subscription", name)
    subscription.restart_subscription()


@frappe.whitelist()
erpnext/accounts/doctype/subscription/subscription.py
50
@frappe.whitelist()
erpnext
{ "docstring": "\n\tRestarts a cancelled `Subscription`. The `Subscription` will 'forget' the history of\n\tall invoices it has generated\n\t", "language": "en", "n_whitespaces": 14, "n_words": 16, "vocab_size": 16 }
8
Python
8
494bd9ef78313436f0424b918f200dab8fc7c20b
subscription.py
65,075
3
21
restart_subscription
https://github.com/frappe/erpnext.git
style: format code with black
4
1
13,786
9
4
15
def set_exception(self, exception):
    if self._state != _PENDING:
        raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
    if isinstance(exception, type):
        exception = exception()
    if type(exception) is StopIteration:
        raise TypeError("StopIteration interacts badly with generators "
                        "and cannot be raised into a Future")
    self._exception = exception
    self._state = _FINISHED
    self.__schedule_callbacks()
    self.__log_traceback = True
python3.10.4/Lib/asyncio/futures.py
132
XX-Net
{ "docstring": "Mark the future done and set an exception.\n\n If the future is already done when this method is called, raises\n InvalidStateError.\n ", "language": "en", "n_whitespaces": 42, "n_words": 21, "vocab_size": 17 }
44
Python
36
8198943edd73a363c266633e1aa5b2a9e9c9f526
futures.py
220,514
12
70
set_exception
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
160
0
56,024
12
4
6
def _move_into_position(self, width, height):
    if self.should_center:
        self.center()
    if height is not None:
        self.height = height
    if width is not None:
        self.width = width
manim/mobject/svg/svg_mobject.py
68
manim
{ "docstring": "Uses the SVGMobject's config dictionary to set the Mobject's\n width, height, and/or center it. Use ``width``, ``height``, and\n ``should_center`` respectively to modify this.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 21 }
23
Python
15
902e7eb4f0147b5882a613b67467e38a1d47f01e
svg_mobject.py
189,467
7
42
_move_into_position
https://github.com/ManimCommunity/manim.git
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
84
0
46,075
9
1
11
def upgrade():
    with op.batch_alter_table("task_instance", schema=None) as batch_op:
        batch_op.alter_column("pool_slots", existing_type=sa.Integer, nullable=False, server_default='1')
airflow/migrations/versions/8646922c8a04_change_default_pool_slots_to_1.py
70
airflow
{ "docstring": "Change default pool_slots to 1 and make pool_slots not nullable", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
11
Python
11
66c342d033bd3cb959b4dc4e7e4b8aad597aab63
8646922c8a04_change_default_pool_slots_to_1.py
45,007
3
39
upgrade
https://github.com/apache/airflow.git
Support generating SQL script for upgrades (#20962) This PR attempts to add support for generating sql scripts for upgrade. Example command: `airflow db upgrade --revision-range e8d98d8ss99:78daisdu38d` `airflow db upgrade --range 2.0.0:2.2.3`
24
0
8,439
11
2
20
async def test_setup_temporary_error(hass, aioclient_mock):
    fake_async_add_entities = MagicMock()

    errors = [HTTPStatus.TOO_MANY_REQUESTS, HTTPStatus.INTERNAL_SERVER_ERROR]
    for error in errors:
        aioclient_mock.get(re.compile("api.foobot.io/v2/owner/.*"), status=error)
        with pytest.raises(PlatformNotReady):
            await foobot.async_setup_platform(
                hass, VALID_CONFIG, fake_async_add_entities
            )
tests/components/foobot/test_sensor.py
104
core
{ "docstring": "Expected failures caused by temporary errors in API response.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
25
Python
23
8896229ea641a558161d8caed796895e9a78f457
test_sensor.py
304,817
9
63
test_setup_temporary_error
https://github.com/home-assistant/core.git
Improve type hint in foobot sensor entity (#77164)
88
0
103,612
12
1
17
def test_constrained_layout22():
    fig, ax = plt.subplots(layout="constrained")

    fig.draw_without_rendering()
    extents0 = np.copy(ax.get_position().extents)

    fig.suptitle("Suptitle", y=0.5)
    fig.draw_without_rendering()
    extents1 = np.copy(ax.get_position().extents)

    np.testing.assert_allclose(extents0, extents1)
lib/matplotlib/tests/test_constrainedlayout.py
129
matplotlib
{ "docstring": "#11035: suptitle should not be include in CL if manually positioned", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
18
Python
14
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
test_constrainedlayout.py
107,161
8
77
test_constrained_layout22
https://github.com/matplotlib/matplotlib.git
ENH: implement and use base layout_engine for more flexible layout.
42
0
22,616
11
1
25
def test_syntax_highlight_ranges(): syntax = Syntax( CODE, lexer="python", line_numbers=True, word_wrap=False, highlight_ranges=[ SyntaxHighlightRange( # overline the 2nd char of the 1st line: start=SyntaxPosition(1, 1), end=SyntaxPosition(1, 2), style=Style(overline=True), ), SyntaxHighlightRange( start=SyntaxPosition(1, len("def loop_")), end=SyntaxPosition(1, len("def loop_first_last")), style=Style(underline=True), ), SyntaxHighlightRange( start=SyntaxPosition(1, len("def loop_first")), end=SyntaxPosition(3, len(" iter_values = iter")), style=Style(bold=True), ), SyntaxHighlightRange( start=SyntaxPosition(9, len(" for ")), end=SyntaxPosition(9, len(" for value in")), style=Style(strike=True), ), SyntaxHighlightRange( start=SyntaxPosition(6, len(" except ")), end=SyntaxPosition(6, len(" except StopIteration")), style=Style(reverse=True), ), # Those should be out of range, and have no impact: SyntaxHighlightRange( start=SyntaxPosition(1, 100), # `column_index` is out of range end=SyntaxPosition(2, 2), style=Style(bold=True), ), SyntaxHighlightRange( start=SyntaxPosition(1, 1), end=SyntaxPosition(30, 2), # `line_number` is out of range style=Style(bold=True), ), ], ) rendered_syntax = render(syntax, True) print(repr(rendered_syntax)) expected = '\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 1 \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34md\x1b[0m\x1b[53;38;2;102;217;239;48;2;39;40;34me\x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mf\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mloop_\x1b[0m\x1b[4;38;2;166;226;46;48;2;39;40;34mfirst\x1b[0m\x1b[1;4;38;2;166;226;46;48;2;39;40;34m_last\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34mIterable\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m[\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34mT\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m]\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[1;38;2;249;38;114;48;2;39;40;34m-\x1b[0m\x1b[1;38;2;249;38;114;48;2;39;40;34m>\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34mIterable\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m[\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34mTuple\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m[\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34mbool\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34mbool\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34mT\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m]\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m]\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m:\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[1;38;2;230;219;116;48;2;39;40;34m\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[1;38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34m 
\x1b[0m\x1b[1;38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[7;38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[9;38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[9;38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[9;38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m11 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m12 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m 
\x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\n\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m13 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\n' assert rendered_syntax == expected
tests/test_syntax.py
728
rich
{ "docstring": "Iterate and generate a tuple with a flag for first and last value.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 11 }
193
Python
121
ebc5d2797e7bfb595183fe61aac50be58c9a5174
test_syntax.py
161,775
48
291
test_syntax_highlight_ranges
https://github.com/Textualize/rich.git
[syntax] add a `highlight_ranges` optional arg to the Syntax ctor With this new API we can apply a style from (LINE A, COLUMN A) to (LINE B, COLUMN B) - which is something we will need to be able to add arbitrary ranges to Syntax
833
0
39,061
18
2
18
def str_presenter(dumper, data): if len(data.splitlines()) > 1: # check for multiline string return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") return dumper.represent_scalar("tag:yaml.org,2002:str", data) yaml.add_representer(str, str_presenter) yaml.representer.SafeRepresenter.add_representer(str, str_presenter) deployment_app = PrefectTyper( name="deployment", help="Commands for working with deployments." ) app.add_typer(deployment_app)
src/prefect/cli/deployment.py
135
prefect
{ "docstring": "\n configures yaml for dumping multiline strings\n Ref: https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data\n ", "language": "en", "n_whitespaces": 18, "n_words": 8, "vocab_size": 8 }
34
Python
30
36d9870433a22fff3944fa07f8e2feeb1b622bd9
deployment.py
57,811
4
42
str_presenter
https://github.com/PrefectHQ/prefect.git
Working YAML generation with lots of bells and whistles
49
0
11,712
11
2
27
def test_lookup_using_custom_divider(self): jane = Employee.objects.create(name="Jane,Green", department=self.design) modeladmin = EmployeeCustomDividerFilterAdmin(Employee, site) employees = [jane, self.jack] request = self.request_factory.get( "/", {"name__in": "|".join(e.name for e in employees)} ) # test for lookup with custom divider request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) # Make sure the correct queryset is returned queryset = changelist.get_queryset(request) self.assertEqual(list(queryset), employees) # test for lookup with comma in the lookup string request = self.request_factory.get("/", {"name": jane.name}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) # Make sure the correct queryset is returned queryset = changelist.get_queryset(request) self.assertEqual(list(queryset), [jane])
tests/admin_filters/tests.py
259
django
{ "docstring": "\n Filter __in lookups with a custom divider.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
85
Python
48
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,137
16
156
test_lookup_using_custom_divider
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
229
0
51,875
13
1
4
def blend_soft_light(self, rgb, intensity): return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2
lib/matplotlib/colors.py
44
matplotlib
{ "docstring": "\n Combine an RGB image with an intensity map using \"soft light\" blending,\n using the \"pegtop\" formula.\n\n Parameters\n ----------\n rgb : ndarray\n An MxNx3 RGB array of floats ranging from 0 to 1 (color image).\n intensity : ndarray\n An MxNx1 array of floats ranging from 0 to 1 (grayscale image).\n\n Returns\n -------\n ndarray\n An MxNx3 RGB array representing the combined images.\n ", "language": "en", "n_whitespaces": 164, "n_words": 60, "vocab_size": 38 }
18
Python
14
9b6abd0b4933811e0a45c2535ab8fd107db65dd9
colors.py
110,264
2
28
blend_soft_light
https://github.com/matplotlib/matplotlib.git
DOC: improve grammar and consistency
32
0
24,006
10
1
3
def __call__(self): return list(self)
lib/matplotlib/cm.py
21
matplotlib
{ "docstring": "\n Return a list of the registered colormap names.\n\n This exists only for backward-compatibility in `.pyplot` which had a\n ``plt.colormaps()`` method. The recommended way to get this list is\n now ``list(colormaps)``.\n ", "language": "en", "n_whitespaces": 66, "n_words": 30, "vocab_size": 28 }
4
Python
4
686c9e5a413e31c46bb049407d5eca285bcab76d
cm.py
108,461
2
11
__call__
https://github.com/matplotlib/matplotlib.git
Fix spelling errors
18
0
23,201
7
4
8
def get_custom_object_name(obj): if hasattr(obj, "name"): # Accept `Loss` instance as `Metric`. return obj.name elif hasattr(obj, "__name__"): # Function. return obj.__name__ elif hasattr(obj, "__class__"): # Class instance. return generic_utils.to_snake_case(obj.__class__.__name__) else: # Unrecognized object. return None
keras/engine/compile_utils.py
95
keras
{ "docstring": "Returns the name to use for a custom loss or metric callable.\n\n Args:\n obj: Custom loss of metric callable\n\n Returns:\n Name to use, or `None` if the object was not recognized.\n ", "language": "en", "n_whitespaces": 50, "n_words": 31, "vocab_size": 26 }
34
Python
25
84afc5193d38057e2e2badf9c889ea87d80d8fbf
compile_utils.py
271,050
9
53
get_custom_object_name
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81
0
80,685
12
1
14
def permute_dimensions(x, pattern): return tf.compat.v1.transpose(x, perm=pattern) @keras_export("keras.backend.resize_images") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
keras/backend.py
66
@keras_export("keras.backend.resize_images") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
keras
{ "docstring": "Permutes axes in a tensor.\n\n Args:\n x: Tensor or variable.\n pattern: A tuple of\n dimension indices, e.g. `(0, 2, 1)`.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n >>> a\n <tf.Tensor: shape=(4, 3), dtype=int32, numpy=\n array([[ 1, 2, 3],\n [ 4, 5, 6],\n [ 7, 8, 9],\n [10, 11, 12]], dtype=int32)>\n >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))\n <tf.Tensor: shape=(3, 4), dtype=int32, numpy=\n array([[ 1, 4, 7, 10],\n [ 2, 5, 8, 11],\n [ 3, 6, 9, 12]], dtype=int32)>\n\n ", "language": "en", "n_whitespaces": 238, "n_words": 87, "vocab_size": 57 }
9
Python
9
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,576
2
23
permute_dimensions
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
12
1
80,199
9
1
9
def get_sal_struct(company, currency, salary_slip_based_on_timesheet, condition): return frappe.db.sql_list( """select name from `tabSalary Structure` where docstatus = 1 and is_active = 'Yes' and company = %(company)s and currency = %(currency)s and ifnull(salary_slip_based_on_timesheet,0) = %(salary_slip_based_on_timesheet)s {condition}""".format( condition=condition ), { "company": company, "currency": currency, "salary_slip_based_on_timesheet": salary_slip_based_on_timesheet, }, )
erpnext/payroll/doctype/payroll_entry/payroll_entry.py
68
erpnext
{ "docstring": "\n\t\tselect\n\t\t\tname from `tabSalary Structure`\n\t\twhere\n\t\t\tdocstatus = 1 and\n\t\t\tis_active = 'Yes'\n\t\t\tand company = %(company)s\n\t\t\tand currency = %(currency)s and\n\t\t\tifnull(salary_slip_based_on_timesheet,0) = %(salary_slip_based_on_timesheet)s\n\t\t\t{condition}", "language": "en", "n_whitespaces": 17, "n_words": 26, "vocab_size": 19 }
19
Python
17
494bd9ef78313436f0424b918f200dab8fc7c20b
payroll_entry.py
66,913
20
43
get_sal_struct
https://github.com/frappe/erpnext.git
style: format code with black
8
0
14,378
10
1
8
def gelu_new(x): return 0.5 * x * (1 + paddle.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * paddle.pow(x, 3))))
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/cn_clip/clip/modeling_bert.py
72
PaddleHub
{ "docstring": " Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).\n Also see https://arxiv.org/abs/1606.08415\n ", "language": "en", "n_whitespaces": 29, "n_words": 18, "vocab_size": 18 }
19
Python
15
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
modeling_bert.py
49,732
2
49
gelu_new
https://github.com/PaddlePaddle/PaddleHub.git
add disco_diffusion_cnclip_vitb16 module
25
0
9,898
16
4
19
def clean_stale_components(self): with self._components_lock: stale_components = [] stale_component_ids = [] for id, component in self._components.items(): elapsed = time.monotonic() - component.last_reported_time if elapsed > self._component_timeout_s: stale_component_ids.append(id) logger.info( "Metrics from a worker ({}) is cleaned up due to " "timeout. Time since last report {}s".format(id, elapsed) ) for id in stale_component_ids: stale_components.append(self._components.pop(id)) return stale_components # TODO(sang): add start and end timestamp
python/ray/_private/metrics_agent.py
154
ray
{ "docstring": "Clean up stale components.\n\n Stale means the component is dead or unresponsive.\n\n Stale components won't be reported to Prometheus anymore.\n ", "language": "en", "n_whitespaces": 41, "n_words": 20, "vocab_size": 19 }
59
Python
52
073e7bc04d989607848552537f9f5ac91fa07d85
metrics_agent.py
136,720
15
90
clean_stale_components
https://github.com/ray-project/ray.git
[Dashboard] Remove opencensus from agent proxy export (#30469) This PR removes the Opencensus usage on proxy export. Previously, OpenCensus APIs we are using for proxy export deepcopies the whole data {labels -> data} whenever there's a new export which causes O(N^2) write on metrics record. See the below section for more details on removing Opencensus. Instead of using their APIs, we will store the aggregation data in memory and export them using a custom Prometheus exporter (0 deepcopies, purely done by lock). Below is the flamegraph for the same workload (100 actors + submitting 1000 tasks per second + 1 second metrics export). Before this fix, the CPU usage was > 100% all the time. With this fix, the CPU usage is only about 10~15% with the same workload.
279
0
30,979
17
1
20
def _annotate_pose(cls, image, face): center = np.array((face.aligned.size / 2, face.aligned.size / 2)).astype("int32").reshape(1, 2) center = np.rint(face.aligned.transform_points(center, invert=True)).astype("int32") points = face.aligned.pose.xyz_2d * face.aligned.size points = np.rint(face.aligned.transform_points(points, invert=True)).astype("int32") cv2.line(image, tuple(center), tuple(points[1]), (0, 255, 0), 2) cv2.line(image, tuple(center), tuple(points[0]), (255, 0, 0), 2) cv2.line(image, tuple(center), tuple(points[2]), (0, 0, 255), 2)
tools/alignments/jobs.py
291
faceswap
{ "docstring": " Annotate the pose onto the frame.\n\n Parameters\n ----------\n image: :class:`numpy.ndarray`\n The frame that pose is to be annotated on to\n face: :class:`lib.align.AlignedFace`\n The aligned face loaded for head centering\n ", "language": "en", "n_whitespaces": 87, "n_words": 29, "vocab_size": 25 }
47
Python
29
5e73437be47f2410439a3c6716de96354e6a0c94
jobs.py
101,251
9
196
_annotate_pose
https://github.com/deepfakes/faceswap.git
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
129
0
20,671
16
3
7
def filter_on_submodules(all_modules, submodule): filtered_modules = [ mod for mod in all_modules if PACKAGE + submodule in mod.__name__ ] return filtered_modules
keras/tests/keras_doctest.py
43
keras
{ "docstring": "Filters all the modules based on the module flag.\n\n The module flag has to be relative to the core package imported.\n For example, if `submodule=keras.layers` then, this function will return\n all the modules in the submodule.\n\n Args:\n all_modules: All the modules in the core package.\n submodule: Submodule to filter from all the modules.\n\n Returns:\n All the modules in the submodule.\n ", "language": "en", "n_whitespaces": 75, "n_words": 60, "vocab_size": 38 }
20
Python
17
a449efe29b092e658a29cd847e0494979a47d252
keras_doctest.py
268,868
5
27
filter_on_submodules
https://github.com/keras-team/keras.git
Add a keras doctest modeled on tensorflow doctest PiperOrigin-RevId: 424672415
29
0
79,737
10
1
15
def transform(self, X): check_is_fitted(self) X = self._validate_data( X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=False ) W = self._solve_W(X, self.components_, self._transform_max_iter) return W
sklearn/decomposition/_nmf.py
96
scikit-learn
{ "docstring": "Transform the data X according to the fitted MiniBatchNMF model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Data matrix to be transformed by the model.\n\n Returns\n -------\n W : ndarray of shape (n_samples, n_components)\n Transformed data.\n ", "language": "en", "n_whitespaces": 111, "n_words": 40, "vocab_size": 31 }
21
Python
19
69132ebbd39f070590ca01813340b5b12c0d02ab
_nmf.py
259,702
7
62
transform
https://github.com/scikit-learn/scikit-learn.git
FEA Online implementation of non-negative matrix factorization (#16948) Co-authored-by: Tom Dupré la Tour <[email protected]> Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
74
0
75,877
11
1
10
def kubernetes_manifest(): template = Template( (prefect.__module_path__ / "cli" / "templates" / "kubernetes.yaml").read_text() ) manifest = template.substitute( { "image_name": get_prefect_image_name(), } ) print(manifest)
src/prefect/cli/orion.py
83
prefect
{ "docstring": "\n Generates a kubernetes manifest for to deploy Orion to a cluster.\n\n Example:\n $ prefect orion kubernetes-manifest | kubectl apply -f -\n ", "language": "en", "n_whitespaces": 38, "n_words": 21, "vocab_size": 19 }
22
Python
18
23365cf7727c45f38ad983d610ffec5c15ceca21
orion.py
53,269
10
44
kubernetes_manifest
https://github.com/PrefectHQ/prefect.git
Add kubernetes manifest commands
72
0
10,764
15
1
10
def active_count(self): return self.order_by().exclude(inventory_sources__source='controller').values(name_lower=Lower('name')).distinct().count()
awx/main/managers.py
68
awx
{ "docstring": "Return count of active, unique hosts for licensing.\n Construction of query involves:\n - remove any ordering specified in model's Meta\n - Exclude hosts sourced from another Tower\n - Restrict the query to only return the name column\n - Only consider results that are unique\n - Return the count of this query\n ", "language": "en", "n_whitespaces": 105, "n_words": 51, "vocab_size": 37 }
4
Python
4
f52ef6e9677b01c111b012a8725da43a2580d8f1
managers.py
80,945
2
37
active_count
https://github.com/ansible/awx.git
Fixes case sensitive host count
18
0
17,116
15
1
18
def test_partial_fit_weight_class_balanced(klass): # partial_fit with class_weight='balanced' not supported regex = ( r"class_weight 'balanced' is not supported for " r"partial_fit\. In order to use 'balanced' weights, " r"use compute_class_weight\('balanced', classes=classes, y=y\). " r"In place of y you can use a large enough sample " r"of the full training set target to properly " r"estimate the class frequency distributions\. " r"Pass the resulting weights as the class_weight " r"parameter\." ) with pytest.raises(ValueError, match=regex): klass(class_weight="balanced").partial_fit(X, Y, classes=np.unique(Y))
sklearn/linear_model/tests/test_sgd.py
121
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
scikit-learn
{ "docstring": "\n regex = (\n r\"class_weight 'balanced' is not supported for \"\n r\"partial_fit\\. In order to use 'balanced' weights, \"\n r\"use compute_class_weight\\('balanced', classes=classes, y=y\\). \"\n r\"In place of y you can use a large enough sample \"\n r\"of the full training set target to properly \"\n r\"estimate the class frequency distributions\\. \"\n r\"Pass the resulting weights as the class_weight \"\n r\"parameter\\.\"\n )\n with pytest.raises(ValueError, match=regex):\n klass(class_weight=\"balanced\").partial_fit(X, Y, classes=np.unique(Y))\n\n\[email protected](\"klass\", [SGDClassifier, SparseSGDClassifier])", "language": "en", "n_whitespaces": 140, "n_words": 69, "vocab_size": 57 }
8
Python
8
7f0b57e626d36a7c6d8f417261c6bbfe05376a98
test_sgd.py
260,298
13
59
test_partial_fit_weight_class_balanced
https://github.com/scikit-learn/scikit-learn.git
MAINT parameter validation in SGD*, PassiveAgressive* and Perceptron (#23521) Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Meekail Zain <[email protected]>
10
1
76,168
13
1
9
def test_naive_all_pairs_lowest_common_ancestor6(self): G = self.DG.copy() G.add_node(-1) gen = naive_all_pairs_lca(G, [(-1, -1), (-1, 0)]) assert dict(gen) == {(-1, -1): -1}
networkx/algorithms/tests/test_lowest_common_ancestors.py
101
networkx
{ "docstring": "Test that pairs with no LCA specified emits nothing.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
19
Python
18
b2f91c34a23058dd70b41784af0d87890216026a
test_lowest_common_ancestors.py
177,019
5
63
test_naive_all_pairs_lowest_common_ancestor6
https://github.com/networkx/networkx.git
Naive lowest common ancestor implementation (#5736) * Add naive lca methods * Naive algorithm implementation for LCA * Modify naive lca functions * Correct parameters of nx.ancestors * Update lowest_common_ancestors.py * Parametrize tests * Apply suggestions from code review Co-authored-by: Dan Schult <[email protected]> * Yield instead of append * Tests for naive lca * Correct test cases for naive lca algorithms * Apply suggestions from code review Co-authored-by: Mridul Seth <[email protected]> * Fix function name -when calling * Make requested changes * Inlining _get_a_lowest_common_ancestor Co-authored-by: dtuncturk <[email protected]> Co-authored-by: Dan Schult <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
54
0
42,231
11
5
19
def get_columns(salary_slips): columns = [ _("Salary Slip ID") + ":Link/Salary Slip:150", _("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Date of Joining") + "::80", _("Branch") + ":Link/Branch:-1", _("Department") + ":Link/Department:-1", _("Designation") + ":Link/Designation:120", _("Company") + ":Link/Company:120", _("Start Date") + "::80", _("End Date") + "::80", _("Leave Without Pay") + ":Float:50", _("Payment Days") + ":Float:120", ] salary_components = {_("Earning"): [], _("Deduction"): []} for component in frappe.db.sql( """select distinct sd.salary_component, sc.type from `tabSalary Detail` sd, `tabSalary Component` sc where sc.name=sd.salary_component and sd.amount != 0 and sd.parent in (%s)""" % (", ".join(["%s"] * len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1, ): salary_components[_(component.type)].append(component.salary_component) columns = ( columns + [(e + ":Currency:120") for e in salary_components[_("Earning")]] + [_("Gross Pay") + ":Currency:120"] + [(d + ":Currency:120") for d in salary_components[_("Deduction")]] + [ _("Loan Repayment") + ":Currency:120", _("Total Deduction") + ":Currency:120", _("Net Pay") + ":Currency:120", ] ) return columns, salary_components[_("Earning")], salary_components[_("Deduction")]
erpnext/payroll/report/salary_register/salary_register.py
483
erpnext
{ "docstring": "\n\tcolumns = [\n\t _(\"Salary Slip ID\") + \":Link/Salary Slip:150\",\n\t _(\"Employee\") + \":Link/Employee:120\",\n\t _(\"Employee Name\") + \"::140\",\n\t _(\"Date of Joining\") + \"::80\",\n\t _(\"Branch\") + \":Link/Branch:120\",\n\t _(\"Department\") + \":Link/Department:120\",\n\t _(\"Designation\") + \":Link/Designation:120\",\n\t _(\"Company\") + \":Link/Company:120\",\n\t _(\"Start Date\") + \"::80\",\n\t _(\"End Date\") + \"::80\",\n\t _(\"Leave Without Pay\") + \":Float:130\",\n\t _(\"Payment Days\") + \":Float:120\",\n\t _(\"Currency\") + \":Link/Currency:80\"\n\t]\n\tselect distinct sd.salary_component, sc.type\n\t\tfrom `tabSalary Detail` sd, `tabSalary Component` sc\n\t\twhere sc.name=sd.salary_component and sd.amount != 0 and sd.parent in (%s)", "language": "en", "n_whitespaces": 161, "n_words": 75, "vocab_size": 58 }
121
Python
79
494bd9ef78313436f0424b918f200dab8fc7c20b
salary_register.py
66,968
37
267
get_columns
https://github.com/frappe/erpnext.git
style: format code with black
87
0
14,394
17
1
4
def serialize(metric): return serialize_keras_object(metric) @keras_export("keras.metrics.deserialize")
keras/metrics/__init__.py
32
@keras_export("keras.metrics.deserialize")
keras
{ "docstring": "Serializes metric function or `Metric` instance.\n\n Args:\n metric: A Keras `Metric` instance or a metric function.\n\n Returns:\n Metric configuration dictionary.\n ", "language": "en", "n_whitespaces": 39, "n_words": 20, "vocab_size": 17 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
__init__.py
274,614
2
11
serialize
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
10
1
81,240
7
4
23
def get_data(filters): data = [] component_types = frappe.db.sql("""select name from `tabSalary Component` where is_income_tax_component = 1""") component_types = [comp_type[0] for comp_type in component_types] if not len(component_types): return [] conditions = get_conditions(filters) entry = frappe.db.sql( """select sal.employee, sal.employee_name, sal.posting_date, ded.salary_component, ded.amount,sal.gross_pay from `tabSalary Slip` sal, `tabSalary Detail` ded where sal.name = ded.parent and ded.parentfield = 'deductions' and ded.parenttype = 'Salary Slip' and sal.docstatus = 1 %s and ded.salary_component in (%s)""" % (conditions , ", ".join(['%s']*len(component_types))), tuple(component_types), as_dict=1) for d in entry: data.append({ "employee": d.employee, "employee_name": d.employee_name, "it_comp": d.salary_component, "posting_date": d.posting_date, "it_amount": d.amount, "gross_pay": d.gross_pay }) return data
erpnext/payroll/report/income_tax_deductions/income_tax_deductions.py
220
erpnext
{ "docstring": " select name from `tabSalary Component`\n\t\twhere is_income_tax_component = 1 select sal.employee, sal.employee_name, sal.posting_date, ded.salary_component, ded.amount,sal.gross_pay\n\t\tfrom `tabSalary Slip` sal, `tabSalary Detail` ded\n\t\twhere sal.name = ded.parent\n\t\tand ded.parentfield = 'deductions'\n\t\tand ded.parenttype = 'Salary Slip'\n\t\tand sal.docstatus = 1 %s\n\t\tand ded.salary_component in (%s)\n\t", "language": "en", "n_whitespaces": 38, "n_words": 44, "vocab_size": 31 }
53
Python
43
3936d8b70e4847dddd49bf467fcbc6e2fcd106c5
income_tax_deductions.py
64,417
26
133
get_data
https://github.com/frappe/erpnext.git
refactor: remove India specific code
35
0
13,631
15
3
10
def generator(self): K = self.module.number_field return K.ext.alias if K and K.ext.is_aliased else self.T.gen
sympy/polys/numberfields/modules.py
53
sympy
{ "docstring": "\n Return a :py:class:`~.Symbol` to be used when expressing this element\n as a polynomial.\n\n If we have an associated :py:class:`~.AlgebraicField` whose primitive\n element has an alias symbol, we use that. Otherwise we use the variable\n of the minimal polynomial defining the power basis to which we belong.\n ", "language": "en", "n_whitespaces": 89, "n_words": 46, "vocab_size": 36 }
13
Python
12
d37a3c05b98c8144d401fa264af687a525b5e39c
modules.py
197,803
3
33
generator
https://github.com/sympy/sympy.git
Improve printing for `PrimeIdeal` * Support latex printing * Rename `_pretty()` --> `repr()` since this is not 2D printing. * Provide a `__str__()` method, which prints less info than the `__repr__()` method.
34
0
48,701
9
1
16
def test_disabling_background_update_sleep(self): self.get_success( self.store.db_pool.simple_insert( "background_updates", values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, ) ) self.update_handler.side_effect = self.update self.update_handler.reset_mock() self.updates.start_doing_background_updates(), # 2: advance the reactor very little self.reactor.pump([0.025]) # check that an update has run self.update_handler.assert_called()
tests/storage/test_background_update.py
133
synapse
{ "docstring": "\n Test that disabling sleep in the config results in bg update not sleeping\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
33
Python
31
ef3619e61d84493d98470eb2a69131d15eb1166b
test_background_update.py
247,568
12
77
test_disabling_background_update_sleep
https://github.com/matrix-org/synapse.git
Add config settings for background update parameters (#11980)
155
0
71,746
13
1
11
def test_get_global_no_mutability(self) -> None: # First add some account data to set up the test. self.get_success( self._store.add_account_data_for_user( self.user_id, "test.data", {"wombat": True} ) ) # Now request that data and then mutate it (out of negligence or otherwise). the_data = self.get_success( self._account_data_mgr.get_global(self.user_id, "test.data") ) with self.assertRaises(TypeError): # This throws an exception because it's a frozen dict. the_data["wombat"] = False
tests/module_api/test_account_data_manager.py
114
synapse
{ "docstring": "\n Tests that modules can't introduce bugs into Synapse by mutating the result\n of `get_global`.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
58
Python
51
85ca963c1add5ca12f59238a50dfc63df4846bb7
test_account_data_manager.py
247,972
15
64
test_get_global_no_mutability
https://github.com/matrix-org/synapse.git
Add Module API for reading and writing global account data. (#12391)
184
0
72,031
12
1
5
def size(self) -> Size: return Size(self.width, self.height)
src/textual/geometry.py
32
textual
{ "docstring": "Get the size of the region.\n\n Returns:\n Size: Size of the region.\n\n ", "language": "en", "n_whitespaces": 37, "n_words": 12, "vocab_size": 8 }
7
Python
7
6ee4d41bb7a39238a18949f5648773562c6a1c9b
geometry.py
184,573
8
19
size
https://github.com/Textualize/textual.git
docs
21
0
44,676
8
1
4
async def test_remove_order(): removals: list[str] = []
tests/test_widget_removing.py
27
textual
{ "docstring": "The removal of a top-level widget should cause bottom-first removal.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
7
Python
7
9748850657337ba31f220387e4a7777a87ec019a
test_widget_removing.py
185,774
10
87
test_remove_order
https://github.com/Textualize/textual.git
Add a unit test for removal ordering via Widget.remove
13
0
45,174
8
4
9
def createPreModuleLoadCode(self, module): # This is only relevant on standalone mode for Windows if not isStandaloneMode(): return full_name = module.getFullName() if full_name == self.binding_name and isWin32Windows(): code = r"""import os path = os.environ.get("PATH", "") if not path.startswith(__nuitka_binary_dir): os.environ["PATH"] = __nuitka_binary_dir + ";" + path""" yield ( code, "Adding binary folder to runtime 'PATH' environment variable for proper Qt loading.", )
nuitka/plugins/standard/PySidePyQtPlugin.py
78
Nuitka
{ "docstring": "Method called when a module is being imported.\n\n Notes:\n If full name equals to the binding we insert code to include the dist\n folder in the 'PATH' environment variable (on Windows only).\n\n Args:\n module: the module object\n Returns:\n Code to insert and descriptive text (tuple), or (None, None).\n import os\npath = os.environ.get(\"PATH\", \"\")\nif not path.startswith(__nuitka_binary_dir):\n os.environ[\"PATH\"] = __nuitka_binary_dir + \";\" + path\n", "language": "en", "n_whitespaces": 136, "n_words": 64, "vocab_size": 54 }
44
Python
40
6b317645a6edf73a8628229c540555142725478d
PySidePyQtPlugin.py
178,696
14
43
createPreModuleLoadCode
https://github.com/Nuitka/Nuitka.git
Plugins: Minor cleanups
154
0
42,792
10
1
10
def ledoit_wolf(X, *, assume_centered=False, block_size=1000): estimator = LedoitWolf( assume_centered=assume_centered, block_size=block_size, store_precision=False, ).fit(X) return estimator.covariance_, estimator.shrinkage_
sklearn/covariance/_shrunk_covariance.py
70
scikit-learn
{ "docstring": "Estimate the shrunk Ledoit-Wolf covariance matrix.\n\n Read more in the :ref:`User Guide <shrunk_covariance>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data from which to compute the covariance estimate.\n\n assume_centered : bool, default=False\n If True, data will not be centered before computation.\n Useful to work with data whose mean is significantly equal to\n zero but is not exactly zero.\n If False, data will be centered before computation.\n\n block_size : int, default=1000\n Size of blocks into which the covariance matrix will be split.\n This is purely a memory optimization and does not affect results.\n\n Returns\n -------\n shrunk_cov : ndarray of shape (n_features, n_features)\n Shrunk covariance.\n\n shrinkage : float\n Coefficient in the convex combination used for the computation\n of the shrunk estimate.\n\n Notes\n -----\n The regularized (shrunk) covariance is:\n\n (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)\n\n where mu = trace(cov) / n_features\n ", "language": "en", "n_whitespaces": 263, "n_words": 145, "vocab_size": 103 }
15
Python
15
9a90af51510c0722ab880061107e5cfdcf09192f
_shrunk_covariance.py
261,798
7
46
ledoit_wolf
https://github.com/scikit-learn/scikit-learn.git
MAINT Parameters validation for covariance.ledoit_wolf (#24870) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: jeremiedbb <[email protected]>
48
0
77,003
11
3
15
def dist_get_direct_url(dist): # type: (Distribution) -> Optional[DirectUrl] if not dist.has_metadata(DIRECT_URL_METADATA_NAME): return None try: return DirectUrl.from_json(dist.get_metadata(DIRECT_URL_METADATA_NAME)) except ( DirectUrlValidationError, json.JSONDecodeError, UnicodeDecodeError, ) as e: logger.warning( "Error parsing %s for %s: %s", DIRECT_URL_METADATA_NAME, dist.project_name, e, ) return None
.venv/lib/python3.8/site-packages/pip/_internal/utils/direct_url_helpers.py
100
transferlearning
{ "docstring": "Obtain a DirectUrl from a pkg_resource.Distribution.\n\n Returns None if the distribution has no `direct_url.json` metadata,\n or if `direct_url.json` is invalid.\n ", "language": "en", "n_whitespaces": 29, "n_words": 20, "vocab_size": 17 }
36
Python
32
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
direct_url_helpers.py
61,169
17
62
dist_get_direct_url
https://github.com/jindongwang/transferlearning.git
upd; format
154
0
12,422
11
1
2
def cmax(self): return self["cmax"]
packages/python/plotly/plotly/graph_objs/_cone.py
22
plotly.py
{ "docstring": "\n Sets the upper bound of the color domain. Value should have the\n same units as u/v/w norm and if set, `cmin` must be set as\n well.\n\n The 'cmax' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 111, "n_words": 45, "vocab_size": 40 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_cone.py
226,571
2
11
cmax
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,244
7
4
20
def tcp_pseudoheader(tcp): # type: (TCP) -> bytes if isinstance(tcp.underlayer, IP): plen = len(bytes(tcp)) return in4_pseudoheader(socket.IPPROTO_TCP, tcp.underlayer, plen) elif conf.ipv6_enabled and _is_ipv6_layer(tcp.underlayer): plen = len(bytes(tcp)) return raw(scapy.layers.inet6.in6_pseudoheader( socket.IPPROTO_TCP, tcp.underlayer, plen)) else: raise ValueError("TCP packet does not have IP or IPv6 underlayer")
scapy/layers/inet.py
142
scapy
{ "docstring": "Pseudoheader of a TCP packet as bytes\n\n Requires underlayer to be either IP or IPv6\n ", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 15 }
40
Python
35
20ac1d00389d0735e6d8cd1347f0a53f478144ba
inet.py
209,126
10
88
tcp_pseudoheader
https://github.com/secdev/scapy.git
Support TCP-MD5 and TCP-AO (#3358) Support TCP-MD5 and TCP-AO
101
0
52,615
14
2
19
def get_collection_version_metadata(self, namespace, name, version): api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2')) url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/'] n_collection_url = _urljoin(*url_paths) error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \ % (namespace, name, version, self.name, self.api_server) data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True) self._set_cache() signatures = data.get('signatures') or [] return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'], data['download_url'], data['artifact']['sha256'], data['metadata']['dependencies'], data['href'], signatures)
lib/ansible/galaxy/api.py
262
ansible
{ "docstring": "\n Gets the collection information from the Galaxy server about a specific Collection version.\n\n :param namespace: The collection namespace.\n :param name: The collection name.\n :param version: Version of the collection to get the information for.\n :return: CollectionVersionMetadata about the collection at the version requested.\n ", "language": "en", "n_whitespaces": 86, "n_words": 43, "vocab_size": 29 }
62
Python
53
43e55db20821a1341d21ffa1e4e7e6185b244105
api.py
266,587
12
163
get_collection_version_metadata
https://github.com/ansible/ansible.git
ansible-galaxy - add signature verification of the MANIFEST.json (#76681) * ansible-galaxy collection install|verify: - Support verifying the origin of the MANIFEST.json when the Galaxy server has provided signatures. - Allow supplemental signatures to use during verification on the CLI/requirements file. * ansible-galaxy collection install: - Support disabling signature verification. This silences the warning provided by ansible-galaxy if the Galaxy server provided signatures it cannot use because no keyring is configured. - Store Galaxy server metadata alongside installed collections for provenance. This is used by 'ansible-galaxy collection verify --offline'. * Add unit tests for method that gets signatures from a Galaxy server * Add integration tests for user-provided signature sources - Test CLI option combinations - Test installing collections with valid/invalid signature sources - Test disabling GPG verification when installing collections - Test verifying collections with valid/invalid signature sources * Make signature verification advisory-by-default if signatures are provided by the Galaxy server - Make the default keyring None - Warn if the keyring is None but the Galaxy server provided signatures - Error if the keyring is None but the user supplied signatures - Error if the keyring is not None but is invalid * changelog * add ansible-galaxy user documentation for new options Co-authored-by: Matt Martz <[email protected]> Co-authored-by: Sviatoslav Sydorenko <[email protected]> Co-authored-by: Martin Krizek <[email protected]> Co-authored-by: Sandra McCann <[email protected]> Co-authored-by: Andy Mott <[email protected]> Co-authored-by: John R Barker <[email protected]>
232
0
78,486
11
6
22
def get_args(tp): if isinstance(tp, _AnnotatedAlias): return (tp.__origin__,) + tp.__metadata__ if isinstance(tp, (typing._GenericAlias, GenericAlias)): if getattr(tp, "_special", False): return () res = tp.__args__ if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: res = (list(res[:-1]), res[-1]) return res return () # 3.10+ if hasattr(typing, 'TypeAlias'): TypeAlias = typing.TypeAlias # 3.9 elif sys.version_info[:2] >= (3, 9):
pipenv/patched/notpip/_vendor/typing_extensions.py
203
pipenv
{ "docstring": "Get type arguments with all substitutions performed.\n\n For unions, basic simplifications used by Union constructor are performed.\n Examples::\n get_args(Dict[str, int]) == (str, int)\n get_args(int) == ()\n get_args(Union[int, Union[T, int], str][int]) == (int, str)\n get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])\n get_args(Callable[[], T][int]) == ([], int)\n ", "language": "en", "n_whitespaces": 121, "n_words": 45, "vocab_size": 36 }
54
Python
39
f3166e673fe8d40277b804d35d77dcdb760fc3b3
typing_extensions.py
20,897
11
101
get_args
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
166
0
3,611
16
1
23
def test_following_previous_schedule(self): local_tz = pendulum.timezone('Europe/Zurich') start = local_tz.convert(datetime.datetime(2018, 10, 28, 2, 55), dst_rule=pendulum.PRE_TRANSITION) assert start.isoformat() == "2018-10-28T02:55:00+02:00", "Pre-condition: start date is in DST" utc = timezone.convert_to_utc(start) assert utc.isoformat() == "2018-10-28T00:55:00+00:00", "Pre-condition: correct DST->UTC conversion" dag = DAG('tz_dag', start_date=start, schedule_interval='*/5 * * * *') _next = dag.following_schedule(utc) next_local = local_tz.convert(_next) assert _next.isoformat() == "2018-10-28T01:00:00+00:00" assert next_local.isoformat() == "2018-10-28T02:00:00+01:00" prev = dag.previous_schedule(utc) prev_local = local_tz.convert(prev) assert prev_local.isoformat() == "2018-10-28T02:50:00+02:00" prev = dag.previous_schedule(_next) prev_local = local_tz.convert(prev) assert prev_local.isoformat() == "2018-10-28T02:55:00+02:00" assert prev == utc
tests/models/test_dag.py
284
airflow
{ "docstring": "\n Make sure DST transitions are properly observed\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
81
Python
50
2fdc23333909096d427171002582e2906f8bbc0a
test_dag.py
43,883
18
167
test_following_previous_schedule
https://github.com/apache/airflow.git
Fix remaining mypy issues in "core" Airflow (#20795) Co-authored-by: Josh Fell <[email protected]> Co-authored-by: Tzu-ping Chung <[email protected]> Co-authored-by: Jarek Potiuk <[email protected]>
207
0
8,084
10
3
21
def close(self) -> None: if not self.has_closed: # update the run.txt with stopping time self.run_txt_data["stop_time"] = datetime.datetime.now().isoformat(sep=" ") with open(os.path.join(self.submit_config.run_dir, "run.txt"), "w") as f: pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False) self.has_closed = True # detach the global singleton global _run_context if _run_context is self: _run_context = None
reconstruction/ostec/external/stylegan2/dnnlib/submission/run_context.py
163
insightface
{ "docstring": "Close the context and clean up.\n Should only be called once.", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
46
Python
37
7375ee364e0df2a417f92593e09557f1b2a3575a
run_context.py
9,376
11
97
close
https://github.com/deepinsight/insightface.git
initialize ostec
167
0
1,590
16
7
20
def wrapCommandForDebuggerForExec(*args): gdb_path = getExecutablePath("gdb") # Windows extra ball, attempt the downloaded one. if isWin32Windows() and gdb_path is None: from nuitka.Options import assumeYesForDownloads mingw64_gcc_path = getCachedDownloadedMinGW64( target_arch=getArchitecture(), assume_yes_for_downloads=assumeYesForDownloads(), ) with withEnvironmentPathAdded("PATH", os.path.dirname(mingw64_gcc_path)): lldb_path = getExecutablePath("lldb") if gdb_path is None and lldb_path is None: lldb_path = getExecutablePath("lldb") if lldb_path is None: general.sysexit("Error, no 'gdb' or 'lldb' binary found in path.") if gdb_path is not None: args = (gdb_path, "gdb", "-ex=run", "-ex=where", "-ex=quit", "--args") + args else: args = (lldb_path, "lldb", "-o", "run", "-o", "bt", "-o", "quit", "--") + args return args
nuitka/utils/Execution.py
254
Nuitka
{ "docstring": "Wrap a command for system debugger to call exec\n\n Args:\n args: (list of str) args for call to be debugged\n Returns:\n args tuple with debugger command inserted\n\n Notes:\n Currently only gdb and lldb are supported, but adding more\n debuggers would be very welcome.\n ", "language": "en", "n_whitespaces": 83, "n_words": 43, "vocab_size": 36 }
90
Python
60
98badaaafd4e56529378947358acae489035fa1e
Execution.py
178,724
19
142
wrapCommandForDebuggerForExec
https://github.com/Nuitka/Nuitka.git
Windows: Make running in debugger work with cmd files as well
214
0
42,804
14
9
29
def sort_bbox(end2end_xywh_bboxes, no_match_end2end_indexes): groups = [] bbox_groups = [] for index, end2end_xywh_bbox in zip(no_match_end2end_indexes, end2end_xywh_bboxes): this_bbox = end2end_xywh_bbox if len(groups) == 0: groups.append([index]) bbox_groups.append([this_bbox]) else: flag = False for g, bg in zip(groups, bbox_groups): # this_bbox is belong to bg's row or not if is_abs_lower_than_threshold(this_bbox, bg[0]): g.append(index) bg.append(this_bbox) flag = True break if not flag: # this_bbox is not belong to bg's row, create a row. groups.append([index]) bbox_groups.append([this_bbox]) # sorted bboxes in a group tmp_groups, tmp_bbox_groups = [], [] for g, bg in zip(groups, bbox_groups): g_sorted, bg_sorted = sort_line_bbox(g, bg) tmp_groups.append(g_sorted) tmp_bbox_groups.append(bg_sorted) # sorted groups, sort by coord y's value. sorted_groups = [None] * len(tmp_groups) sorted_bbox_groups = [None] * len(tmp_bbox_groups) ys = [bg[0][1] for bg in tmp_bbox_groups] sorted_ys = sorted(ys) for g, bg in zip(tmp_groups, tmp_bbox_groups): idx = sorted_ys.index(bg[0][1]) sorted_groups[idx] = g sorted_bbox_groups[idx] = bg # flatten, get final result end2end_sorted_idx_list, end2end_sorted_bbox_list \ = flatten(sorted_groups, sorted_bbox_groups) # check sorted #img = cv2.imread('/data_0/yejiaquan/data/TableRecognization/singleVal/PMC3286376_004_00.png') #img = drawBboxAfterSorted(img, sorted_groups, sorted_bbox_groups) return end2end_sorted_idx_list, end2end_sorted_bbox_list, sorted_groups, sorted_bbox_groups
ppstructure/table/table_master_match.py
411
PaddleOCR
{ "docstring": "\n This function will group the render end2end bboxes in row.\n :param end2end_xywh_bboxes:\n :param no_match_end2end_indexes:\n :return:\n ", "language": "en", "n_whitespaces": 31, "n_words": 15, "vocab_size": 14 }
162
Python
98
ddaa2c2552e19635cd6cdf38619f1f176c358f89
table_master_match.py
24,501
36
260
sort_bbox
https://github.com/PaddlePaddle/PaddleOCR.git
add SLANet
534
0
4,748
16
20
17
def process_directive(self, directive): # Parse the line: split it up, make sure the right number of words # is there, and return the relevant words. 'action' is always # defined: it's the first word of the line. Which of the other # three are defined depends on the action; it'll be either # patterns, (dir and patterns), or (dirpattern). action, patterns, thedir, dirpattern = self._parse_directive(directive) # OK, now we know that the action is valid and we have the # right number of words on the line for that action -- so we # can proceed with minimal error-checking. if action == 'include': for pattern in patterns: if not self._include_pattern(pattern, anchor=True): logger.warning('no files found matching %r', pattern) elif action == 'exclude': for pattern in patterns: found = self._exclude_pattern(pattern, anchor=True) #if not found: # logger.warning('no previously-included files ' # 'found matching %r', pattern) elif action == 'global-include': for pattern in patterns: if not self._include_pattern(pattern, anchor=False): logger.warning('no files found matching %r ' 'anywhere in distribution', pattern) elif action == 'global-exclude': for pattern in patterns: found = self._exclude_pattern(pattern, anchor=False) #if not found: # logger.warning('no previously-included files ' # 'matching %r found anywhere in ' # 'distribution', pattern) elif action == 'recursive-include': for pattern in patterns: if not self._include_pattern(pattern, prefix=thedir): logger.warning('no files found matching %r ' 'under directory %r', pattern, thedir) elif action == 'recursive-exclude': for pattern in patterns: found = self._exclude_pattern(pattern, prefix=thedir) #if not found: # logger.warning('no previously-included files ' # 'matching %r found under directory %r', # pattern, thedir) elif action == 'graft': if not self._include_pattern(None, prefix=dirpattern): logger.warning('no directories found matching %r', dirpattern) elif action == 'prune': if not self._exclude_pattern(None, prefix=dirpattern): logger.warning('no previously-included directories found ' 'matching %r', dirpattern) else: # pragma: no cover # This should never happen, as it should be caught in # _parse_template_line raise DistlibException( 'invalid action %r' % action) # # Private API #
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/manifest.py
437
transferlearning
{ "docstring": "\n Process a directive which either adds some files from ``allfiles`` to\n ``files``, or removes some files from ``files``.\n\n :param directive: The directive to process. This should be in a format\n compatible with distutils ``MANIFEST.in`` files:\n\n http://docs.python.org/distutils/sourcedist.html#commands\n ", "language": "en", "n_whitespaces": 105, "n_words": 36, "vocab_size": 30 }
307
Python
136
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
manifest.py
62,044
36
247
process_directive
https://github.com/jindongwang/transferlearning.git
upd; format
1,158
0
12,851
16
1
8
def hex6(self) -> str: r, g, b, a = self.clamped return f"#{r:02X}{g:02X}{b:02X}"
src/textual/color.py
54
textual
{ "docstring": "The color in CSS hex form, with 6 digits for RGB. Alpha is ignored.\n\n Returns:\n str: A CSS hex-style color, e.g. \"#46b3de\"\n\n ", "language": "en", "n_whitespaces": 47, "n_words": 22, "vocab_size": 21 }
12
Python
12
6f7d3b5ad711aa7df62ca6b3fca5cd638dcec665
color.py
184,969
9
22
hex6
https://github.com/Textualize/textual.git
text color
33
0
44,871
8
10
16
def is_matching(G, matching): if isinstance(matching, dict): matching = matching_dict_to_set(matching) nodes = set() for edge in matching: if len(edge) != 2: raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") u, v = edge if u not in G or v not in G: raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") if u == v: return False if not G.has_edge(u, v): return False if u in nodes or v in nodes: return False nodes.update(edge) return True
networkx/algorithms/matching.py
185
networkx
{ "docstring": "Return True if ``matching`` is a valid matching of ``G``\n\n A *matching* in a graph is a set of edges in which no two distinct\n edges share a common endpoint. Each node is incident to at most one\n edge in the matching. The edges are said to be independent.\n\n Parameters\n ----------\n G : NetworkX graph\n\n matching : dict or set\n A dictionary or set representing a matching. If a dictionary, it\n must have ``matching[u] == v`` and ``matching[v] == u`` for each\n edge ``(u, v)`` in the matching. If a set, it must have elements\n of the form ``(u, v)``, where ``(u, v)`` is an edge in the\n matching.\n\n Returns\n -------\n bool\n Whether the given set or dictionary represents a valid matching\n in the graph.\n\n Raises\n ------\n NetworkXError\n If the proposed matching has an edge to a node not in G.\n Or if the matching is not a collection of 2-tuple edges.\n\n ", "language": "en", "n_whitespaces": 257, "n_words": 152, "vocab_size": 86 }
75
Python
44
28b3014d68d2b4e40d3e02219770296a827bd55c
matching.py
176,367
18
111
is_matching
https://github.com/networkx/networkx.git
Update matching functions for error validation and speed (#4897) * First steps to update matching functions for #4644 Expand tests Change API to raise NetworkXError when matching involves nodes not in G Update is_*_matching to 100+ times faster. * improve matching_dict_to_set and docs for min_weight_matching * fix sphinx error
201
0
41,853
13
2
14
def test_google_type_conversion(mock_fields_meta_data): desired_mapping = { "accessible_bidding_strategy.target_impression_share.location": "string", # "ENUM" "campaign.name": ["string", "null"], # STRING "campaign.end_date": ["string", "null"], # DATE "campaign.optimization_score": ["number", "null"], # DOUBLE "campaign.resource_name": ["string", "null"], # RESOURCE_NAME "campaign.shopping_setting.campaign_priority": ["integer", "null"], # INT32 "campaign.shopping_setting.merchant_id": ["integer", "null"], # INT64 "campaign_budget.explicitly_shared": ["boolean", "null"], # BOOLEAN "bidding_strategy.enhanced_cpc": ["string", "null"], # MESSAGE "segments.date": ["string", "null"], # autoadded, should be DATE } # query is select field of each type query = """SELECT accessible_bidding_strategy.target_impression_share.location, campaign.name, campaign.end_date, campaign.optimization_score, campaign.resource_name, campaign.shopping_setting.campaign_priority, campaign.shopping_setting.merchant_id, campaign_budget.explicitly_shared, bidding_strategy.enhanced_cpc FROM campaign""" instance = stream_instance(query=query, api_mock=mock_fields_meta_data) final_schema = instance.get_json_schema() schema_properties = final_schema.get("properties") for prop, value in schema_properties.items(): assert desired_mapping[prop] == value.get("type"), f"{prop} should be {value}"
airbyte-integrations/connectors/source-google-ads/unit_tests/test_source.py
285
airbyte
{ "docstring": "\n query may be invalid (fields incompatibility did not checked).\n But we are just testing types, without submitting the query and further steps.\n Doing that with all possible types.\n \n SELECT\n accessible_bidding_strategy.target_impression_share.location,\n campaign.name,\n campaign.end_date,\n campaign.optimization_score,\n campaign.resource_name,\n campaign.shopping_setting.campaign_priority,\n campaign.shopping_setting.merchant_id,\n campaign_budget.explicitly_shared,\n bidding_strategy.enhanced_cpc\n FROM campaign\n ", "language": "en", "n_whitespaces": 174, "n_words": 40, "vocab_size": 39 }
91
Python
60
d4f8b25b8e3e109db866352cf1dcec0d73c92cbd
test_source.py
5,061
31
142
test_google_type_conversion
https://github.com/airbytehq/airbyte.git
Source Google Ads: Improve unit and integration tests (#12651) * #12650 source Googel ads: tests * #12650 source google ads: add changelog item * #12650 source google ads: add comments to tests * auto-bump connector version Co-authored-by: Octavia Squidington III <[email protected]>
206
0
714
11
1
5
def has_refs(self) -> bool: return len(self._session_report_run_counts) > 0
lib/streamlit/forward_msg_cache.py
30
streamlit
{ "docstring": "True if this Entry has references from any AppSession.\n\n If not, it can be removed from the cache.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
8
Python
8
704eab3478cf69847825b23dabf15813a8ac9fa2
forward_msg_cache.py
118,557
6
17
has_refs
https://github.com/streamlit/streamlit.git
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
30
0
26,290
9
1
10
def test_multi_part_language_bad_format(self, m): m.return_value = ["chi_sim", "eng"] msgs = check_default_language_available(None) self.assertEqual(len(msgs), 1) self.assertEqual(msgs[0].level, ERROR)
src/paperless_tesseract/tests/test_checks.py
78
paperless-ngx
{ "docstring": "\n GIVEN:\n - An OCR language which is multi part (ie chi-sim)\n - The language is correctly NOT formatted\n WHEN:\n - Installed packages are checked\n THEN:\n - No errors are reported\n ", "language": "en", "n_whitespaces": 103, "n_words": 30, "vocab_size": 24 }
14
Python
13
55ef0d4a1b62c3abe8500cad97ddeecf9f746b84
test_checks.py
320,360
5
47
test_multi_part_language_bad_format
https://github.com/paperless-ngx/paperless-ngx.git
Fixes language code checks around two part languages
49
0
117,148
9
1
15
def simple_test(self, feats, img_metas, **kwargs): all_cls_scores, all_mask_preds = self(feats, img_metas) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results
mmdet/models/dense_heads/maskformer_head.py
125
mmdetection
{ "docstring": "Test without augmentaton.\n\n Args:\n feats (list[Tensor]): Multi-level features from the\n upstream network, each is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple: A tuple contains two tensors.\n\n - mask_cls_results (Tensor): Mask classification logits,\\\n shape (batch_size, num_queries, cls_out_channels).\n Note `cls_out_channels` should includes background.\n - mask_pred_results (Tensor): Mask logits, shape \\\n (batch_size, num_queries, h, w).\n ", "language": "en", "n_whitespaces": 191, "n_words": 55, "vocab_size": 49 }
33
Python
27
4bb184bae070f37febb10f82bee3a217dc1ad7c5
maskformer_head.py
244,108
11
80
simple_test
https://github.com/open-mmlab/mmdetection.git
[Enhance] MaskFormer refactor (#7471) * maskformer refactor update docstring update docstring update unit test update unit test update unit test * remove redundant code * update unit test
133
0
70,242
11
1
12
def test_query_by_embedding_excluded_meta_data_return_embedding_true(self, mocked_document_store): mocked_document_store.return_embedding = True mocked_document_store.excluded_meta_data = ["foo", "embedding"] mocked_document_store.query_by_embedding(self.query_emb) _, kwargs = mocked_document_store.client.search.call_args # we expect "embedding" was removed from the final query assert kwargs["body"]["_source"] == {"excludes": ["foo"]}
test/document_stores/test_opensearch.py
102
haystack
{ "docstring": "\n Test that when `return_embedding==True` the field should NOT be excluded even if it\n was added to `excluded_meta_data`\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
30
Python
28
e7627c3f8b241654b61f8523479c81f855102f0a
test_opensearch.py
257,656
6
57
test_query_by_embedding_excluded_meta_data_return_embedding_true
https://github.com/deepset-ai/haystack.git
Use opensearch-py in OpenSearchDocumentStore (#2691) * add Opensearch extras * let OpenSearchDocumentStore use opensearch-py * Update Documentation & Code Style * fix a bug found after adding tests Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Sara Zan <[email protected]>
79
0
75,108
10
1
3
def id() -> str: return _distro.id()
pipenv/patched/notpip/_vendor/distro/distro.py
25
pipenv
{ "docstring": "\n Return the distro ID of the current distribution, as a\n machine-readable string.\n\n For a number of OS distributions, the returned distro ID value is\n *reliable*, in the sense that it is documented and that it does not change\n across releases of the distribution.\n\n This package maintains the following reliable distro ID values:\n\n ============== =========================================\n Distro ID Distribution\n ============== =========================================\n \"ubuntu\" Ubuntu\n \"debian\" Debian\n \"rhel\" RedHat Enterprise Linux\n \"centos\" CentOS\n \"fedora\" Fedora\n \"sles\" SUSE Linux Enterprise Server\n \"opensuse\" openSUSE\n \"amzn\" Amazon Linux\n \"arch\" Arch Linux\n \"cloudlinux\" CloudLinux OS\n \"exherbo\" Exherbo Linux\n \"gentoo\" GenToo Linux\n \"ibm_powerkvm\" IBM PowerKVM\n \"kvmibm\" KVM for IBM z Systems\n \"linuxmint\" Linux Mint\n \"mageia\" Mageia\n \"mandriva\" Mandriva Linux\n \"parallels\" Parallels\n \"pidora\" Pidora\n \"raspbian\" Raspbian\n \"oracle\" Oracle Linux (and Oracle Enterprise Linux)\n \"scientific\" Scientific Linux\n \"slackware\" Slackware\n \"xenserver\" XenServer\n \"openbsd\" OpenBSD\n \"netbsd\" NetBSD\n \"freebsd\" FreeBSD\n \"midnightbsd\" MidnightBSD\n \"rocky\" Rocky Linux\n \"aix\" AIX\n ============== =========================================\n\n If you have a need to get distros for reliable IDs added into this set,\n or if you find that the :func:`distro.id` function returns a different\n distro ID for one of the listed distros, please create an issue in the\n `distro issue tracker`_.\n\n **Lookup hierarchy and transformations:**\n\n First, the ID is obtained from the following sources, in the specified\n order. The first available and non-empty value is used:\n\n * the value of the \"ID\" attribute of the os-release file,\n\n * the value of the \"Distributor ID\" attribute returned by the lsb_release\n command,\n\n * the first part of the file name of the distro release file,\n\n The so determined ID value then passes the following transformations,\n before it is returned by this method:\n\n * it is translated to lower case,\n\n * blanks (which should not be there anyway) are translated to underscores,\n\n * a normalization of the ID is performed, based upon\n `normalization tables`_. The purpose of this normalization is to ensure\n that the ID is as reliable as possible, even across incompatible changes\n in the OS distributions. A common reason for an incompatible change is\n the addition of an os-release file, or the addition of the lsb_release\n command, with ID values that differ from what was previously determined\n from the distro release file name.\n ", "language": "en", "n_whitespaces": 754, "n_words": 359, "vocab_size": 208 }
6
Python
6
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
distro.py
21,509
79
13
id
https://github.com/pypa/pipenv.git
Vendor in pip 22.1.2
12
0
3,889
7
1
33
def test_execute_task_instances_backfill_tasks_wont_execute(self, dag_maker): dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute' task_id_1 = 'dummy_task' with dag_maker(dag_id=dag_id): task1 = EmptyOperator(task_id=task_id_1) self.scheduler_job = SchedulerJob(subdir=os.devnull) session = settings.Session() dr1 = dag_maker.create_dagrun(run_type=DagRunType.BACKFILL_JOB) ti1 = TaskInstance(task1, run_id=dr1.run_id) ti1.refresh_from_db() ti1.state = State.SCHEDULED session.merge(ti1) session.flush() assert dr1.is_backfill self.scheduler_job._critical_section_execute_task_instances(session) session.flush() ti1.refresh_from_db() assert State.SCHEDULED == ti1.state session.rollback()
tests/jobs/test_scheduler_job.py
222
airflow
{ "docstring": "\n Tests that backfill tasks won't get executed.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
43
Python
31
49e336ae0302b386a2f47269a6d13988382d975f
test_scheduler_job.py
47,528
19
131
test_execute_task_instances_backfill_tasks_wont_execute
https://github.com/apache/airflow.git
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
180
0
9,148
11
1
24
def rand_series_with_duplicate_datetimeindex() -> Series:
    dates = [
        datetime(2000, 1, 2),
        datetime(2000, 1, 2),
        datetime(2000, 1, 2),
        datetime(2000, 1, 3),
        datetime(2000, 1, 3),
        datetime(2000, 1, 3),
        datetime(2000, 1, 4),
        datetime(2000, 1, 4),
        datetime(2000, 1, 4),
        datetime(2000, 1, 5),
    ]
    return Series(np.random.randn(len(dates)), index=dates)


# ----------------------------------------------------------------
# Scalars
# ----------------------------------------------------------------
@pytest.fixture(
    params=[
        (
            Interval(left=0, right=5, inclusive="right"),
            IntervalDtype("int64", inclusive="right"),
        ),
        (
            Interval(left=0.1, right=0.5, inclusive="right"),
            IntervalDtype("float64", inclusive="right"),
        ),
        (Period("2012-01", freq="M"), "period[M]"),
        (Period("2012-02-01", freq="D"), "period[D]"),
        (
            Timestamp("2011-01-01", tz="US/Eastern"),
            DatetimeTZDtype(tz="US/Eastern"),
        ),
        (Timedelta(seconds=500), "timedelta64[ns]"),
    ]
)
pandas/conftest.py
360
@pytest.fixture( params=[ ( Interval(left=0, right=5, inclusive="right"), IntervalDtype("int64", inclusive="right"), ), ( Interval(left=0.1, right=0.5, inclusive="right"), IntervalDtype("float64", inclusive="right"), ), (Period("2012-01", freq="M"), "period[M]"), (Period("2012-02-01", freq="D"), "period[D]"), ( Timestamp("2011-01-01", tz="US/Eastern"), DatetimeTZDtype(tz="US/Eastern"), ), (Timedelta(seconds=500), "timedelta64[ns]"), ] )
pandas
{ "docstring": "\n Fixture for Series with a DatetimeIndex that has duplicates.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
78
Python
43
f538568afc2c76c2d738d32e3544cf9fe6742960
conftest.py
167,613
17
120
rand_series_with_duplicate_datetimeindex
https://github.com/pandas-dev/pandas.git
TYP: misc return type annotations (#47558)
290
1
40,065
13
2
5
def execute():
    name = frappe.db.sql(
    )
    if not name:
        frappe.db.sql(
            "update `tabProduction Order` pro \
            set \
            description = (select description from tabItem where name=pro.production_item) \
            where \
            ifnull(description, '') = ''"
        )
erpnext/patches/v5_7/update_item_description_based_on_item_master.py
54
erpnext
{ "docstring": " select name from `tabPatch Log` \\\n\t\twhere \\\n\t\t\tpatch like 'execute:frappe.db.sql(\"update `tabProduction Order` pro set description%' ", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 15 }
33
Python
24
494bd9ef78313436f0424b918f200dab8fc7c20b
update_item_description_based_on_item_master.py
66,864
14
26
execute
https://github.com/frappe/erpnext.git
style: format code with black
22
0
14,363
10
2
11
def iter_mapped_dependencies(self) -> Iterator["Operator"]:
    from airflow.models.xcom_arg import XComArg
    for ref in XComArg.iter_xcom_args(self._get_expansion_kwargs()):
        yield ref.operator
airflow/models/mappedoperator.py
62
airflow
{ "docstring": "Upstream dependencies that provide XComs used by this task for task mapping.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
14
Python
14
197cff3194e855b9207c3c0da8ae093a0d5dda55
mappedoperator.py
47,756
5
37
iter_mapped_dependencies
https://github.com/apache/airflow.git
Ensure TaskMap only checks "relevant" dependencies (#23053) When looking for "mapped dependants" of a task, we only want a task if it not only is a direct downstream of the task, but also it actually "uses" the task's pushed XCom for task mapping. So we need to peek into the mapped downstream task's expansion kwargs, and only count it as a mapped dependant if the upstream is referenced there.
46
0
9,245
9
6
16
def get_expiry_date(self, **kwargs):
    try:
        modification = kwargs['modification']
    except KeyError:
        modification = timezone.now()
    # Same comment as in get_expiry_age
    try:
        expiry = kwargs['expiry']
    except KeyError:
        expiry = self.get('_session_expiry')
    if isinstance(expiry, datetime):
        return expiry
    elif isinstance(expiry, str):
        return datetime.fromisoformat(expiry)
    expiry = expiry or self.get_session_cookie_age()
    return modification + timedelta(seconds=expiry)
django/contrib/sessions/backends/base.py
152
django
{ "docstring": "Get session the expiry date (as a datetime object).\n\n Optionally, this function accepts `modification` and `expiry` keyword\n arguments specifying the modification and expiry of the session.\n ", "language": "en", "n_whitespaces": 47, "n_words": 26, "vocab_size": 22 }
46
Python
30
436862787cbdbd68b0ba20ed8c23b295e3679df3
base.py
203,048
15
89
get_expiry_date
https://github.com/django/django.git
Refs #29708 -- Made SessionBase store expiry as string.
182
0
50,223
12
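In practice `get_expiry_date` is called on a request's session rather than on a backend constructed by hand. A minimal sketch, assuming a configured Django project with the session middleware enabled (the view name is made up for illustration):

from django.http import HttpResponse

def session_info(request):
    # Set a 10-minute expiry, then read back the computed expiry datetime
    request.session.set_expiry(600)
    expires_at = request.session.get_expiry_date()
    return HttpResponse(f"Session expires at {expires_at.isoformat()}")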
2
27
def evaluate(model, criterion, metric, data_loader):
    model.eval()
    metric.reset()
    losses = []
    for batch in data_loader:
        input_ids, token_type_ids, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = criterion(logits, labels)
        probs = F.sigmoid(logits)
        losses.append(loss.numpy())
        metric.update(probs, labels)
    auc, f1_score = metric.accumulate()
    print("eval loss: %.5f, auc: %.5f, f1 score: %.5f" %
          (np.mean(losses), auc, f1_score))
    model.train()
    metric.reset()
examples/text_classification/multi_label/train.py
187
PaddleNLP
{ "docstring": "\n Given a dataset, it evals model and computes the metric.\n\n Args:\n model(obj:`paddle.nn.Layer`): A model to classify texts.\n criterion(obj:`paddle.nn.Layer`): It can compute the loss.\n metric(obj:`paddle.metric.Metric`): The evaluation metric.\n data_loader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.\n ", "language": "en", "n_whitespaces": 72, "n_words": 34, "vocab_size": 30 }
51
Python
41
621357338437ee420eabbbf5ab19065bc85e73a5
train.py
322,167
16
116
evaluate
https://github.com/PaddlePaddle/PaddleNLP.git
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: tianxin <[email protected]>
129
0
118,077
11
1
2
def ygap(self):
    return self["ygap"]
packages/python/plotly/plotly/graph_objs/_heatmap.py
22
plotly.py
{ "docstring": "\n Sets the vertical gap (in pixels) between bricks.\n\n The 'ygap' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 84, "n_words": 32, "vocab_size": 31 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_heatmap.py
226,903
2
11
ygap
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,576
7
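The `ygap` property above (like its `xgap` counterpart) is normally set when the trace is constructed. A small usage sketch, assuming `plotly` is installed:

import plotly.graph_objects as go

# Insert a 2px vertical gap between heatmap bricks
fig = go.Figure(go.Heatmap(z=[[1, 2], [3, 4]], ygap=2))
fig.show()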
1
3
def __enter__(self):
    raise NotImplementedError()
python3.10.4/Lib/asyncio/unix_events.py
20
XX-Net
{ "docstring": "Enter the watcher's context and allow starting new processes\n\n This function must return self", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 14 }
4
Python
4
8198943edd73a363c266633e1aa5b2a9e9c9f526
unix_events.py
220,938
2
10
__enter__
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
18
0
56,170
7
1
2
def adapt_streams_if_testing(func):
airbyte-integrations/connectors/source-mixpanel/source_mixpanel/testing.py
13
airbyte
{ "docstring": "\n Due to API limitations (60 requests per hour) there is unavailable to make acceptance tests in normal mode,\n so we're reducing amount of requests by, if `is_testing` flag is set in config:\n\n 1. Take time range in only 1 month\n 2. Patch Funnels, so we download data only for one Funnel entity\n 3. Removing RPS limit for faster testing\n ", "language": "en", "n_whitespaces": 78, "n_words": 59, "vocab_size": 51 }
2
Python
2
d79b319819650f99fae2ab8c6c8d3ab25d474cf1
testing.py
5,687
4
15
adapt_streams_if_testing
https://github.com/airbytehq/airbyte.git
:tada: Source Mixpanel: Beta preparation (#13372) * Add extra mode to Source, to allow run acceptance tests * move streams into distinct modules * Add property name transformation for Export stream for avoiding collisions * Update doc * Add `date_window_size`
5
0
808
6
1
6
def getsource(object):
    lines, lnum = getsourcelines(object)
    return ''.join(lines)

# --------------------------------------------------- class tree extraction
python3.10.4/Lib/inspect.py
40
XX-Net
{ "docstring": "Return the text of the source code for an object.\n\n The argument may be a module, class, method, function, traceback, frame,\n or code object. The source code is returned as a single string. An\n OSError is raised if the source code cannot be retrieved.", "language": "en", "n_whitespaces": 54, "n_words": 44, "vocab_size": 32 }
13
Python
13
8198943edd73a363c266633e1aa5b2a9e9c9f526
inspect.py
218,386
3
21
getsource
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
21
0
55,274
8
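A short usage sketch of the standard-library function above; any importable object whose source is pure Python works:

import inspect
import json

# Print the source text of a pure-Python stdlib function
print(inspect.getsource(json.dumps))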
2
11
def set_floatx(value):
    global _FLOATX
    accepted_dtypes = {"float16", "float32", "float64"}
    if value not in accepted_dtypes:
        raise ValueError(
            f"Unknown `floatx` value: {value}. "
            f"Expected one of {accepted_dtypes}"
        )
    _FLOATX = str(value)


@keras_export("keras.backend.image_data_format")
@tf.__internal__.dispatch.add_dispatch_support
keras/backend_config.py
98
@keras_export("keras.backend.image_data_format") @tf.__internal__.dispatch.add_dispatch_support
keras
{ "docstring": "Sets the default float type.\n\n Note: It is not recommended to set this to float16 for training, as this\n will likely cause numeric stability issues. Instead, mixed precision, which\n is using a mix of float16 and float32, can be used by calling\n `tf.keras.mixed_precision.set_global_policy('mixed_float16')`. See the\n [mixed precision guide](\n https://www.tensorflow.org/guide/keras/mixed_precision) for details.\n\n Args:\n value: String; `'float16'`, `'float32'`, or `'float64'`.\n\n Example:\n >>> tf.keras.backend.floatx()\n 'float32'\n >>> tf.keras.backend.set_floatx('float64')\n >>> tf.keras.backend.floatx()\n 'float64'\n >>> tf.keras.backend.set_floatx('float32')\n\n Raises:\n ValueError: In case of invalid value.\n ", "language": "en", "n_whitespaces": 140, "n_words": 76, "vocab_size": 65 }
31
Python
29
f3cafc77c269f7ecbf80bb4cf4b54e28c153f4e6
backend_config.py
277,755
9
37
set_floatx
https://github.com/keras-team/keras.git
resolve line-too-long in root directory
80
1
82,185
12
3
45
def test_fetch_final_taken_task(business_client):
    config = dict(
        title='test_label_races',
        is_published=True,
        label_config=
    )
    annotation_result = json.dumps([{
        'from_name': 'text_class',
        'to_name': 'text',
        'type': 'choices',
        'value': {'choices': ['class_A']}
    }])
    project = make_project(config, business_client.user)
    project.sampling = Project.SEQUENCE
    project.save()
    ann1 = make_annotator({'email': '[email protected]'}, project, True)
    ann2 = make_annotator({'email': '[email protected]'}, project, True)
    # create tasks
    tasks = []
    num_tasks = 2
    for i in range(num_tasks):
        tasks.append({'data': {'text': f'this is {str(i)}'}})
    r = business_client.post(
        f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json')
    assert r.status_code == 201
    # set max annotations
    r = business_client.patch(
        f'/api/projects/{project.id}/',
        data=json.dumps({'maximum_annotations': 2}),
        content_type='application/json'
    )
    assert r.status_code == 200
    print('ann1 takes any task and complete it')
    r = ann1.get(f'/api/projects/{project.id}/next')
    task_id = json.loads(r.content)['id']
    ann1.post(f'/api/tasks/{task_id}/annotations/', data={'task': task_id, 'result': annotation_result})
    print('ann2 takes the same task (because of depth-first) but just lock it - don\'t complete')
    r = ann2.get(f'/api/projects/{project.id}/next')
    assert json.loads(r.content)['id'] == task_id
    print('ann1 takes another task')
    r = ann1.get(f'/api/projects/{project.id}/next')
    another_task_id = json.loads(r.content)['id']
    assert another_task_id != task_id
    print('ann1 should never take task_id since he has completed it')
    for i in range(3):
        r = ann1.get(f'/api/projects/{project.id}/next')
        assert json.loads(r.content)['id'] == another_task_id


@pytest.mark.skipif(not redis_healthcheck(), reason='Multi user locks only supported with redis enabled')
@pytest.mark.django_db
label_studio/tests/test_next_task.py
662
@pytest.mark.skipif(not redis_healthcheck(), reason='Multi user locks only supported with redis enabled') @pytest.mark.django_db
label-studio
{ "docstring": "\n <View>\n <Text name=\"text\" value=\"$text\"></Text>\n <Choices name=\"text_class\" choice=\"single\" toName=\"text\">\n <Choice value=\"class_A\"></Choice>\n <Choice value=\"class_B\"></Choice>\n </Choices>\n </View>", "language": "en", "n_whitespaces": 105, "n_words": 14, "vocab_size": 13 }
172
Python
119
aaa022d8acbeb002eab2930965da276e9298cd54
test_next_task.py
177,569
52
330
test_fetch_final_taken_task
https://github.com/heartexlabs/label-studio.git
[ext] Add video interpolation by param (DEV-74) (#1735) * Add video interpolation by param * Change label-studio-tools commit * Fix typo and add some comments * Fix context field * Fix label-studio-tools link * fix link to ext dep * Update requirements for label_studio_tools * Change label-studio-tools commit with refactoring * Change label-studio-tools requirement * Change label-studio-tools version to dev3 * Change base settings * Add interpolate_key_frames option in ExportMixin * Change serializer options to context * Add serializer for Export - Add serializer for Export - Switch to is_video_object_tracking and new extract_key_frames logic - Change label-studio-tools requirement * Fix serializer fields * Fix export type in serializer * Add exportType to support both export params * Move to parsed_config in is_video_object_tracking * Add interpolate_key_frames to SerializationOptionsSerializer * Change label-studio-tools to version with sequence * Change label-studio-tools with time fix * Add parse_label_config to Project model * Fix new project condition * Change from presave signal to save method * Fix input data for tests * Upgrade label-studio-tools version * Change label-studio-tools version with key frames order fix Co-authored-by: Sergey Zhuk <[email protected]> Co-authored-by: Max Tkachenko <[email protected]> Co-authored-by: Sergei Ivashchenko <[email protected]>
363
1
42,442
17
2
16
def create_central_storage_strategy():
    compute_devices = (
        ["cpu:0", "gpu:0"]
        if (tf.config.list_logical_devices("GPU"))
        else ["cpu:0"]
    )
    return tf.distribute.experimental.CentralStorageStrategy(
        compute_devices, parameter_device="cpu:0"
    )


TESTCASES = (
    {"testcase_name": "base", "strategy_fn": default_strategy_fn},
    {"testcase_name": "distribute", "strategy_fn": create_mirrored_strategy},
)


@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
keras/mixed_precision/layer_test.py
156
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
keras
{ "docstring": "Create a CentralStorageStrategy, using a GPU if it is available.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
31
Python
25
84afc5193d38057e2e2badf9c889ea87d80d8fbf
layer_test.py
274,999
9
44
create_central_storage_strategy
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
77
1
81,282
12
1
21
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)


preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode='',
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__

DOC =

setattr(ResNetRS50, '__doc__', ResNetRS50.__doc__ + DOC)
setattr(ResNetRS152, '__doc__', ResNetRS152.__doc__ + DOC)
setattr(ResNetRS200, '__doc__', ResNetRS200.__doc__ + DOC)
setattr(ResNetRS270, '__doc__', ResNetRS270.__doc__ + DOC)
setattr(ResNetRS350, '__doc__', ResNetRS350.__doc__ + DOC)
setattr(ResNetRS420, '__doc__', ResNetRS420.__doc__ + DOC)
keras/applications/resnet_rs.py
205
keras
{ "docstring": "\n\n Reference:\n [Revisiting ResNets: Improved Training and Scaling Strategies](\n https://arxiv.org/pdf/2103.07579.pdf)\n\n For image classification use cases, see\n [this page for detailed examples](\n https://keras.io/api/applications/#usage-examples-for-image-classification-models).\n\n For transfer learning use cases, make sure to read the\n [guide to transfer learning & fine-tuning](\n https://keras.io/guides/transfer_learning/).\n\n Note: each Keras Application expects a specific kind of input preprocessing.\n For ResNetRs, by default input preprocessing is included as a part of the\n model (as a `Rescaling` layer), and thus\n `tf.keras.applications.resnet_rs.preprocess_input` is actually a\n pass-through function. In this use case, ResNetRS models expect their inputs\n to be float tensors of pixels with values in the [0-255] range.\n At the same time, preprocessing as a part of the model (i.e. `Rescaling`\n layer) can be disabled by setting `include_preprocessing` argument to False.\n With preprocessing disabled ResNetRS models expect their inputs to be float\n tensors of pixels with values in the [-1, 1] range.\n\n Args:\n depth: Depth of ResNet network.\n dropout_rate: dropout rate before final classifier layer.\n bn_momentum: Momentum parameter for Batch Normalization layers.\n bn_epsilon: Epsilon parameter for Batch Normalization layers.\n activation: activation function.\n block_args: list of dicts, parameters to construct block modules.\n se_ratio: Squeeze and Excitation layer ratio.\n model_name: name of the model.\n drop_connect_rate: dropout rate at skip connections.\n include_top: whether to include the fully-connected layer at the top of\n the network.\n weights: one of `None` (random initialization), `'imagenet'`\n (pre-training on ImageNet), or the path to the weights file to be loaded.\n Note: one model can have multiple imagenet variants depending on\n input shape it was trained with. For input_shape 224x224 pass\n `imagenet-i224` as argument. By default, highest input shape weights are\n downloaded.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to\n use as image input for the model.\n input_shape: optional shape tuple. It should have exactly 3 inputs\n channels, and width and height should be no smaller than 32.\n E.g. (200, 200, 3) would be one valid value.\n pooling: optional pooling mode for feature extraction when `include_top`\n is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images into, only to be\n specified if `include_top` is True, and if no `weights` argument is\n specified.\n classifier_activation: A `str` or callable. The activation function to\n use on the \"top\" layer. Ignored unless `include_top=True`. Set\n `classifier_activation=None` to return the logits of the \"top\" layer.\n include_preprocessing: Boolean, whether to include the preprocessing layer\n (`Rescaling`) at the bottom of the network. Defaults to `True`.\n\n Returns:\n A `keras.Model` instance.\n", "language": "en", "n_whitespaces": 883, "n_words": 450, "vocab_size": 256 }
47
Python
30
c223693db91473c9a71c330d4e38a751d149f93c
resnet_rs.py
268,893
2
20
decode_predictions
https://github.com/keras-team/keras.git
KERAS application addition of Resnet-RS model
48
0
79,758
8
4
2
def all_nodes(self):
jina/serve/runtimes/gateway/graph/topology_graph.py
13
jina
{ "docstring": "\n The set of all the nodes inside this Graph\n\n :return: A list of nodes\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 12 }
2
Python
2
ef662b529b2a2eecea7bb99759a9f7b9d86d3062
topology_graph.py
12,505
11
69
all_nodes
https://github.com/jina-ai/jina.git
feat: add grpc health checking (#4779)
9
0
2,324
6
11
60
def filtered_queryset(self):
    qs = self.model.objects.all()
    # FIXME: the following fields will be attached to the wrong object
    # if they are included in prefetch_related because of
    # https://github.com/django-polymorphic/django-polymorphic/issues/68
    # 'job_template', 'job', 'project', 'project_update', 'workflow_job',
    # 'inventory_source', 'workflow_job_template'
    q = Q(user=self.user)
    inventory_set = Inventory.accessible_pk_qs(self.user, 'read_role')
    if inventory_set:
        q |= (
            Q(ad_hoc_command__inventory__in=inventory_set)
            | Q(inventory__in=inventory_set)
            | Q(host__inventory__in=inventory_set)
            | Q(group__inventory__in=inventory_set)
            | Q(inventory_source__inventory__in=inventory_set)
            | Q(inventory_update__inventory_source__inventory__in=inventory_set)
        )
    credential_set = Credential.accessible_pk_qs(self.user, 'read_role')
    if credential_set:
        q |= Q(credential__in=credential_set)
    auditing_orgs = (
        (Organization.accessible_objects(self.user, 'admin_role') | Organization.accessible_objects(self.user, 'auditor_role'))
        .distinct()
        .values_list('id', flat=True)
    )
    if auditing_orgs:
        q |= (
            Q(user__in=auditing_orgs.values('member_role__members'))
            | Q(organization__in=auditing_orgs)
            | Q(notification_template__organization__in=auditing_orgs)
            | Q(notification__notification_template__organization__in=auditing_orgs)
            | Q(label__organization__in=auditing_orgs)
            | Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
        )
    project_set = Project.accessible_pk_qs(self.user, 'read_role')
    if project_set:
        q |= Q(project__in=project_set) | Q(project_update__project__in=project_set)
    jt_set = JobTemplate.accessible_pk_qs(self.user, 'read_role')
    if jt_set:
        q |= Q(job_template__in=jt_set) | Q(job__job_template__in=jt_set)
    wfjt_set = WorkflowJobTemplate.accessible_pk_qs(self.user, 'read_role')
    if wfjt_set:
        q |= (
            Q(workflow_job_template__in=wfjt_set)
            | Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)
            | Q(workflow_job__workflow_job_template__in=wfjt_set)
        )
    team_set = Team.accessible_pk_qs(self.user, 'read_role')
    if team_set:
        q |= Q(team__in=team_set)
    app_set = OAuth2ApplicationAccess(self.user).filtered_queryset()
    if app_set:
        q |= Q(o_auth2_application__in=app_set)
    token_set = OAuth2TokenAccess(self.user).filtered_queryset()
    if token_set:
        q |= Q(o_auth2_access_token__in=token_set)
    return qs.filter(q).distinct()
awx/main/access.py
665
awx
{ "docstring": "\n The full set is returned if the user is:\n - System Administrator\n - System Auditor\n These users will be able to see orphaned activity stream items\n (the related resource has been deleted), as well as the other\n obscure cases listed here\n\n Complex permissions omitted from the activity stream of a normal user:\n - host access via group\n - permissions (from prior versions)\n - notifications via team admin access\n\n Activity stream events that have been omitted from list for\n normal users since 2.4:\n - unified job templates\n - unified jobs\n - schedules\n - custom inventory scripts\n ", "language": "en", "n_whitespaces": 224, "n_words": 95, "vocab_size": 71 }
167
Python
99
e87e041a2a2a6d168a84d3eeea6664985f1c8ab8
access.py
82,255
53
404
filtered_queryset
https://github.com/ansible/awx.git
Break up and conditionally add the RBAC checks for ActivityStream (#13279) This should vastly improve the queries executed when accessing any of the activity stream endpoints as a normal user, in many cases.
753
0
17,336
21
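The access check above is, at its core, an OR-composition of Django `Q` filters that is built up conditionally and applied once. A stripped-down sketch of that same pattern on a generic model — the model and field names here are hypothetical, not AWX's:

from django.db.models import Q

def visible_entries(user, entry_model, allowed_project_ids):
    # Start from what the user owns, then OR in each additional grant
    q = Q(owner=user)
    if allowed_project_ids:
        q |= Q(project_id__in=allowed_project_ids)
    if user.is_staff:
        q |= Q(is_public=True)
    return entry_model.objects.filter(q).distinct()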
11
35
def read_packet(self, size=CAN_MTU):
    # type: (int) -> Optional[Packet]
    line = self.f.readline()
    line = line.lstrip()
    if len(line) < 16:
        raise EOFError
    is_log_file_format = orb(line[0]) == orb(b"(")
    fd_flags = None
    if is_log_file_format:
        t_b, intf, f = line.split()
        if b'##' in f:
            idn, data = f.split(b'##')
            fd_flags = orb(data[0])
            data = data[1:]
        else:
            idn, data = f.split(b'#')
        le = None
        t = float(t_b[1:-1])  # type: Optional[float]
    else:
        h, data = line.split(b']')
        intf, idn, le = h.split()
        t = None
    if self.ifilter is not None and \
            intf.decode('ASCII') not in self.ifilter:
        return None
    data = data.replace(b' ', b'')
    data = data.strip()
    if len(data) <= 8 and fd_flags is None:
        pkt = CAN(identifier=int(idn, 16), data=hex_bytes(data))
    else:
        pkt = CANFD(identifier=int(idn, 16), fd_flags=fd_flags,
                    data=hex_bytes(data))
    if le is not None:
        pkt.length = int(le[1:])
    else:
        pkt.length = len(pkt.data)
    if len(idn) > 3:
        pkt.flags = 0b100
    if t is not None:
        pkt.time = t
    return pkt
scapy/layers/can.py
500
scapy
{ "docstring": "Read a packet from the specified file.\n\n This function will raise EOFError when no more packets are available.\n\n :param size: Not used. Just here to follow the function signature for\n SuperSocket emulation.\n :return: A single packet read from the file or None if filters apply\n ", "language": "en", "n_whitespaces": 93, "n_words": 45, "vocab_size": 40 }
146
Python
77
ada91610ad55339bce4d84bc7d5e44ee1cab0c6f
can.py
209,950
40
312
read_packet
https://github.com/secdev/scapy.git
Add support of CANFD (#3782) * Add support of CANFD Co-authored-by: superuserx * fix tests * fix flake * fix test * fix test for python2 * fix test for python2 * fix test for python2 Co-authored-by: superuserx <[email protected]> Co-authored-by: Nils Weiss <[email protected]>
554
0
52,837
14
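The reader above handles two candump flavours: the log-file format `(timestamp) iface id#data`, where `##` marks a CAN FD frame, and the bracketed console format. A plain-Python sketch of parsing just the log-file flavour, independent of Scapy; the helper name is made up for illustration:

def parse_candump_log_line(line: bytes):
    # e.g. b"(1636128570.123456) can0 123#DEADBEEF"
    t_b, intf, frame = line.split()
    if b"##" in frame:                      # CAN FD frame: id##<flags><data>
        ident, rest = frame.split(b"##")
        fd_flags, data = rest[:1], rest[1:]
    else:                                   # classic CAN frame: id#<data>
        ident, data = frame.split(b"#")
        fd_flags = None
    return {
        "time": float(t_b[1:-1]),
        "interface": intf.decode("ascii"),
        "identifier": int(ident, 16),
        "fd_flags": fd_flags,
        "data": bytes.fromhex(data.decode("ascii")),
    }

print(parse_candump_log_line(b"(1636128570.123456) can0 123#DEADBEEF"))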
3
9
def _set_speaker_encoder_paths_from_tts_config(self):
    if hasattr(self.tts_config, "model_args") and hasattr(
        self.tts_config.model_args, "speaker_encoder_config_path"
    ):
        self.encoder_checkpoint = self.tts_config.model_args.speaker_encoder_model_path
        self.encoder_config = self.tts_config.model_args.speaker_encoder_config_path
TTS/utils/synthesizer.py
82
TTS
{ "docstring": "Set the encoder paths from the tts model config for models with speaker encoders.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
16
Python
15
8fd1ee1926a956a146188179baee143ef11a003d
synthesizer.py
261,841
6
49
_set_speaker_encoder_paths_from_tts_config
https://github.com/coqui-ai/TTS.git
Print urls when BadZipError
70
0
77,026
11
1
13
def alg_field_from_poly(self, poly, alias=None, root_index=-1):
    from sympy.polys.rootoftools import CRootOf
    root = CRootOf(poly, root_index)
    alpha = AlgebraicNumber(root, alias=alias)
    return self.algebraic_field(alpha, alias=alias)
sympy/polys/domains/domain.py
81
sympy
{ "docstring": "\n Convenience method to construct an algebraic extension on a root of a\n polynomial, chosen by root index.\n\n Parameters\n ==========\n\n poly : :py:class:`~.Poly`\n The polynomial whose root generates the extension.\n alias : str, optional (default=None)\n Symbol name for the generator of the extension.\n E.g. \"alpha\" or \"theta\".\n root_index : int, optional (default=-1)\n Specifies which root of the polynomial is desired. The ordering is\n as defined by the :py:class:`~.ComplexRootOf` class. The default of\n ``-1`` selects the most natural choice in the common cases of\n quadratic and cyclotomic fields (the square root on the positive\n real or imaginary axis, resp. $\\mathrm{e}^{2\\pi i/n}$).\n\n Examples\n ========\n\n >>> from sympy import QQ, Poly\n >>> from sympy.abc import x\n >>> f = Poly(x**2 - 2)\n >>> K = QQ.alg_field_from_poly(f)\n >>> K.ext.minpoly == f\n True\n >>> g = Poly(8*x**3 - 6*x - 1)\n >>> L = QQ.alg_field_from_poly(g, \"alpha\")\n >>> L.ext.minpoly == g\n True\n >>> L.to_sympy(L([1, 1, 1]))\n alpha**2 + alpha + 1\n\n ", "language": "en", "n_whitespaces": 397, "n_words": 154, "vocab_size": 107 }
21
Python
19
1af5040d2466d2e6455eb07454f7da8dd345a9b8
domain.py
197,778
41
55
alg_field_from_poly
https://github.com/sympy/sympy.git
Support `alias` for prim. elt. of `AlgebraicField`
55
0
48,688
9
14
35
def _update_defaults(self, defaults):
    # type: (Dict[str, Any]) -> Dict[str, Any]
    # Accumulate complex default state.
    self.values = optparse.Values(self.defaults)
    late_eval = set()
    # Then set the options with those values
    for key, val in self._get_ordered_configuration_items():
        # '--' because configuration supports only long names
        option = self.get_option("--" + key)
        # Ignore options not present in this parser. E.g. non-globals put
        # in [global] by users that want them to apply to all applicable
        # commands.
        if option is None:
            continue
        assert option.dest is not None
        if option.action in ("store_true", "store_false"):
            try:
                val = strtobool(val)
            except ValueError:
                self.error(
                    "{} is not a valid value for {} option, "  # noqa
                    "please specify a boolean value like yes/no, "
                    "true/false or 1/0 instead.".format(val, key)
                )
        elif option.action == "count":
            with suppress(ValueError):
                val = strtobool(val)
            with suppress(ValueError):
                val = int(val)
            if not isinstance(val, int) or val < 0:
                self.error(
                    "{} is not a valid value for {} option, "  # noqa
                    "please instead specify either a non-negative integer "
                    "or a boolean value like yes/no or false/true "
                    "which is equivalent to 1/0.".format(val, key)
                )
        elif option.action == "append":
            val = val.split()
            val = [self.check_default(option, key, v) for v in val]
        elif option.action == "callback":
            assert option.callback is not None
            late_eval.add(option.dest)
            opt_str = option.get_opt_string()
            val = option.convert_value(opt_str, val)
            # From take_action
            args = option.callback_args or ()
            kwargs = option.callback_kwargs or {}
            option.callback(option, opt_str, val, self, *args, **kwargs)
        else:
            val = self.check_default(option, key, val)
        defaults[option.dest] = val
    for key in late_eval:
        defaults[key] = getattr(self.values, key)
    self.values = None
    return defaults
.venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py
518
transferlearning
{ "docstring": "Updates the given defaults with values from the config files and\n the environ. Does a little special handling for certain types of\n options (lists).", "language": "en", "n_whitespaces": 37, "n_words": 24, "vocab_size": 22 }
254
Python
148
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
parser.py
60,539
47
308
_update_defaults
https://github.com/jindongwang/transferlearning.git
upd; format
1,029
0
12,199
18
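The heart of the method above is coercing free-form config strings into the type each option expects: booleans via `strtobool`, counts via `int`, lists via `split`. A standalone sketch of that normalization step — the names here are illustrative, not pip's internals:

_BOOL_MAP = {
    "yes": True, "true": True, "1": True, "on": True,
    "no": False, "false": False, "0": False, "off": False,
}

def coerce_config_value(raw: str, action: str):
    # Mirror the store_true/count/append handling, in miniature
    if action in ("store_true", "store_false"):
        return _BOOL_MAP[raw.strip().lower()]
    if action == "count":
        return int(raw)
    if action == "append":
        return raw.split()
    return raw

print(coerce_config_value("yes", "store_true"))   # True
print(coerce_config_value("2", "count"))          # 2
print(coerce_config_value("a b c", "append"))     # ['a', 'b', 'c']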
1
21
def test_start_new_processes_with_same_filepath(self):
    manager = DagFileProcessorManager(
        dag_directory='directory',
        max_runs=1,
        processor_timeout=timedelta(days=365),
        signal_conn=MagicMock(),
        dag_ids=[],
        pickle_dags=False,
        async_mode=True,
    )
    file_1 = 'file_1.py'
    file_2 = 'file_2.py'
    file_3 = 'file_3.py'
    manager._file_path_queue = [file_1, file_2, file_3]
    # Mock that only one processor exists. This processor runs with 'file_1'
    manager._processors[file_1] = MagicMock()
    # Start New Processes
    manager.start_new_processes()
    # Because of the config: '[scheduler] parsing_processes = 2'
    # verify that only one extra process is created
    # and since a processor with 'file_1' already exists,
    # even though it is first in '_file_path_queue'
    # a new processor is created with 'file_2' and not 'file_1'.
    assert file_1 in manager._processors.keys()
    assert file_2 in manager._processors.keys()
    assert [file_3] == manager._file_path_queue
tests/dag_processing/test_manager.py
185
airflow
{ "docstring": "\n Test that when a processor already exist with a filepath, a new processor won't be created\n with that filepath. The filepath will just be removed from the list.\n ", "language": "en", "n_whitespaces": 50, "n_words": 28, "vocab_size": 22 }
105
Python
71
18da1217d7ae593ff33c681353b027fac9252523
test_manager.py
46,452
19
110
test_start_new_processes_with_same_filepath
https://github.com/apache/airflow.git
Replace timedelta.max with year long timdelta in test_manager (#22527) Timedelta.max used in tests is not realistic and in some circumstances, when it is added to date, it might cause date OverflowError. Using long (but not 999999999 days long) timedelta solves the problem.
315
0
8,892
12
1
8
def search(self, value, user=None, object_types=None, lookup=DEFAULT_LOOKUP_TYPE):
    raise NotImplementedError
netbox/netbox/search/backends.py
33
netbox
{ "docstring": "\n Search cached object representations for the given value.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
8
Python
8
9628dead07ccef9608b32906aa8194bc948e5a09
backends.py
265,890
2
22
search
https://github.com/netbox-community/netbox.git
Closes #10560: New global search (#10676) * Initial work on new search backend * Clean up search backends * Return only the most relevant result per object * Clear any pre-existing cached entries on cache() * #6003: Implement global search functionality for custom field values * Tweak field weights & document guidance * Extend search() to accept a lookup type * Move get_registry() out of SearchBackend * Enforce object permissions when returning search results * Add indexers for remaining models * Avoid calling remove() on non-cacheable objects * Use new search backend by default * Extend search backend to filter by object type * Clean up search view form * Enable specifying lookup logic * Add indexes for value field * Remove object type selector from search bar * Introduce SearchTable and enable HTMX for results * Enable pagination * Remove legacy search backend * Cleanup * Use a UUID for CachedValue primary key * Refactoring search methods * Define max search results limit * Extend reindex command to support specifying particular models * Add clear() and size to SearchBackend * Optimize bulk caching performance * Highlight matched portion of field value * Performance improvements for reindexing * Started on search tests * Cleanup & docs * Documentation updates * Clean up SearchIndex * Flatten search registry to register by app_label.model_name * Clean up search backend classes * Clean up RestrictedGenericForeignKey and RestrictedPrefetch * Resolve migrations conflict
22
0
78,230
6
7
59
def call_load(self, other_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="load",
        description=,
    )
    parser.add_argument(
        "-c",
        "--coin",
        help="Coin to get. Must be coin symbol (e.g., btc, eth)",
        dest="coin",
        type=str,
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-s",
        "--start",
        type=valid_date,
        default=(datetime.now() - timedelta(days=1100)).strftime("%Y-%m-%d"),
        dest="start",
        help="The starting date (format YYYY-MM-DD) of the crypto",
    )
    parser.add_argument(
        "--exchange",
        help="Exchange to search",
        dest="exchange",
        type=str,
        default="binance",
        choices=self.exchanges,
    )
    parser.add_argument(
        "-e",
        "--end",
        type=valid_date,
        default=datetime.now().strftime("%Y-%m-%d"),
        dest="end",
        help="The ending date (format YYYY-MM-DD) of the crypto",
    )
    parser.add_argument(
        "-i",
        "--interval",
        action="store",
        dest="interval",
        type=str,
        default="1440",
        choices=["1", "5", "15", "30", "60", "240", "1440", "10080", "43200"],
        help="The interval of the crypto",
    )
    parser.add_argument(
        "--vs",
        help="Quote currency (what to view coin vs). e.g., usdc, usdt, ... if source is ccxt, usd, eur, ... otherwise",  # noqa
        dest="vs",
        default="usdt",
        type=str,
    )
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-c")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        if ns_parser.source in ("YahooFinance", "CoinGecko"):
            if ns_parser.vs == "usdt":
                ns_parser.vs = "usd"
        (self.current_df) = cryptocurrency_helpers.load(
            symbol=ns_parser.coin.lower(),
            vs_currency=ns_parser.vs,
            end_date=ns_parser.end.strftime("%Y-%m-%d"),
            start_date=ns_parser.start.strftime("%Y-%m-%d"),
            interval=ns_parser.interval,
            source=ns_parser.source,
            exchange=ns_parser.exchange,
        )
        if not self.current_df.empty:
            self.vs = ns_parser.vs
            self.exchange = ns_parser.exchange
            self.source = ns_parser.source
            self.current_interval = ns_parser.interval
            self.current_currency = ns_parser.vs
            self.symbol = ns_parser.coin.lower()
            cryptocurrency_helpers.show_quick_performance(
                self.current_df,
                self.symbol,
                self.current_currency,
                ns_parser.source,
                ns_parser.exchange,
                self.current_interval,
            )
            export_data(
                ns_parser.export,
                os.path.dirname(os.path.abspath(__file__)),
                "load",
                self.current_df.copy(),
            )
openbb_terminal/parent_classes.py
791
OpenBBTerminal
{ "docstring": "Process load command.Load crypto currency to perform analysis on.\n Yahoo Finance is used as default source.\n Other sources can be used such as 'ccxt' or 'cg' with --source.\n If you select 'ccxt', you can then select any exchange with --exchange.\n You can also select a specific interval with --interval.", "language": "en", "n_whitespaces": 92, "n_words": 49, "vocab_size": 40 }
198
Python
141
46141766d7250671b7bc75872e2034afe4938374
parent_classes.py
286,499
99
486
call_load
https://github.com/OpenBB-finance/OpenBBTerminal.git
Sdk dates (#3354) * example changes in slopes * change lettering size and side bar capitalization * revert back to Fira * start automatic website generation * this was autogen * add examples to slopes model * generate slopes doc * change to _index.md * allow italic formatting * fix regex * option to regenerate paths * update alt docs * fix generate * update alt * fix generate * update common * target italic only for types * format alt * format italic common * add sig indentation * update sig indent alt * update common ident * add todo * generate docstrings for all menus * fix maxdd * fix returns font size * fix keys docs * fix more docstrings * escape literal symbols * escape literal symbols * reformat keys * format opt * remove literal escape * remove another literal escape * remove another literal escape * unindent returns * update docs return unindent * add comma in last arg * fix funcs without params * fix signature * compact some code * refactor some more code * refactor some code * some final cleanup * write docstrings * change main * move futures paths * generate futures docs * add external axes references * fix typo * revert to double docstring * fix small bug * remove docs folder * generate.py in website folder * add forecast to docs * clear some warnings * fix underscore * remove cite * refresh website docs * fix forecast docstrings * fix po * fix po docs and remove italic * fix more docstrings * remove last warning * codespell * flake8 * exclude website contente from flake * noqa on optimizer * update website * fix mypy * remove setup from mypy * mypy to openbbterminal * update precommit * pylint * try to remove sdk loading issue * fix dates active command * fix crypto.change formats * fix eb formats * nonzero fix * format dates crypto.load * format supply transac * format hr altindex * format load crypto * regenerate docs * format ba trend dates * regenerate docs * format ba trend * candle defaults * fix sentiment test * remove unused import * shopt * shopt again * revert crypto helpers * test shopt * fix some tests * skip trending test * fix alcoin test * helpers * write docs * rewrite helper Co-authored-by: Jeroen Bouma <[email protected]>
1,328
0
85,834
16
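The command above leans on an argparse `type=` validator (`valid_date`) to turn `-s`/`-e` strings into datetimes before they are used. A self-contained sketch of that pattern with a stand-in validator — `valid_date_arg` and the reduced option set are illustrative, not OpenBB's own helpers:

import argparse
from datetime import datetime

def valid_date_arg(s: str) -> datetime:
    # Minimal stand-in for the 'valid_date' type used by the load command
    try:
        return datetime.strptime(s, "%Y-%m-%d")
    except ValueError as exc:
        raise argparse.ArgumentTypeError(f"Not a valid date: {s!r}") from exc

parser = argparse.ArgumentParser(prog="load", add_help=False)
parser.add_argument("-c", "--coin", dest="coin", type=str, required=True)
parser.add_argument("-s", "--start", dest="start", type=valid_date_arg)
parser.add_argument("--vs", dest="vs", default="usdt")

ns = parser.parse_args(["-c", "btc", "-s", "2023-01-01"])
print(ns.coin, ns.vs, ns.start.date())   # btc usdt 2023-01-01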