Dataset columns (name, type, min/max or distinct values):

column          type           min      max
complexity      int64          1        56
n_identifiers   int64          1        114
code            stringlengths  19       12.7k
path            stringlengths  8        134
n_ast_nodes     int64          12       2.35k
ast_errors      stringlengths  0        4.01k
repo            stringlengths  3        28
documentation   dict           -        -
n_words         int64          2        866
language        stringclasses  1 value
vocab_size      int64          2        323
commit_id       stringlengths  40       40
file_name       stringlengths  5        79
id              int64          243      338k
nloc            int64          1        228
token_counts    int64          5        1.4k
fun_name        stringlengths  1        77
url             stringlengths  31       60
commit_message  stringlengths  3        15.3k
n_whitespaces   int64          1        3.23k
n_ast_errors    int64          0        20
d_id            int64          74       121k
ast_levels      int64          4        29
complexity: 6 | n_identifiers: 31
code:

def convert_to_bytes(file_or_bytes, resize=None, fill=False):
    if isinstance(file_or_bytes, str):
        img = PIL.Image.open(file_or_bytes)
    else:
        try:
            img = PIL.Image.open(io.BytesIO(base64.b64decode(file_or_bytes)))
        except Exception as e:
            dataBytesIO = io.BytesIO(file_or_bytes)
            img = PIL.Image.open(dataBytesIO)

    cur_width, cur_height = img.size
    if resize:
        new_width, new_height = resize
        scale = min(new_height / cur_height, new_width / cur_width)
        img = img.resize((int(cur_width * scale), int(cur_height * scale)), PIL.Image.ANTIALIAS)
    if fill:
        if resize is not None:
            img = make_square(img, resize[0])
    with io.BytesIO() as bio:
        img.save(bio, format="PNG")
        del img
        return bio.getvalue()

path: DemoPrograms/Demo_Emoji_Toolbar_PIL.py | n_ast_nodes: 295 | repo: PySimpleGUI
documentation:
{ "docstring": "\n Will convert into bytes and optionally resize an image that is a file or a base64 bytes object.\n Turns into PNG format in the process so that can be displayed by tkinter\n :param file_or_bytes: either a string filename or a bytes base64 image object\n :type file_or_bytes: (Union[str, bytes])\n :param resize: optional new size\n :type resize: (Tuple[int, int] or None)\n :param fill: If True then the image is filled/padded so that the image is not distorted\n :type fill: (bool)\n :return: (bytes) a byte-string object\n :rtype: (bytes)\n \nM`YM dP \nM mm. mm. M 88 \nM MMM MMM M .d8888b. 88 .dP .d8888b. \nM MMM MMM M 88' `88 88888\" 88ooood8 \nM MMM MMM M 88. .88 88 `8b. 88. ... \nM MMM MMM M `88888P8 dP `YP `88888P' \nMMMMMMMMMMMMMM \n \nM\"\"MMM\"\"MMM\"\"M oo dP \nM MMM MMM M 88 \nM MMP MMP M dP 88d888b. .d888b88 .d8888b. dP dP dP \nM MM' MM' .M 88 88' `88 88' `88 88' `88 88 88 88 \nM `' . '' .MM 88 88 88 88. .88 88. .88 88.88b.88' \nM .d .dMMM dP dP dP `88888P8 `88888P' 8888P Y8P \nMMMMMMMMMMMMMM\n", "language": "en", "n_whitespaces": 524, "n_words": 184, "vocab_size": 92 }
n_words: 74 | language: Python | vocab_size: 54
commit_id: d363bd761fef3de10d162809199ad3c351081914 | file_name: Demo_Emoji_Toolbar_PIL.py | id: 212,684
nloc: 21 | token_counts: 181 | fun_name: convert_to_bytes
url: https://github.com/PySimpleGUI/PySimpleGUI.git
commit_message: New Demo - Emoji Toolbar
n_whitespaces: 208 | n_ast_errors: 0 | d_id: 53,340 | ast_levels: 17

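A minimal usage sketch for convert_to_bytes above, assuming Pillow is installed and that a file named photo.jpg exists (the file name is hypothetical):

    # Shrink an image to fit inside 128x128 (aspect ratio preserved) and get PNG bytes.
    png_bytes = convert_to_bytes("photo.jpg", resize=(128, 128))
    assert png_bytes[:8] == b"\x89PNG\r\n\x1a\n"  # PNG files start with this magic header

Note that PIL.Image.ANTIALIAS was removed in Pillow 10; on current Pillow versions the equivalent resample filter is PIL.Image.LANCZOS.
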
complexity: 14 | n_identifiers: 19
code:

def handle(self, *args, **options):
    jt = options['jt']
    threshold = options['threshold']
    history = options['history']
    ignore = options['ignore']
    print('## ' + JobTemplate.objects.get(pk=jt).name + f' (last {history} runs)\n')
    with connection.cursor() as cursor:
        cursor.execute(
            f
        )
        slowest_events = cursor.fetchall()

path: awx/main/management/commands/bottleneck.py | n_ast_nodes: 150 | repo: awx
documentation:
{ "docstring": "\n SELECT\n b.id, b.job_id, b.host_name, b.created - a.created delta,\n b.task task,\n b.event_data::json->'task_action' task_action,\n b.event_data::json->'task_path' task_path\n FROM main_jobevent a JOIN main_jobevent b\n ON b.parent_uuid = a.parent_uuid AND a.host_name = b.host_name\n WHERE\n a.event = 'runner_on_start' AND\n b.event != 'runner_on_start' AND\n b.event != 'runner_on_skipped' AND\n b.failed = false AND\n a.job_id IN (\n SELECT unifiedjob_ptr_id FROM main_job\n WHERE job_template_id={jt}\n ORDER BY unifiedjob_ptr_id DESC\n LIMIT {history}\n )\n ORDER BY delta DESC;\n ", "language": "en", "n_whitespaces": 439, "n_words": 65, "vocab_size": 48 }
n_words: 35 | language: Python | vocab_size: 30
commit_id: d3eb2c197595c29c4a3f7b38cd609ce953009623 | file_name: bottleneck.py | id: 82,002
nloc: 67 | token_counts: 398 | fun_name: handle
url: https://github.com/ansible/awx.git
commit_message: Add new flak8 rules to do some meaningful corrections
n_whitespaces: 124 | n_ast_errors: 0 | d_id: 17,294 | ast_levels: 13

complexity: 1 | n_identifiers: 2
code:

def user_id_get_param(self):
    return "accountId"

path: src/sentry/integrations/jira/client.py | n_ast_nodes: 18 | repo: sentry
documentation:
{ "docstring": "\n Jira-Cloud requires GDPR compliant API usage so we have to use accountId\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
n_words: 4 | language: Python | vocab_size: 4
commit_id: 2fbf550ec05c8501cbc9eca62e73526e717dcbdf | file_name: client.py | id: 93,682
nloc: 2 | token_counts: 8 | fun_name: user_id_get_param
url: https://github.com/getsentry/sentry.git
commit_message: ref(Jira): Split Jira Cloud and Jira Server (#37034) * Split Jira Cloud and Jira Server
n_whitespaces: 18 | n_ast_errors: 0 | d_id: 19,005 | ast_levels: 6

complexity: 3 | n_identifiers: 18
code:

def save(self) -> bytes:
    filters = self.get_filters(flush_after=True)
    state = {}
    policy_specs = {}
    connector_enabled = self.policy_config.get("enable_connectors", False)
    for pid in self.policy_map:
        state[pid] = self.policy_map[pid].get_state()
        policy_spec = self.policy_map.policy_specs[pid]
        # If connectors are enabled, try serializing the policy spec
        # instead of picking the spec object.
        policy_specs[pid] = (
            policy_spec.serialize() if connector_enabled else policy_spec
        )
    return pickle.dumps(
        {
            "filters": filters,
            "state": state,
            "policy_specs": policy_specs,
        }
    )

path: rllib/evaluation/rollout_worker.py | n_ast_nodes: 174 | repo: ray
documentation:
{ "docstring": "Serializes this RolloutWorker's current state and returns it.\n\n Returns:\n The current state of this RolloutWorker as a serialized, pickled\n byte sequence.\n ", "language": "en", "n_whitespaces": 57, "n_words": 21, "vocab_size": 18 }
n_words: 64 | language: Python | vocab_size: 51
commit_id: d83bbda2816b1781eb61342b4539578149eeb686 | file_name: rollout_worker.py | id: 124,040
nloc: 24 | token_counts: 106 | fun_name: save
url: https://github.com/ray-project/ray.git
commit_message: [RLlib] Save serialized PolicySpec. Extract `num_gpus` related logics into a util function. (#25954)
n_whitespaces: 268 | n_ast_errors: 0 | d_id: 27,501 | ast_levels: 12

complexity: 2 | n_identifiers: 11
code:

def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5):
    out_logits, raw_masks = outputs.logits, outputs.pred_masks
    preds = []

path: src/transformers/models/yolos/feature_extraction_yolos.py | n_ast_nodes: 51 | repo: transformers
documentation:
{ "docstring": "\n Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch.\n\n Parameters:\n outputs ([`DetrSegmentationOutput`]):\n Raw outputs of the model.\n target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):\n Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction.\n threshold (`float`, *optional*, defaults to 0.9):\n Threshold to use to filter out queries.\n mask_threshold (`float`, *optional*, defaults to 0.5):\n Threshold to use when turning the predicted masks into binary values.\n\n Returns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image\n in the batch as predicted by the model.\n ", "language": "en", "n_whitespaces": 256, "n_words": 101, "vocab_size": 73 }
n_words: 14 | language: Python | vocab_size: 13
commit_id: 1ac698744c4dbdf1495d303246d08ffacdf4f5b8 | file_name: feature_extraction_yolos.py | id: 37,632
nloc: 16 | token_counts: 196 | fun_name: post_process_segmentation
url: https://github.com/huggingface/transformers.git
commit_message:
Add YOLOS (#16848) * First draft * Add YolosForObjectDetection * Make forward pass work * Add mid position embeddings * Add interpolation of position encodings * Add expected values * Add YOLOS to tests * Add integration test * Support tiny model as well * Support all models in conversion script * Remove mid_pe_size attribute * Make more tests pass * Add model to README and fix config * Add copied from statements * Rename base_model_prefix to vit * Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP * Apply suggestions from code review * Apply more suggestions from code review * Convert remaining checkpoints * Improve docstrings * Add YolosFeatureExtractor * Add feature extractor to docs * Add corresponding tests * Fix style * Fix docs * Apply suggestion from code review * Fix bad rebase * Fix some more bad rebase * Fix missing character * Improve docs and variable names Co-authored-by: Niels Rogge <[email protected]>
n_whitespaces: 35 | n_ast_errors: 0 | d_id: 6,842 | ast_levels: 8

complexity: 3 | n_identifiers: 15
code:

def pi(self):
    total = 0.0
    label_freqs = FreqDist(x["labels"] for x in self.data)
    for k, f in label_freqs.items():
        total += f**2
    Ae = total / ((len(self.I) * len(self.C)) ** 2)
    return (self.avg_Ao() - Ae) / (1 - Ae)

path: nltk/metrics/agreement.py | n_ast_nodes: 128 | repo: nltk
documentation:
{ "docstring": "Scott 1955; here, multi-pi.\n Equivalent to K from Siegel and Castellan (1988).\n\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 12 }
n_words: 37 | language: Python | vocab_size: 28
commit_id: 0fac0c0f8e4618c2bdd3d2137d5fb8a80f581246 | file_name: agreement.py | id: 42,462
nloc: 7 | token_counts: 81 | fun_name: pi
url: https://github.com/nltk/nltk.git
commit_message: Update black to 22.3.0 The most recent release of Click (8.1.0) was breaking Black. See psf/black#2964
n_whitespaces: 90 | n_ast_errors: 0 | d_id: 7,551 | ast_levels: 14

complexity: 3 | n_identifiers: 11
code:

def get_image_type(image):
    fmt = imghdr.what(None, h=image)
    if fmt is None:
        # if imghdr can't figure it out, could be svg.
        with contextlib.suppress(UnicodeDecodeError):
            if image.decode("utf-8").startswith("<svg"):
                return "svg+xml"
    return fmt

path: homeassistant/components/generic/config_flow.py | n_ast_nodes: 88 | repo: core
documentation:
{ "docstring": "Get the format of downloaded bytes that could be an image.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
n_words: 28 | language: Python | vocab_size: 23
commit_id: c1a2be72fc8b76b55cfde1823c5688100e397369 | file_name: config_flow.py | id: 294,612
nloc: 7 | token_counts: 48 | fun_name: get_image_type
url: https://github.com/home-assistant/core.git
commit_message: Generic IP Camera configflow 2 (#52360) Co-authored-by: J. Nick Koston <[email protected]>
n_whitespaces: 80 | n_ast_errors: 0 | d_id: 93,646 | ast_levels: 14

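A short sketch of how get_image_type behaves on different byte payloads, assuming the module-level imghdr and contextlib imports from the sample above (imghdr recognises the PNG magic header; the SVG fallback only fires when imghdr returns None and the bytes decode as UTF-8 XML):

    png_bytes = b"\x89PNG\r\n\x1a\n" + b"\x00" * 16
    svg_bytes = b'<svg xmlns="http://www.w3.org/2000/svg"></svg>'
    assert get_image_type(png_bytes) == "png"
    assert get_image_type(svg_bytes) == "svg+xml"
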
complexity: 1 | n_identifiers: 15
code:

def __record_outcome(self, test, f, t):
    f2, t2 = self._name2ft.get(test.name, (0,0))
    self._name2ft[test.name] = (f+f2, t+t2)
    self.failures += f
    self.tries += t

__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                     r'(?P<name>.+)'
                                     r'\[(?P<examplenum>\d+)\]>$')

path: python3.10.4/Lib/doctest.py | n_ast_nodes: 112 | repo: XX-Net
documentation:
{ "docstring": "\n Record the fact that the given DocTest (`test`) generated `f`\n failures out of `t` tried examples.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
n_words: 26 | language: Python | vocab_size: 23
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | file_name: doctest.py | id: 223,468
nloc: 5 | token_counts: 60 | fun_name: __record_outcome
url: https://github.com/XX-net/XX-Net.git
commit_message: add python 3.10.4 for windows
n_whitespaces: 144 | n_ast_errors: 0 | d_id: 56,919 | ast_levels: 9

complexity: 6 | n_identifiers: 43
code:

def migrate_json_fields_expensive(table, columns):
    batchsize = 50000
    ct = ContentType.objects.get_by_natural_key(*table.split('_', 1))
    model = ct.model_class()

    # Phase 1: add the new columns, making them nullable to avoid populating them
    with connection.schema_editor() as schema_editor:
        # See: https://docs.djangoproject.com/en/3.1/ref/schema-editor/
        for colname in columns:
            f = model._meta.get_field(colname)
            _, _, args, kwargs = f.deconstruct()
            kwargs['null'] = True
            new_f = f.__class__(*args, **kwargs)
            new_f.set_attributes_from_name(f'_{colname}')
            schema_editor.add_field(model, new_f)

            # Create a trigger to make sure new data automatically gets put in both fields.
            with connection.cursor() as cursor:
                # It's a little annoying, I think this trigger will re-do
                # the same work as the update query in Phase 2
                cursor.execute(
                    f
                )
                cursor.execute(
                    f
                )

    # Phase 2: copy over the data
    with connection.cursor() as cursor:
        rows = 0
        for i in itertools.count(0, batchsize):
            cursor.execute(f"select count(1) from {table} where id >= %s;", (i,))
            if not cursor.fetchone()[0]:
                break
            column_expr = ', '.join(f"_{colname} = {colname}::jsonb" for colname in columns)
            cursor.execute(
                f,
                (i, i + batchsize),
            )
            rows += cursor.rowcount
            logger.debug(f"Batch {i} to {i + batchsize} copied on {table}.")
        logger.warning(f"Data copied for {rows} rows on {table}.")

    # Phase 3: drop the old column and rename the new one
    with connection.schema_editor() as schema_editor:
        # FIXME: Grab a lock explicitly here?
        for colname in columns:
            with connection.cursor() as cursor:
                cursor.execute(f"drop trigger {table}_{colname}_trigger;")
                cursor.execute(f"drop function update_{table}_{colname};")
            f = model._meta.get_field(colname)
            _, _, args, kwargs = f.deconstruct()
            kwargs['null'] = True
            new_f = f.__class__(*args, **kwargs)
            new_f.set_attributes_from_name(f'_{colname}')
            schema_editor.remove_field(model, f)
            _, _, args, kwargs = new_f.deconstruct()
            f = new_f.__class__(*args, **kwargs)
            f.set_attributes_from_name(colname)
            schema_editor.alter_field(model, new_f, f)

@task(queue=get_local_queuename)

path: awx/main/tasks/system.py | n_ast_nodes: 710 | ast_errors: @task(queue=get_local_queuename) | repo: awx
documentation:
{ "docstring": "\n create or replace function update_{table}_{colname}()\n returns trigger as $body$\n begin\n new._{colname} = new.{colname}::jsonb\n return new;\n end\n $body$ language plpgsql;\n \n create trigger {table}_{colname}_trigger\n before insert or update\n on {table}\n for each row\n execute procedure update_{table}_{colname};\n \n update {table}\n set {column_expr}\n where id >= %s and id < %s;\n ", "language": "en", "n_whitespaces": 403, "n_words": 46, "vocab_size": 39 }
n_words: 243 | language: Python | vocab_size: 141
commit_id: 676b8f6d8ff85c10e66cebe0a471d3d97434a6c4 | file_name: system.py | id: 80,933
nloc: 66 | token_counts: 358 | fun_name: migrate_json_fields_expensive
url: https://github.com/ansible/awx.git
commit_message: Implement an out-of-band migration to change the json fields
n_whitespaces: 806 | n_ast_errors: 1 | d_id: 17,114 | ast_levels: 15

complexity: 1 | n_identifiers: 15
code:

def test_complex_pipeline_with_shared_prompt_model_yaml(tmp_path):
    with open(tmp_path / "tmp_config.yml", "w") as tmp_file:
        tmp_file.write(
            f
        )
    pipeline = Pipeline.load_from_yaml(path=tmp_path / "tmp_config.yml")
    result = pipeline.run(query="not relevant", documents=[Document("Berlin is an amazing city.")])
    assert "Berlin" in result["results"][0]
    assert len(result["meta"]["invocation_context"]) > 0

path: test/nodes/test_prompt_node.py | n_ast_nodes: 141 | repo: haystack
documentation:
{ "docstring": "\n version: ignore\n components:\n - name: pmodel\n type: PromptModel\n - name: p1\n params:\n model_name_or_path: pmodel\n default_prompt_template: question-generation\n output_variable: questions\n type: PromptNode\n - name: p2\n params:\n model_name_or_path: pmodel\n default_prompt_template: question-answering\n type: PromptNode\n pipelines:\n - name: query\n nodes:\n - name: p1\n inputs:\n - Query\n - name: p2\n inputs:\n - p1\n ", "language": "en", "n_whitespaces": 371, "n_words": 47, "vocab_size": 23 }
n_words: 34 | language: Python | vocab_size: 31
commit_id: 9ebf164cfdfb320503b7161493420c1b0ec577a3 | file_name: test_prompt_node.py | id: 258,375
nloc: 34 | token_counts: 78 | fun_name: test_complex_pipeline_with_shared_prompt_model_yaml
url: https://github.com/deepset-ai/haystack.git
commit_message: feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667) Co-authored-by: ZanSara <[email protected]>
n_whitespaces: 73 | n_ast_errors: 0 | d_id: 75,230 | ast_levels: 13

complexity: 1 | n_identifiers: 5
code:

def drop(self, *args, **kwargs):
    raise NotImplementedError()

path: src/datasets/table.py | n_ast_nodes: 28 | repo: datasets
documentation:
{ "docstring": "\n Drop one or more columns and return a new table.\n\n Args:\n columns (`List[str]`):\n List of field names referencing existing columns.\n\n Raises:\n `KeyError` : if any of the passed columns name are not existing.\n\n Returns:\n `datasets.table.Table`: New table without the columns.\n ", "language": "en", "n_whitespaces": 124, "n_words": 40, "vocab_size": 35 }
n_words: 6 | language: Python | vocab_size: 6
commit_id: c902456677116a081f762fa2b4aad13a0aa04d6e | file_name: table.py | id: 106,140
nloc: 2 | token_counts: 16 | fun_name: drop
url: https://github.com/huggingface/datasets.git
commit_message: Clean up Table class docstrings (#5355) * clean table docstrings * apply review Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]>
n_whitespaces: 20 | n_ast_errors: 0 | d_id: 22,304 | ast_levels: 7

complexity: 3 | n_identifiers: 10
code:

def _dialect_is_microsoft_sql(self):
    if self._dialect_is_microsoft_sql_cache is None:
        self._dialect_is_microsoft_sql_cache = False
        if self.lib == _SQLALCHEMY_LIB_NAME:
            from sqlalchemy import create_engine

            self._dialect_is_microsoft_sql_cache = create_engine(
                *self.args, **self.kwargs
            ).driver in ("pymssql", "pyodbc")
    return self._dialect_is_microsoft_sql_cache

path: modin/db_conn.py | n_ast_nodes: 96 | repo: modin
documentation:
{ "docstring": "\n Tell whether this connection requires Microsoft SQL dialect.\n\n If this is a sqlalchemy connection, create an engine from args and\n kwargs. If that engine's driver is pymssql or pyodbc, this\n connection requires Microsoft SQL. Otherwise, it doesn't.\n\n Returns\n -------\n Boolean\n ", "language": "en", "n_whitespaces": 97, "n_words": 40, "vocab_size": 33 }
n_words: 28 | language: Python | vocab_size: 23
commit_id: 2d40797b2b700d81d4db4a4cd023d563edf6431f | file_name: db_conn.py | id: 153,457
nloc: 9 | token_counts: 57 | fun_name: _dialect_is_microsoft_sql
url: https://github.com/modin-project/modin.git
commit_message: FEAT-#979: Enable reading from SQL server. (#4279) Co-authored-by: eavidan <[email protected]> Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: mvashishtha <[email protected]>
n_whitespaces: 135 | n_ast_errors: 0 | d_id: 35,406 | ast_levels: 16

complexity: 1 | n_identifiers: 4
code:

def get_config_var(name):
    return get_config_vars().get(name)

path: pipenv/patched/notpip/_vendor/distlib/_backport/sysconfig.py | n_ast_nodes: 28 | repo: pipenv
documentation:
{ "docstring": "Return the value of a single variable using the dictionary returned by\n 'get_config_vars()'.\n\n Equivalent to get_config_vars().get(name)\n ", "language": "en", "n_whitespaces": 25, "n_words": 16, "vocab_size": 15 }
n_words: 4 | language: Python | vocab_size: 4
commit_id: c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | file_name: sysconfig.py | id: 21,365
nloc: 2 | token_counts: 15 | fun_name: get_config_var
url: https://github.com/pypa/pipenv.git
commit_message: Vendor in pip 22.1.2
n_whitespaces: 10 | n_ast_errors: 0 | d_id: 3,789 | ast_levels: 9

complexity: 2 | n_identifiers: 26
code:

def test_to_device(self):
    torch, _ = try_import_torch()

    # sample batch includes
    #   a numpy array (a)
    #   a nested stucture of dict, tuple and lists (b) of numpys and None
    #   info dict
    #   a nested structure that ends up with tensors and ints(c)
    #   a tensor with float64 values (d)
    #   a float64 tensor with possibly wrong device (depends on if cuda available)
    #   repeated value object with np.array leaves (f)

    cuda_available = int(os.environ.get("RLLIB_NUM_GPUS", "0")) > 0
    cuda_if_possible = torch.device("cuda:0" if cuda_available else "cpu")
    s = SampleBatch(
        {
            "a": np.array([1, 2]),
            "b": {"c": (np.array([4, 5]), np.array([5, 6]))},
            "c": {"d": torch.Tensor([1, 2]), "g": (torch.Tensor([3, 4]), 1)},
            "d": torch.Tensor([1.0, 2.0]).double(),
            "e": torch.Tensor([1.0, 2.0]).double().to(cuda_if_possible),
            "f": RepeatedValues(np.array([[1, 2, 0, 0]]), lengths=[2], max_len=4),
            SampleBatch.SEQ_LENS: np.array([2, 3, 1]),
            "state_in_0": np.array([1.0, 3.0, 4.0]),
            # INFO can have arbitrary elements, others need to conform in size
            SampleBatch.INFOS: np.array([{"a": 1}, {"b": [1, 2]}, {"c": None}]),
        }
    )

    # inplace operation for sample_batch
    s.to_device(cuda_if_possible, framework="torch")

path: rllib/policy/tests/test_sample_batch.py | n_ast_nodes: 439 | repo: ray
documentation:
{ "docstring": "Tests whether to_device works properly under different circumstances", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
n_words: 155 | language: Python | vocab_size: 125
commit_id: 2fe96302d962b2372b12d4d1584b43a3e953bca8 | file_name: test_sample_batch.py | id: 136,086
nloc: 27 | token_counts: 432 | fun_name: test_to_device
url: https://github.com/ray-project/ray.git
commit_message: [RLlib] Enable counting of SampleBatch length by traversing nested structures (#30067) Signed-off-by: Artur Niederfahrenhorst <[email protected]>
n_whitespaces: 453 | n_ast_errors: 0 | d_id: 30,823 | ast_levels: 17

complexity: 1 | n_identifiers: 7
code:

def parsing_hooks(cls) -> Tuple[Type["Block"], Type["Sentence"], Type["Statements"]]:
    return Block, Sentence, Statements

path: certbot-nginx/certbot_nginx/_internal/parser_obj.py | n_ast_nodes: 50 | repo: certbot
documentation:
{ "docstring": "Returns object types that this class should be able to `parse` recusrively.\n The order of the objects indicates the order in which the parser should\n try to parse each subitem.\n :returns: A list of Parsable classes.\n :rtype list:\n ", "language": "en", "n_whitespaces": 73, "n_words": 38, "vocab_size": 32 }
n_words: 10 | language: Python | vocab_size: 10
commit_id: 16aad35d31a887dab157f9d4f5e0fe9218d06064 | file_name: parser_obj.py | id: 186,612
nloc: 8 | token_counts: 30 | fun_name: parsing_hooks
url: https://github.com/certbot/certbot.git
commit_message:
Fully type certbot-nginx module (#9124) * Work in progress * Fix type * Work in progress * Work in progress * Work in progress * Work in progress * Work in progress * Oups. * Fix typing in UnspacedList * Fix logic * Finish typing * List certbot-nginx as fully typed in tox * Fix lint * Fix checks * Organize imports * Fix typing for Python 3.6 * Fix checks * Fix lint * Update certbot-nginx/certbot_nginx/_internal/configurator.py Co-authored-by: alexzorin <[email protected]> * Update certbot-nginx/certbot_nginx/_internal/configurator.py Co-authored-by: alexzorin <[email protected]> * Fix signature of deploy_cert regarding the installer interface * Update certbot-nginx/certbot_nginx/_internal/obj.py Co-authored-by: alexzorin <[email protected]> * Fix types * Update certbot-nginx/certbot_nginx/_internal/parser.py Co-authored-by: alexzorin <[email protected]> * Precise type * Precise _coerce possible inputs/outputs * Fix type * Update certbot-nginx/certbot_nginx/_internal/http_01.py Co-authored-by: ohemorange <[email protected]> * Fix type * Remove an undesirable implementation. * Fix type Co-authored-by: alexzorin <[email protected]> Co-authored-by: ohemorange <[email protected]>
n_whitespaces: 24 | n_ast_errors: 0 | d_id: 45,524 | ast_levels: 7

complexity: 1 | n_identifiers: 2
code:

def disable_plumbum():
    with patch("plumbum.local"), patch("plumbum.colors"):
        yield

path: tests/components/habitica/conftest.py | n_ast_nodes: 39 | repo: core
documentation:
{ "docstring": "Disable plumbum in tests as it can cause the test suite to fail.\n\n plumbum can leave behind PlumbumTimeoutThreads\n ", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 16 }
n_words: 6 | language: Python | vocab_size: 6
commit_id: c96781a7957e3887f55cd669002b333539c834c3 | file_name: conftest.py | id: 298,338
nloc: 3 | token_counts: 17 | fun_name: disable_plumbum
url: https://github.com/home-assistant/core.git
commit_message: Prevent plumbum from causing the testsuite to fail (#70400)
n_whitespaces: 19 | n_ast_errors: 0 | d_id: 97,282 | ast_levels: 10

complexity: 1 | n_identifiers: 2
code:

def histogram2d(self):
    return self["histogram2d"]

path: packages/python/plotly/plotly/graph_objs/layout/template/_data.py | n_ast_nodes: 22 | repo: plotly.py
documentation:
{ "docstring": "\n The 'histogram2d' property is a tuple of instances of\n Histogram2d that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2d\n - A list or tuple of dicts of string/value properties that\n will be passed to the Histogram2d constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Histogram2d]\n ", "language": "en", "n_whitespaces": 131, "n_words": 48, "vocab_size": 33 }
n_words: 4 | language: Python | vocab_size: 4
commit_id: 43e3a4011080911901176aab919c0ecf5046ddd3 | file_name: _data.py | id: 232,591
nloc: 2 | token_counts: 11 | fun_name: histogram2d
url: https://github.com/plotly/plotly.py.git
commit_message: switch to black .22
n_whitespaces: 18 | n_ast_errors: 0 | d_id: 64,035 | ast_levels: 7

complexity: 6 | n_identifiers: 16
code:

def __getattr__(self, name):
    attr = None
    if name.startswith('do_'):
        module = name.replace('do_', '')
        if module_loader.find_plugin(module):
            setattr(self, name, lambda arg, module=module: self.default(module + ' ' + arg))
            attr = object.__getattr__(self, name)
    elif name.startswith('help_'):
        module = name.replace('help_', '')
        if module_loader.find_plugin(module):
            setattr(self, name, lambda module=module: self.helpdefault(module))
            attr = object.__getattr__(self, name)

    if attr is None:
        raise AttributeError(f"{self.__class__} does not have a {name} attribute")

    return attr

path: lib/ansible/cli/console.py | n_ast_nodes: 240 | repo: ansible
documentation:
{ "docstring": " handle not found to populate dynamically a module function if module matching name exists ", "language": "en", "n_whitespaces": 15, "n_words": 14, "vocab_size": 13 }
n_words: 60 | language: Python | vocab_size: 38
commit_id: 34f8168afc1d7047c47adec3730c591a58f4f899 | file_name: console.py | id: 267,520
nloc: 15 | token_counts: 138 | fun_name: __getattr__
url: https://github.com/ansible/ansible.git
commit_message: ansible-console fixes (#78064) * list collection task actions too * dynamically add execute/help functions when module is found * handle redirection and short names
n_whitespaces: 217 | n_ast_errors: 0 | d_id: 78,939 | ast_levels: 17

complexity: 1 | n_identifiers: 6
code:

def test_CategoricalSelector_fit():
    op = CategoricalSelector()
    ret_op = op.fit(iris_data)

    assert ret_op==op

path: tests/feature_transformers_tests.py | n_ast_nodes: 40 | repo: tpot
documentation:
{ "docstring": "Assert that fit() in CategoricalSelector does nothing.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
n_words: 10 | language: Python | vocab_size: 9
commit_id: 388616b6247ca4ea8de4e2f340d6206aee523541 | file_name: feature_transformers_tests.py | id: 181,640
nloc: 4 | token_counts: 22 | fun_name: test_CategoricalSelector_fit
url: https://github.com/EpistasisLab/tpot.git
commit_message: Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
n_whitespaces: 22 | n_ast_errors: 0 | d_id: 43,428 | ast_levels: 8

complexity: 6 | n_identifiers: 25
code:

def execute():
    frappe.reload_doc("manufacturing", "doctype", "job_card_time_log")

    if frappe.db.table_exists("Job Card") and frappe.get_meta("Job Card").has_field(
        "actual_start_date"
    ):
        time_logs = []
        for d in frappe.get_all(
            "Job Card",
            fields=["actual_start_date", "actual_end_date", "time_in_mins", "name", "for_quantity"],
            filters={"docstatus": ("<", 2)},
        ):
            if d.actual_start_date:
                time_logs.append(
                    [
                        d.actual_start_date,
                        d.actual_end_date,
                        d.time_in_mins,
                        d.for_quantity,
                        d.name,
                        "Job Card",
                        "time_logs",
                        frappe.generate_hash("", 10),
                    ]
                )

        if time_logs:
            frappe.db.sql(
                .format(values=",".join(["%s"] * len(time_logs))),
                tuple(time_logs),
            )

        frappe.reload_doc("manufacturing", "doctype", "job_card")
        frappe.db.sql(
        )

path: erpnext/patches/v11_1/make_job_card_time_logs.py | n_ast_nodes: 304 | repo: erpnext
documentation:
{ "docstring": " INSERT INTO\n `tabJob Card Time Log`\n (from_time, to_time, time_in_mins, completed_qty, parent, parenttype, parentfield, name)\n values {values}\n update `tabJob Card` set total_completed_qty = for_quantity,\n total_time_in_mins = time_in_mins where docstatus < 2 ", "language": "en", "n_whitespaces": 103, "n_words": 30, "vocab_size": 28 }
n_words: 62 | language: Python | vocab_size: 52
commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | file_name: make_job_card_time_logs.py | id: 66,583
nloc: 40 | token_counts: 176 | fun_name: execute
url: https://github.com/frappe/erpnext.git
commit_message: style: format code with black
n_whitespaces: 28 | n_ast_errors: 0 | d_id: 14,230 | ast_levels: 19

complexity: 1 | n_identifiers: 6
code:

def list_indexes(self) -> List[str]:
    return list(self._indexes)

path: src/datasets/search.py | n_ast_nodes: 31 | repo: datasets
documentation:
{ "docstring": "List the `colindex_nameumns`/identifiers of all the attached indexes.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
n_words: 6 | language: Python | vocab_size: 6
commit_id: cd3169f3f35afcf73a36a8276113e1881d92e5e0 | file_name: search.py | id: 106,063
nloc: 3 | token_counts: 18 | fun_name: list_indexes
url: https://github.com/huggingface/datasets.git
commit_message: Clean up Dataset and DatasetDict (#5344) * clean up docstrings * make style * apply review Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]>
n_whitespaces: 20 | n_ast_errors: 0 | d_id: 22,272 | ast_levels: 8

complexity: 2 | n_identifiers: 15
code:

def forward_loss(self, pixel_values, pred, mask):
    target = self.patchify(pixel_values)
    if self.config.norm_pix_loss:
        mean = target.mean(dim=-1, keepdim=True)
        var = target.var(dim=-1, keepdim=True)
        target = (target - mean) / (var + 1.0e-6) ** 0.5

    loss = (pred - target) ** 2
    loss = loss.mean(dim=-1)  # [N, L], mean loss per patch

    loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
    return loss

path: src/transformers/models/vit_mae/modeling_vit_mae.py | n_ast_nodes: 177 | repo: transformers
documentation:
{ "docstring": "\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values.\n pred (`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:\n Predicted pixel values.\n mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Tensor indicating which patches are masked (1) and which are not (0).\n\n Returns:\n `torch.FloatTensor`: Pixel reconstruction loss.\n ", "language": "en", "n_whitespaces": 157, "n_words": 46, "vocab_size": 34 }
n_words: 61 | language: Python | vocab_size: 42
commit_id: b681e12d5963490d29c2a77ba7346ee050e46def | file_name: modeling_vit_mae.py | id: 31,423
nloc: 10 | token_counts: 117 | fun_name: forward_loss
url: https://github.com/huggingface/transformers.git
commit_message: [ViTMAE] Fix docstrings and variable names (#17710) * Fix docstrings and variable names * Rename x to something better * Improve messages * Fix docstrings and add test for greyscale images Co-authored-by: Niels Rogge <[email protected]>
n_whitespaces: 145 | n_ast_errors: 0 | d_id: 5,740 | ast_levels: 12

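The masked-patch loss above can be reproduced standalone; a minimal sketch with made-up shapes (batch of 2, 4 patches, 8 pixel values per patch), assuming torch is installed:

    import torch

    target = torch.randn(2, 4, 8)  # patchified pixel targets
    pred = torch.randn(2, 4, 8)    # model reconstructions
    mask = torch.tensor([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0]])  # 1 = masked patch

    loss = (pred - target) ** 2
    loss = loss.mean(dim=-1)                 # [N, L]: mean squared error per patch
    loss = (loss * mask).sum() / mask.sum()  # average only over the masked patches
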
complexity: 1 | n_identifiers: 5
code:

def get_latest_progress(self) -> "StepID":
    return asyncio_run(self._get(self._key_workflow_progress(), True))["step_id"]

path: python/ray/workflow/workflow_storage.py | n_ast_nodes: 49 | repo: ray
documentation:
{ "docstring": "Load the latest progress of a workflow. This is used by a\n virtual actor.\n\n Raises:\n DataLoadError: if we fail to load the progress.\n\n Returns:\n The step that contains the latest output.\n ", "language": "en", "n_whitespaces": 81, "n_words": 31, "vocab_size": 27 }
n_words: 7 | language: Python | vocab_size: 7
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | file_name: workflow_storage.py | id: 133,506
nloc: 11 | token_counts: 27 | fun_name: get_latest_progress
url: https://github.com/ray-project/ray.git
commit_message: [CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
n_whitespaces: 21 | n_ast_errors: 0 | d_id: 30,039 | ast_levels: 12

complexity: 3 | n_identifiers: 5
code:

def can_link_svml():
    if NPY_DISABLE_SVML:
        return False
    machine = platform.machine()
    system = platform.system()
    return "x86_64" in machine and system == "Linux"

path: numpy/core/setup.py | n_ast_nodes: 60 | repo: numpy
documentation:
{ "docstring": "SVML library is supported only on x86_64 architecture and currently\n only on linux\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 11 }
n_words: 20 | language: Python | vocab_size: 16
commit_id: 50d5f1af8406165128a8567b0796ce244542f70c | file_name: setup.py | id: 159,691
nloc: 6 | token_counts: 32 | fun_name: can_link_svml
url: https://github.com/numpy/numpy.git
commit_message: BLD: Add NPY_DISABLE_SVML env var to opt out of SVML
n_whitespaces: 42 | n_ast_errors: 0 | d_id: 38,396 | ast_levels: 8

complexity: 2 | n_identifiers: 33
code:

def call_cglosers(self, other_args):
    parser = argparse.ArgumentParser(
        prog="cglosers",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=,
    )

    parser.add_argument(
        "-p",
        "--period",
        dest="period",
        type=str,
        help="time period, one from {14d,1h,1y,200d,24h,30d,7d}",
        default="1h",
        choices=pycoingecko_model.API_PERIODS,
    )

    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=check_positive,
        help="Number of records to display",
        default=15,
    )

    parser.add_argument(
        "-s",
        "--sort",
        dest="sortby",
        nargs="+",
        help="Sort by given column. Default: Market Cap Rank",
        default="Market Cap Rank",
    )

    ns_parser = parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )

    if ns_parser:
        pycoingecko_view.display_losers(
            period=ns_parser.period,
            top=ns_parser.limit,
            export=ns_parser.export,
            sortby=" ".join(ns_parser.sortby),
        )

path: gamestonk_terminal/cryptocurrency/discovery/discovery_controller.py | n_ast_nodes: 261 | repo: OpenBBTerminal
documentation:
{ "docstring": "Process losers command\n Shows Largest Losers - coins which price dropped the most in given period\n You can use parameter --period to set which timeframe are you interested in: {14d,1h,1y,200d,24h,30d,7d}\n You can look on only N number of records with --limit,\n You can sort by {Symbol,Name,Price [$],Market Cap [$],Market Cap Rank,Volume [$]} with --sort.\n ", "language": "en", "n_whitespaces": 105, "n_words": 54, "vocab_size": 46 }
n_words: 69 | language: Python | vocab_size: 59
commit_id: 4501dfd442d371150b8785d379c5354095b6954b | file_name: discovery_controller.py | id: 282,069
nloc: 47 | token_counts: 161 | fun_name: call_cglosers
url: https://github.com/OpenBB-finance/OpenBBTerminal.git
commit_message:
Crypto features: Replace coingecko scrapping (#1156) * replaced cgcategories with api * added coingecko categories * refactoring commands to use api, added coins to cryptocontroller and merged find and coins * autocompletion for coins * removed unused vars * added dappradar features * refactoring commands position * refactoring commands position * adding visual commands and fixed report * skipped tests for now * lint notebook * correct report * black formatter keeps crying because notebook * removed unused imports * Fixed black * Keep kernel metadata 'cause it's required by papermill * Change jupyter cleanup hook to one based on nbconvert * Try fix the hook I just broke * Fix trailing commas in the crypto notebook * Change the jupyter hook to a one that's featured on pre-commit's page * Format report notebook and test new notebook hook * Black the notebook * Remove deleted functions from the crypto discovery API * Remove deleted functions from the crypto overview API * replaced print for console print and removed print from table * replaced print for console print and removed print from table * auto completion + sort for all discovery commands * replacing help messages * fix linting * added docs and removed unused commands * added todos and fixed help messages * lint * pr issues fixed * updated tests * tests merge * replaced with new rich table function Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: Theodore Aptekarev <[email protected]>
n_whitespaces: 499 | n_ast_errors: 0 | d_id: 84,032 | ast_levels: 13

complexity: 9 | n_identifiers: 29
code:

def fit(self, df):
    # threshold - items below this number get set to zero in cooccurrence counts
    df.createOrReplaceTempView(self.f("{prefix}df_train_input"))

    if self.timedecay_formula:
        # WARNING: previously we would take the last value in training dataframe and set it
        # as a matrix U element for each user-item pair. Now with time decay, we compute
        # a sum over ratings given by a user in the case when T=np.inf, so user gets a
        # cumulative sum of ratings for a particular item and not the last rating.
        # Time Decay does a group by on user item pairs and applies the formula for
        # time decay there. Time T parameter is in days and input time is in seconds,
        # so we do dt/60/(T*24*60)=dt/(T*24*3600)
        # the following is the query which we want to run
        query = self.f(
        )

        # replace with timedecayed version
        df = self.spark.sql(query)
    else:
        # since SQL is case-insensitive, this check needs to be performed similar
        if self.header["col_timestamp"].lower() in [
            s.name.lower() for s in df.schema
        ]:
            # we need to de-duplicate items by using the latest item
            query = self.f(
            )
            df = self.spark.sql(query)

    df.createOrReplaceTempView(self.f("{prefix}df_train"))

    log.info("sarplus.fit 1/2: compute item cooccurrences...")

    # compute cooccurrence above minimum threshold
    query = self.f(
    )
    item_cooccurrence = self.spark.sql(query)
    item_cooccurrence.write.mode("overwrite").saveAsTable(
        self.f("{prefix}item_cooccurrence")
    )

    # compute the diagonal used later for Jaccard and Lift
    if self.similarity_type == SIM_LIFT or self.similarity_type == SIM_JACCARD:
        item_marginal = self.spark.sql(
            self.f(
                "SELECT i1 i, value AS margin FROM {prefix}item_cooccurrence WHERE i1 = i2"
            )
        )
        item_marginal.createOrReplaceTempView(self.f("{prefix}item_marginal"))

    if self.similarity_type == SIM_COOCCUR:
        self.item_similarity = item_cooccurrence
    elif self.similarity_type == SIM_JACCARD:
        query = self.f(
        )
        self.item_similarity = self.spark.sql(query)
    elif self.similarity_type == SIM_LIFT:
        query = self.f(
        )
        self.item_similarity = self.spark.sql(query)
    else:
        raise ValueError(
            "Unknown similarity type: {0}".format(self.similarity_type)
        )

    # store upper triangular
    log.info(
        "sarplus.fit 2/2: compute similarity metric %s..." % self.similarity_type
    )
    self.item_similarity.write.mode("overwrite").saveAsTable(
        self.f("{prefix}item_similarity_upper")
    )

    # expand upper triangular to full matrix
    query = self.f(
    )
    self.item_similarity = self.spark.sql(query)
    self.item_similarity.write.mode("overwrite").saveAsTable(
        self.f("{prefix}item_similarity")
    )

    # free space
    self.spark.sql(self.f("DROP TABLE {prefix}item_cooccurrence"))
    self.spark.sql(self.f("DROP TABLE {prefix}item_similarity_upper"))

    self.item_similarity = self.spark.table(self.f("{prefix}item_similarity"))

path: contrib/sarplus/python/pysarplus/SARPlus.py | n_ast_nodes: 669 | repo: recommenders
documentation:
{ "docstring": "Main fit method for SAR.\n\n Expects the dataframes to have row_id, col_id columns which are indexes,\n i.e. contain the sequential integer index of the original alphanumeric user and item IDs.\n Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default.\n\n Arguments:\n df (pySpark.DataFrame): input dataframe which contains the index of users and items.\n \n SELECT\n {col_user}, {col_item}, \n SUM({col_rating} * EXP(-log(2) * (latest_timestamp - CAST({col_timestamp} AS long)) / ({time_decay_coefficient} * 3600 * 24))) as {col_rating}\n FROM {prefix}df_train_input,\n (SELECT CAST(MAX({col_timestamp}) AS long) latest_timestamp FROM {prefix}df_train_input)\n GROUP BY {col_user}, {col_item} \n CLUSTER BY {col_user} \n \n SELECT {col_user}, {col_item}, {col_rating}\n FROM\n (\n SELECT\n {col_user}, {col_item}, {col_rating}, \n ROW_NUMBER() OVER (PARTITION BY {col_user}, {col_item} ORDER BY {col_timestamp} DESC) latest\n FROM {prefix}df_train_input\n )\n WHERE latest = 1\n \n SELECT A.{col_item} i1, B.{col_item} i2, COUNT(*) value\n FROM {prefix}df_train A INNER JOIN {prefix}df_train B\n ON A.{col_user} = B.{col_user} AND A.{col_item} <= b.{col_item} \n GROUP BY A.{col_item}, B.{col_item}\n HAVING COUNT(*) >= {threshold}\n CLUSTER BY i1, i2\n \n SELECT i1, i2, value / (M1.margin + M2.margin - value) AS value\n FROM {prefix}item_cooccurrence A \n INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i \n INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i\n CLUSTER BY i1, i2\n \n SELECT i1, i2, value / (M1.margin * M2.margin) AS value\n FROM {prefix}item_cooccurrence A \n INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i \n INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i\n CLUSTER BY i1, i2\n \n SELECT i1, i2, value\n FROM\n (\n (SELECT i1, i2, value FROM {prefix}item_similarity_upper)\n UNION ALL\n (SELECT i2 i1, i1 i2, value FROM {prefix}item_similarity_upper WHERE i1 <> i2)\n )\n CLUSTER BY i1\n ", "language": "en", "n_whitespaces": 854, "n_words": 255, "vocab_size": 133 }
n_words: 329 | language: Python | vocab_size: 175
commit_id: 2b98f1045321475f6537986af134fb53f8320268 | file_name: SARPlus.py | id: 39,221
nloc: 109 | token_counts: 375 | fun_name: fit
url: https://github.com/microsoft/recommenders.git
commit_message: Correct typos
n_whitespaces: 1,172 | n_ast_errors: 0 | d_id: 7,143 | ast_levels: 14

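The time-decay weighting in the SQL above, SUM(rating * EXP(-log(2) * dt / (T * 24 * 3600))), is a half-life decay and is easy to sanity-check in plain Python; the numbers below are hypothetical:

    import math

    def decayed_rating(rating, age_seconds, half_life_days):
        # Each rating loses half its weight every `half_life_days` days.
        return rating * math.exp(-math.log(2) * age_seconds / (half_life_days * 24 * 3600))

    # A 5-star rating given exactly one half-life (30 days) ago contributes ~2.5.
    print(decayed_rating(5.0, 30 * 24 * 3600, 30))
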
complexity: 1 | n_identifiers: 16
code:

def iterate_binary(self, k):
    bin_list = Subset.bitlist_from_subset(self.subset, self.superset)
    n = (int(''.join(bin_list), 2) + k) % 2**self.superset_size
    bits = bin(n)[2:].rjust(self.superset_size, '0')
    return Subset.subset_from_bitlist(self.superset, bits)

path: sympy/combinatorics/subsets.py | n_ast_nodes: 120 | repo: sympy
documentation:
{ "docstring": "\n This is a helper function. It iterates over the\n binary subsets by ``k`` steps. This variable can be\n both positive or negative.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])\n >>> a.iterate_binary(-2).subset\n ['d']\n >>> a = Subset(['a', 'b', 'c'], ['a', 'b', 'c', 'd'])\n >>> a.iterate_binary(2).subset\n []\n\n See Also\n ========\n\n next_binary, prev_binary\n ", "language": "en", "n_whitespaces": 172, "n_words": 59, "vocab_size": 45 }
n_words: 22 | language: Python | vocab_size: 20
commit_id: 498015021131af4dbb07eb110e5badaba8250c7b | file_name: subsets.py | id: 196,217
nloc: 5 | token_counts: 75 | fun_name: iterate_binary
url: https://github.com/sympy/sympy.git
commit_message: Updated import locations
n_whitespaces: 57 | n_ast_errors: 0 | d_id: 47,717 | ast_levels: 14

complexity: 4 | n_identifiers: 18
code:

def _get_input_shape(self):
    arch = self.config["enc_architecture"]
    enforce_size = _MODEL_MAPPING[arch].get("enforce_for_weights", False)
    default_size = _MODEL_MAPPING[arch]["default_size"]
    scaling = self.config["enc_scaling"] / 100

    min_size = _MODEL_MAPPING[arch].get("min_size", 32)
    size = int(max(min_size, min(default_size, ((default_size * scaling) // 16) * 16)))

    if self.config["enc_load_weights"] and enforce_size and scaling != 1.0:
        logger.warning("%s requires input size to be %spx when loading imagenet weights. "
                       "Adjusting input size from %spx to %spx",
                       arch, default_size, size, default_size)
        retval = (default_size, default_size, 3)
    else:
        retval = (size, size, 3)

    logger.debug("Encoder input set to: %s", retval)
    return retval

path: plugins/train/model/phaze_a.py | n_ast_nodes: 232 | repo: faceswap
documentation:
{ "docstring": " Obtain the input shape for the model.\n\n Input shape is calculated from the selected Encoder's input size, scaled to the user\n selected Input Scaling, rounded down to the nearest 16 pixels.\n\n Notes\n -----\n Some models (NasNet) require the input size to be of a certain dimension if loading\n imagenet weights. In these instances resize inputs and raise warning message\n\n Returns\n -------\n tuple\n The shape tuple for the input size to the Phaze-A model\n ", "language": "en", "n_whitespaces": 155, "n_words": 73, "vocab_size": 53 }
n_words: 82 | language: Python | vocab_size: 60
commit_id: 0189029dbaad486e623353ee4a8451af8c85f4e4 | file_name: phaze_a.py | id: 100,480
nloc: 16 | token_counts: 139 | fun_name: _get_input_shape
url: https://github.com/deepfakes/faceswap.git
commit_message: Phaze-A: Add MobileNetV3 encoder
n_whitespaces: 244 | n_ast_errors: 0 | d_id: 19,953 | ast_levels: 17

complexity: 2 | n_identifiers: 5
code:

def has_free(self):
    return len(self._idle_actors) > 0 and len(self._pending_submits) == 0

path: python/ray/util/actor_pool.py | n_ast_nodes: 41 | repo: ray
documentation:
{ "docstring": "Returns whether there are any idle actors available.\n\n Returns:\n True if there are any idle actors and no pending submits.\n\n Examples:\n >>> @ray.remote # doctest: +SKIP\n >>> class Actor: # doctest: +SKIP\n ... ... # doctest: +SKIP\n >>> a1 = Actor.remote() # doctest: +SKIP\n >>> pool = ActorPool(a1) # doctest: +SKIP\n >>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP\n >>> print(pool.has_free()) # doctest: +SKIP\n False\n >>> print(pool.get_next()) # doctest: +SKIP\n 2\n >>> print(pool.has_free()) # doctest: +SKIP\n True\n ", "language": "en", "n_whitespaces": 246, "n_words": 78, "vocab_size": 38 }
n_words: 10 | language: Python | vocab_size: 9
commit_id: 60054995e65304fb14e6d0ab69bdec07aa9389fe | file_name: actor_pool.py | id: 147,395
nloc: 2 | token_counts: 24 | fun_name: has_free
url: https://github.com/ray-project/ray.git
commit_message: [docs] fix doctests and activate CI (#23418)
n_whitespaces: 24 | n_ast_errors: 0 | d_id: 33,944 | ast_levels: 10

complexity: 2 | n_identifiers: 12
code:

def stop_instances(self, instance_ids, stopped_mode="StopCharging"):
    request = StopInstancesRequest()
    request.set_InstanceIds(instance_ids)
    request.set_StoppedMode(stopped_mode)
    response = self._send_request(request)
    if response is None:
        logging.error("stop_instances failed")

path: python/ray/autoscaler/_private/aliyun/utils.py | n_ast_nodes: 84 | repo: ray
documentation:
{ "docstring": "Stop one or more ECS instances that are in the Running state.\n\n :param instance_ids: The IDs of instances.\n :param stopped_mode: Specifies whether billing for the instance\n continues after the instance is stopped.\n ", "language": "en", "n_whitespaces": 81, "n_words": 32, "vocab_size": 28 }
n_words: 18 | language: Python | vocab_size: 16
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | file_name: utils.py | id: 130,358
nloc: 7 | token_counts: 48 | fun_name: stop_instances
url: https://github.com/ray-project/ray.git
commit_message: [CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
n_whitespaces: 71 | n_ast_errors: 0 | d_id: 29,244 | ast_levels: 10

complexity: 11 | n_identifiers: 6
code:

def check_validation_split_arg(validation_split, subset, shuffle, seed):
    if validation_split and not 0 < validation_split < 1:
        raise ValueError(
            '`validation_split` must be between 0 and 1, received: %s' %
            (validation_split,))
    if (validation_split or subset) and not (validation_split and subset):
        raise ValueError(
            'If `subset` is set, `validation_split` must be set, and inversely.')
    if subset not in ('training', 'validation', 'both', None):
        raise ValueError('`subset` must be either "training", '
                         '"validation" or "both", received: %s' % (subset,))
    if validation_split and shuffle and seed is None:
        raise ValueError(
            'If using `validation_split` and shuffling the data, you must provide '
            'a `seed` argument, to make sure that there is no overlap between the '
            'training and validation subset.')

path: keras/utils/dataset_utils.py | n_ast_nodes: 159 | repo: keras
documentation:
{ "docstring": "Raise errors in case of invalid argument values.\n\n Args:\n validation_split: float between 0 and 1, fraction of data to reserve for\n validation.\n subset: One of \"training\", \"validation\" or \"both\". Only used if `validation_split`\n is set.\n shuffle: Whether to shuffle the data. Either True or False.\n seed: random seed for shuffling and transformations.\n ", "language": "en", "n_whitespaces": 76, "n_words": 52, "vocab_size": 46 }
n_words: 109 | language: Python | vocab_size: 68
commit_id: c52c11968b096580577c75b169f51c5b39002106 | file_name: dataset_utils.py | id: 269,208
nloc: 16 | token_counts: 92 | fun_name: check_validation_split_arg
url: https://github.com/keras-team/keras.git
commit_message: Updated tests for subset="both"
n_whitespaces: 188 | n_ast_errors: 0 | d_id: 79,952 | ast_levels: 12

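A quick illustration of the argument combinations the validator above accepts and rejects, assuming check_validation_split_arg is importable:

    # Valid: split plus subset, with a seed because shuffling is on.
    check_validation_split_arg(0.2, "training", shuffle=True, seed=42)

    # Invalid: validation_split set without subset raises ValueError.
    try:
        check_validation_split_arg(0.2, None, shuffle=False, seed=None)
    except ValueError as err:
        print(err)
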
complexity: 6 | n_identifiers: 25
code:

def _dedupe_indices(new, exclude):
    exclude = set(exclude)
    dums_new = set(get_dummy_indices(new))

    conflicts = dums_new.intersection(exclude)
    if len(conflicts) == 0:
        return None

    exclude.update(dums_new)
    self_args_free = [(i, None) for i in exclude]
    gen = _IndexStructure._get_generator_for_dummy_indices(self_args_free)
    repl = {}
    for d in conflicts:
        if -d in repl.keys():
            continue
        newname = gen(d.tensor_index_type)
        new_d = d.func(newname, *d.args[1:])
        repl[d] = new_d
        repl[-d] = -new_d

    if len(repl) == 0:
        return None

    new_renamed = new._replace_indices(repl)
    return new_renamed

path: sympy/tensor/tensor.py | n_ast_nodes: 240 | repo: sympy
documentation:
{ "docstring": "\n exclude: set\n new: TensExpr\n\n If ``new`` has any dummy indices that are in ``exclude``, return a version\n of new with those indices replaced. If no replacements are needed,\n return None\n\n \n ``self_args_free`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``.\n Since the latter does not use the index position for anything, we just\n set it as ``None`` here.\n ", "language": "en", "n_whitespaces": 127, "n_words": 55, "vocab_size": 48 }
n_words: 66 | language: Python | vocab_size: 44
commit_id: 22174995eac1f437c5f4abe0232760877daf586f | file_name: tensor.py | id: 200,579
nloc: 26 | token_counts: 148 | fun_name: _dedupe_indices
url: https://github.com/sympy/sympy.git
commit_message: TensMul._dedupe_indices: remove index_structure arg _get_generator_for_dummy_indices is a staticmethod, and so I can just call _IndexStructure._get_generator_for_dummy_indices
n_whitespaces: 257 | n_ast_errors: 0 | d_id: 49,714 | ast_levels: 13

complexity: 3 | n_identifiers: 13
code:

def process(self) -> None:
    logger.info("[EXTRACT FACES]")  # Tidy up cli output
    self._check_folder()
    if self._is_legacy:
        self._legacy_check()
    self._saver = ImagesSaver(self._faces_dir, as_bytes=True)

    if self._min_size > 0:
        logger.info("Only selecting faces that have been resized from a minimum resolution "
                    "of %spx", self._min_size)

    self._export_faces()

path: tools/alignments/jobs.py | n_ast_nodes: 117 | repo: faceswap
documentation:
{ "docstring": " Run the re-extraction from Alignments file process", "language": "en", "n_whitespaces": 7, "n_words": 7, "vocab_size": 7 }
n_words: 39 | language: Python | vocab_size: 38
commit_id: a9908b46f77dc66ac7efe7100ea0eed4b1f2b460 | file_name: jobs.py | id: 100,672
nloc: 11 | token_counts: 66 | fun_name: process
url: https://github.com/deepfakes/faceswap.git
commit_message: Alignments tool - Replace 'extract-large' with 'min-size'
n_whitespaces: 134 | n_ast_errors: 0 | d_id: 20,130 | ast_levels: 11

complexity: 1 | n_identifiers: 6
code:

def alter_object(self, obj, request, url_args, url_kwargs):
    return obj

path: netbox/netbox/views/generic/object_views.py | n_ast_nodes: 24 | repo: netbox
documentation:
{ "docstring": "\n Provides a hook for views to modify an object before it is processed. For example, a parent object can be\n defined given some parameter from the request URL.\n\n Args:\n obj: The object being edited\n request: The current request\n url_args: URL path args\n url_kwargs: URL path kwargs\n ", "language": "en", "n_whitespaces": 119, "n_words": 46, "vocab_size": 39 }
n_words: 8 | language: Python | vocab_size: 8
commit_id: 54834c47f8870e7faabcd847c3270da0bd3d2884 | file_name: object_views.py | id: 264,309
nloc: 2 | token_counts: 16 | fun_name: alter_object
url: https://github.com/netbox-community/netbox.git
commit_message: Refactor generic views; add plugins dev documentation
n_whitespaces: 22 | n_ast_errors: 0 | d_id: 77,680 | ast_levels: 6

complexity: 1 | n_identifiers: 12
code:

def test_load_existing_stream(self) -> None:
    self._insert_rows("foobar1", "first", 3)
    self._insert_rows("foobar2", "second", 3)
    self._insert_rows("foobar2", "second", 1, update_stream_table=False)

    first_id_gen = self._create_id_generator("first", writers=["first", "second"])
    second_id_gen = self._create_id_generator("second", writers=["first", "second"])

    # The first ID gen will notice that it can advance its token to 7 as it
    # has no in progress writes...
    self.assertEqual(first_id_gen.get_positions(), {"first": 7, "second": 6})
    self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7)
    self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 6)
    self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)

    # ... but the second ID gen doesn't know that.
    self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7})
    self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 3)
    self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7)
    self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)

path: tests/storage/test_id_generators.py | n_ast_nodes: 330 | repo: synapse
documentation:
{ "docstring": "Test creating ID gens with multiple tables that have rows from after\n the position in `stream_positions` table.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 17 }
n_words: 79 | language: Python | vocab_size: 61
commit_id: 9d21ecf7ceab55bc19c4457b8b07401b0b1623a7 | file_name: test_id_generators.py | id: 247,794
nloc: 17 | token_counts: 190 | fun_name: test_load_existing_stream
url: https://github.com/matrix-org/synapse.git
commit_message: Add type hints to tests files. (#12256)
n_whitespaces: 198 | n_ast_errors: 0 | d_id: 71,927 | ast_levels: 11

complexity: 1 | n_identifiers: 6
code:

def to_hex(self) -> str:
    return "#%02X%02X%02X" % (self.r, self.g, self.b)

path: bokeh/colors/color.py | n_ast_nodes: 40 | repo: bokeh
documentation:
{ "docstring": " Return a hex color string for this RGB color.\n\n Any alpha value on this color is discarded, only hex color strings for\n the RGB components are returned.\n\n Returns:\n str, ``\"#RRGGBB\"``\n\n ", "language": "en", "n_whitespaces": 70, "n_words": 30, "vocab_size": 24 }
n_words: 10 | language: Python | vocab_size: 10
commit_id: ada85ff1dc6dc1d5857141b3202733870de5c809 | file_name: color.py | id: 211,927
nloc: 11 | token_counts: 24 | fun_name: to_hex
url: https://github.com/bokeh/bokeh.git
commit_message: Bump min sphinx version (#11973) * Bump min sphinx version * checkpoint * comment for fully qualified names
n_whitespaces: 24 | n_ast_errors: 0 | d_id: 53,164 | ast_levels: 8

complexity: 2 | n_identifiers: 7
code:

def scale_module(module, scale):
    for p in module.parameters():
        p.detach().mul_(scale)
    return module

path: modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/nn.py | n_ast_nodes: 49 | repo: PaddleHub
documentation:
{ "docstring": "\n Scale the parameters of a module and return it.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
n_words: 10 | language: Python | vocab_size: 10
commit_id: f4d6e64cdc132ae868699a0ba442f4ab1d304a14 | file_name: nn.py | id: 49,818
nloc: 4 | token_counts: 29 | fun_name: scale_module
url: https://github.com/PaddlePaddle/PaddleHub.git
commit_message: add disco_diffusion_cnclip_vitb16 module
n_whitespaces: 26 | n_ast_errors: 0 | d_id: 9,929 | ast_levels: 11

complexity: 1 | n_identifiers: 3
code:

def settings_file(self):
    return self._settings_file

path: examples/text_summarization/prophetnet/evaluate/cnndm/bs_pyrouge.py | n_ast_nodes: 19 | repo: PaddleNLP
documentation:
{ "docstring": "\n Path of the setttings file, which stores the ROUGE home dir.\n\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
n_words: 4 | language: Python | vocab_size: 4
commit_id: 487162262196bead8d9b4c2306f313b8f64edf9b | file_name: bs_pyrouge.py | id: 322,374
nloc: 2 | token_counts: 10 | fun_name: settings_file
url: https://github.com/PaddlePaddle/PaddleNLP.git
commit_message: Add model Prohetnet (#1698) * add Prohetnet model * update prohetnet * update format * pre commit * add prophetnet example * update tokenizer.py,run_train.sh,train_prophetnet.py * remove evaluate/gigaword/__init__.py Co-authored-by: smallv0221 <[email protected]>
n_whitespaces: 18 | n_ast_errors: 0 | d_id: 118,150 | ast_levels: 6

complexity: 1 | n_identifiers: 13
code:

def get_orderbook(self):
    df = self.__orderbook[
        [
            "Date",
            "Type",
            "Ticker",
            "Side",
            "Price",
            "Quantity",
            "Fees",
            "Investment",
            "Currency",
            "Sector",
            "Industry",
            "Country",
            "Region",
        ]
    ]
    df = df.replace(np.nan, "-")
    df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")
    df.sort_values(by="Date", ascending=False, inplace=True)
    return df

path: openbb_terminal/portfolio/portfolio_model.py | n_ast_nodes: 157 | repo: OpenBBTerminal
documentation:
{ "docstring": "Get formatted transactions\n\n Returns:\n pd.DataFrame: formatted transactions\n ", "language": "en", "n_whitespaces": 32, "n_words": 7, "vocab_size": 5 }
n_words: 33 | language: Python | vocab_size: 28
commit_id: 291e7d69914e9ab8b9bf9b20bb44d971bcedc247 | file_name: portfolio_model.py | id: 285,633
nloc: 22 | token_counts: 87 | fun_name: get_orderbook
url: https://github.com/OpenBB-finance/OpenBBTerminal.git
commit_message:
Add ETF support for portfolio allocation command (#2143) * fill etf and prevent different length errors * auto fill etf sectors and allow 0 value portfolios * allow 0 value portfolios * remove unused folder * allow 1 asset portfolios * split allocation calls by category in controller * comment sector allocation model * get country and region allocations for etf * add comments and black * improve comments * improve comments * allow for 1 category in sector, country and region * add progress bars * linting fix * fix mypy * set default date * fix pylint * add isin on paexport * merge main * auto pre load benchmark * fix rich table for np.float64 * refactor portfolio allocs * refactor alloc command * add isins column * format output alloc * rename variable * fix nan bug * black * add ticker conversion to yf format by isin * warn and removed unsupported ISINs * solve same day trades bug * display bench loading progress * portfolio show * check if valid isins on preprocessing * black * fix bug when region empty * warn when category is empty * reformat preprocessing * codespell * check if ticker is valid * flake8 * fix test * fix bug with trades on holidays Co-authored-by: Jeroen Bouma <[email protected]>
n_whitespaces: 299 | n_ast_errors: 0 | d_id: 85,338 | ast_levels: 11

complexity: 1 | n_identifiers: 5
code:

def _delete_accounting_ledger_entries(voucher_type, voucher_no):
    _delete_gl_entries(voucher_type, voucher_no)
    _delete_pl_entries(voucher_type, voucher_no)

path: erpnext/accounts/utils.py | n_ast_nodes: 33 | repo: erpnext
documentation:
{ "docstring": "\n\tRemove entries from both General and Payment Ledger for specified Voucher\n\t", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
n_words: 7 | language: Python | vocab_size: 6
commit_id: 9209ec59c2216223bc1a7618bd95ec2424434849 | file_name: utils.py | id: 69,488
nloc: 3 | token_counts: 20 | fun_name: _delete_accounting_ledger_entries
url: https://github.com/frappe/erpnext.git
commit_message: refactor: split delete gl utility function into two
n_whitespaces: 4 | n_ast_errors: 0 | d_id: 15,053 | ast_levels: 7

complexity: 1 | n_identifiers: 3
code:

def base(self, base):
    self.set_base(base)

path: lib/matplotlib/ticker.py | n_ast_nodes: 25 | repo: matplotlib
documentation:
{ "docstring": "\n Change the *base* for labeling.\n\n .. warning::\n Should always match the base used for :class:`LogLocator`\n ", "language": "en", "n_whitespaces": 47, "n_words": 15, "vocab_size": 13 }
n_words: 4 | language: Python | vocab_size: 4
commit_id: 1bc33e99efc9e4be433f99c6a74c7e3b30147dac | file_name: ticker.py | id: 108,373
nloc: 2 | token_counts: 14 | fun_name: base
url: https://github.com/matplotlib/matplotlib.git
commit_message: Improve consistency in LogLocator and LogFormatter API
n_whitespaces: 18 | n_ast_errors: 0 | d_id: 23,162 | ast_levels: 7

complexity: 1 | n_identifiers: 8
code:

def test_is_state(hass):
    hass.states.async_set("test.object", "available")
    tpl = template.Template(
        ,
        hass,
    )
    assert tpl.async_render() == "yes"

    tpl = template.Template(
        ,
        hass,
    )
    assert tpl.async_render() is False

    tpl = template.Template(
        ,
        hass,
    )
    assert tpl.async_render() == "yes"

    tpl = template.Template(
        ,
        hass,
    )
    assert tpl.async_render() == "test.object"

path: tests/helpers/test_template.py | n_ast_nodes: 162 | repo: core
documentation:
{ "docstring": "Test is_state method.\n{% if is_state(\"test.object\", \"available\") %}yes{% else %}no{% endif %}\n \n{{ is_state(\"test.noobject\", \"available\") }}\n \n{% if \"test.object\" is is_state(\"available\") %}yes{% else %}no{% endif %}\n \n{{ ['test.object'] | select(\"is_state\", \"available\") | first | default }}\n ", "language": "en", "n_whitespaces": 63, "n_words": 36, "vocab_size": 23 }
n_words: 44 | language: Python | vocab_size: 17
commit_id: f73fc9e3558eca0a1e74a19273a67f8d2bfa8af7 | file_name: test_template.py | id: 289,830
nloc: 30 | token_counts: 92 | fun_name: test_is_state
url: https://github.com/home-assistant/core.git
commit_message: Adds states and state_attr as a filter, adds is_state and is_state_attr as a test. (#79473)
n_whitespaces: 142 | n_ast_errors: 0 | d_id: 88,960 | ast_levels: 9

complexity: 2 | n_identifiers: 9
code:

def get_formatted_field_choices(self, field):
    if "\n" in field.choices:
        choices = map(
            lambda x: (
                x.strip().rstrip(",").strip(),
                x.strip().rstrip(",").strip(),
            ),
            field.choices.split("\r\n"),
        )
    else:
        choices = map(lambda x: (x.strip(), x.strip()), field.choices.split(","))
    return choices

path: wagtail/contrib/forms/forms.py | n_ast_nodes: 172 | repo: wagtail
documentation:
{ "docstring": "\n Returns a list of choices [(string, string),] for the field.\n Split the provided choices into a list, separated by new lines.\n If no new lines in the provided choices, split by commas.\n ", "language": "en", "n_whitespaces": 61, "n_words": 32, "vocab_size": 25 }
n_words: 28 | language: Python | vocab_size: 23
commit_id: 134bd19bef529f0c205a48cedb8574ee0c52d436 | file_name: forms.py | id: 76,983
nloc: 12 | token_counts: 99 | fun_name: get_formatted_field_choices
url: https://github.com/wagtail/wagtail.git
commit_message: add ability for form builder to split choices by newline - fixes #3001 - keep support for comma separated lists if supplied
n_whitespaces: 172 | n_ast_errors: 0 | d_id: 16,608 | ast_levels: 18

complexity: 1 | n_identifiers: 8
code:

def receive_file(filename="example.txt"):
    with open(filename, "wb") as out_file:
        ftp.retrbinary("RETR " + filename, out_file.write, 1024)
    ftp.quit()

path: ftp_send_receive.py | n_ast_nodes: 69 | repo: Python
documentation:
{ "docstring": "\n\tThe file which will be sent via the FTP server\n\tThe file send will be send to the current working directory\n", "language": "en", "n_whitespaces": 19, "n_words": 21, "vocab_size": 15 }
n_words: 14 | language: Python | vocab_size: 14
commit_id: f0af0c43340763724f139fa68aa1e5a9ffe458b4 | file_name: ftp_send_receive.py | id: 22,626
nloc: 4 | token_counts: 36 | fun_name: receive_file
url: https://github.com/geekcomputers/Python.git
commit_message: refactor: clean code Signed-off-by: slowy07 <[email protected]>
n_whitespaces: 30 | n_ast_errors: 0 | d_id: 4,380 | ast_levels: 11

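For context, the send-side counterpart that the docstring alludes to would look roughly like this; a sketch built only on standard-library ftplib calls, with hypothetical host and credentials:

    from ftplib import FTP

    ftp = FTP("ftp.example.com")       # hypothetical server
    ftp.login("username", "password")  # hypothetical credentials

    def send_file(filename="example.txt"):
        with open(filename, "rb") as in_file:
            ftp.storbinary("STOR " + filename, in_file, 1024)
        ftp.quit()
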
complexity: 11 | n_identifiers: 19
code:

def set_layout_engine(self, layout=None, **kwargs):
    if layout is None:
        if mpl.rcParams['figure.autolayout']:
            layout = 'tight'
        elif mpl.rcParams['figure.constrained_layout.use']:
            layout = 'constrained'
        else:
            self._layout_engine = None
            return
    if layout == 'tight':
        new_layout_engine = TightLayoutEngine(**kwargs)
    elif layout == 'constrained':
        new_layout_engine = ConstrainedLayoutEngine(**kwargs)
    elif layout == 'compressed':
        new_layout_engine = ConstrainedLayoutEngine(compress=True, **kwargs)
    elif layout == 'none':
        if self._layout_engine is not None:
            new_layout_engine = PlaceHolderLayoutEngine(
                self._layout_engine.adjust_compatible,
                self._layout_engine.colorbar_gridspec
            )
        else:
            new_layout_engine = None
    elif isinstance(layout, LayoutEngine):
        new_layout_engine = layout
    else:
        raise ValueError(f"Invalid value for 'layout': {layout!r}")

    if self._check_layout_engines_compat(self._layout_engine, new_layout_engine):
        self._layout_engine = new_layout_engine
    else:
        raise RuntimeError('Colorbar layout of new layout engine not '
                           'compatible with old engine, and a colorbar '
                           'has been created. Engine not changed.')

path: lib/matplotlib/figure.py | n_ast_nodes: 296 | repo: matplotlib
documentation:
{ "docstring": "\n Set the layout engine for this figure.\n\n Parameters\n ----------\n layout: {'constrained', 'compressed', 'tight', 'none'} or \\\n`LayoutEngine` or None\n\n - 'constrained' will use `~.ConstrainedLayoutEngine`\n - 'compressed' will also use `~.ConstrainedLayoutEngine`, but with\n a correction that attempts to make a good layout for fixed-aspect\n ratio Axes.\n - 'tight' uses `~.TightLayoutEngine`\n - 'none' removes layout engine.\n\n If `None`, the behavior is controlled by :rc:`figure.autolayout`\n (which if `True` behaves as if 'tight' were passed) and\n :rc:`figure.constrained_layout.use` (which if true behaves as if\n 'constrained' were passed). If both are true,\n :rc:`figure.autolayout` takes priority.\n\n Users and libraries can define their own layout engines and pass\n the instance directly as well.\n\n kwargs: dict\n The keyword arguments are passed to the layout engine to set things\n like padding and margin sizes. Only used if *layout* is a string.\n\n ", "language": "en", "n_whitespaces": 344, "n_words": 131, "vocab_size": 94 }
n_words: 107 | language: Python | vocab_size: 60
commit_id: f7f3bb6079048506613c513231e1bd2a87ebc7d3 | file_name: figure.py | id: 108,782
nloc: 35 | token_counts: 167 | fun_name: set_layout_engine
url: https://github.com/matplotlib/matplotlib.git
commit_message: ENH: add ability to remove layout engine This also adds a "place holder" layout engine to ensure that users can not "go through zero" and change to an incompatible layout engine. Co-authored-by: Jody Klymak <[email protected]>
n_whitespaces: 612 | n_ast_errors: 0 | d_id: 23,340 | ast_levels: 15

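A short usage sketch exercising the string values the docstring lists, assuming matplotlib 3.6 or newer (where set_layout_engine was introduced); the pad value is a hypothetical example of a kwarg forwarded to the engine:

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    fig.set_layout_engine("constrained")     # switch to constrained layout
    fig.set_layout_engine("tight", pad=0.4)  # extra kwargs go to the layout engine
    fig.set_layout_engine("none")            # remove the layout engine
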
complexity: 1 | n_identifiers: 21
code:

def _kernel_constraint(self, kernel):
    padding = backend.constant([[1, 1], [1, 1]], dtype="int32")

    kernel_shape = backend.shape(kernel)[0]
    start = backend.cast(kernel_shape / 2, "int32")

    kernel_new = backend.switch(
        backend.cast(tf.math.floormod(kernel_shape, 2), "bool"),
        lambda: kernel[start - 1 : start, start - 1 : start],
        lambda: kernel[start - 1 : start, start - 1 : start]
        + backend.zeros((2, 2), dtype=kernel.dtype),
    )
    index = backend.switch(
        backend.cast(tf.math.floormod(kernel_shape, 2), "bool"),
        lambda: backend.constant(0, dtype="int32"),
        lambda: backend.constant(1, dtype="int32"),
    )
    while_condition = lambda index, *args: backend.less(index, start)

path: keras/constraints.py | n_ast_nodes: 299 | repo: keras
documentation:
{ "docstring": "Radially constraints a kernel with shape (height, width,\n channels).", "language": "en", "n_whitespaces": 15, "n_words": 9, "vocab_size": 9 }
72
Python
44
3613c3defc39c236fb1592c4f7ba1a9cc887343a
constraints.py
278,654
24
246
_kernel_constraint
https://github.com/keras-team/keras.git
Remove pylint comments. PiperOrigin-RevId: 452353044
212
0
82,655
14
2
5
def set_seq1(self, a): if a is self.a: return self.a = a self.matching_blocks = self.opcodes = None
python3.10.4/Lib/difflib.py
50
XX-Net
{ "docstring": "Set the first sequence to be compared.\n\n The second sequence to be compared is not changed.\n\n >>> s = SequenceMatcher(None, \"abcd\", \"bcde\")\n >>> s.ratio()\n 0.75\n >>> s.set_seq1(\"bcde\")\n >>> s.ratio()\n 1.0\n >>>\n\n SequenceMatcher computes and caches detailed information about the\n second sequence, so if you want to compare one sequence S against\n many sequences, use .set_seq2(S) once and call .set_seq1(x)\n repeatedly for each of the other sequences.\n\n See also set_seqs() and set_seq2().\n ", "language": "en", "n_whitespaces": 169, "n_words": 71, "vocab_size": 56 }
16
Python
13
8198943edd73a363c266633e1aa5b2a9e9c9f526
difflib.py
222,491
5
30
set_seq1
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
55
0
56,591
8
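The caching behavior described in the `set_seq1` docstring above is worth demonstrating — a short standard-library-only sketch:

    from difflib import SequenceMatcher

    s = SequenceMatcher(None, "", "abcdef")   # detailed info about seq2 is computed and cached once
    for candidate in ("abcd", "bcde", "abcdef"):
        s.set_seq1(candidate)                 # cheap: only the cached matching blocks are reset
        print(candidate, round(s.ratio(), 2))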
3
5
def assert_lists_same(a, b): assert len(a) == len(b) for i in a: assert i in b for i in b: assert i in a
tests/common.py
57
core
{ "docstring": "Compare two lists, ignoring order.\n\n Check both that all items in a are in b and that all items in b are in a,\n otherwise assert_lists_same([\"1\", \"1\"], [\"1\", \"2\"]) could be True.\n ", "language": "en", "n_whitespaces": 41, "n_words": 32, "vocab_size": 24 }
23
Python
14
64381acbaf2930cda5dfa538d00bfa9f5172e690
common.py
296,701
6
36
assert_lists_same
https://github.com/home-assistant/core.git
Mark device actions from hidden or auxiliary entities as secondary (#70278)
49
0
95,675
8
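A quick sketch of what the double-membership check in `assert_lists_same` buys, and its remaining blind spot:

    assert_lists_same(["b", "a"], ["a", "b"])    # passes: same items, order ignored
    # assert_lists_same(["1", "1"], ["1", "2"]) would fail: "2" is not in the first list
    # Caveat: true multiset differences such as ["a", "a", "b"] vs ["a", "b", "b"] still pass,
    # since the helper only checks lengths and membership, not element counts.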
2
7
def _get_state_dict(self): model_state = {} for task in self.tasks: model_state[task] = { "state_dict": self.__getattr__(task)._get_state_dict(), "class": self.__getattr__(task).__class__, } return model_state
flair/models/multitask_model.py
84
flair
{ "docstring": "\n Returns the state dict of the multitask model which has multiple models underneath.\n :return model_state: model state for the multitask model\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 15 }
19
Python
17
03be003d417c4d0f90ec03fbca1ba0a0b337ff44
multitask_model.py
214,689
8
50
_get_state_dict
https://github.com/flairNLP/flair.git
multitask training
99
0
53,765
14
3
19
def set_exe_build_timestamp(exe_path, timestamp): import pefile with pefile.PE(exe_path, fast_load=True) as pe: # Manually perform a full load. We need it to load all headers, but specifying it in the constructor triggers # byte statistics gathering that takes forever with large files. So we try to go around that... pe.full_load() # Set build timestamp. # See: https://0xc0decafe.com/malware-analyst-guide-to-pe-timestamps timestamp = int(timestamp) # Set timestamp field in FILE_HEADER pe.FILE_HEADER.TimeDateStamp = timestamp # MSVC-compiled executables contain (at least?) one DIRECTORY_ENTRY_DEBUG entry that also contains timestamp # with same value as set in FILE_HEADER. So modify that as well, as long as it is set. debug_entries = getattr(pe, 'DIRECTORY_ENTRY_DEBUG', []) for debug_entry in debug_entries: if debug_entry.struct.TimeDateStamp: debug_entry.struct.TimeDateStamp = timestamp # Generate updated EXE data data = pe.write() # Rewrite the exe with open(exe_path, 'wb') as fp: fp.write(data)
PyInstaller/utils/win32/winutils.py
171
pyinstaller
{ "docstring": "\n Modifies the executable's build timestamp by updating values in the corresponding PE headers.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
131
Python
95
41483cb9e6d5086416c8fea6ad6781782c091c60
winutils.py
263,807
13
95
set_exe_build_timestamp
https://github.com/pyinstaller/pyinstaller.git
winutils: optimize PE headers fixup Attempt to optimize PE headers fix-up from both time- and memory- intensity perspective. First, avoid specifying `fast_load=False` in `pefile.PE` constructor, because that triggers the bytes statistics collection https://github.com/erocarrera/pefile/blob/v2022.5.30/pefile.py#L2862-L2876 which takes a long time for large files. Instead, we can obtain full headers (required for build timestamp modification) by calling `pe.full_load()` ourselves. Second, use (an equivalent of) `MapFileAndCheckSumW` to compute the PE checksum. For large files, it is orders of magnitude faster than its pure-python `pefile.PE.generate_checksum` counterpart. The downside is that `MapFileAndCheckSumW` requires an on-disk file as opposed to a memory buffer, so we need to split the PE headers fixup into two separate steps, with each modifying the corresponding PE headers and (re)writing the whole file. Even so, this brings the fix-up process for a 700MB executable down to seconds instead of minutes. In addition, as noted on MSDN, `MapFileAndCheckSumW` internally calls its ASCII variant (`MapFileAndCheckSumA`), so it cannot handle file paths that contain characters that are not representable in the current code page. Therefore, we implement our own equivalent using `ctypes` and pure widechar-based win32 API functions.
277
0
77,447
14
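A hedged usage sketch for `set_exe_build_timestamp` — the reproducible-builds SOURCE_DATE_EPOCH convention is one plausible source for the timestamp; the output path and fallback below are illustrative, not from the source:

    import os
    import time

    # pin the PE build timestamp, e.g. for reproducible builds
    epoch = int(os.environ.get("SOURCE_DATE_EPOCH", time.time()))
    set_exe_build_timestamp("dist/app.exe", epoch)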
15
40
def load_tasks(request, project): file_upload_ids, found_formats, data_keys = [], [], set() could_be_tasks_lists = False # take tasks from request FILES if len(request.FILES): check_file_sizes_and_number(request.FILES) for filename, file in request.FILES.items(): file_upload = create_file_upload(request, project, file) if file_upload.format_could_be_tasks_list: could_be_tasks_lists = True file_upload_ids.append(file_upload.id) tasks, found_formats, data_keys = FileUpload.load_tasks_from_uploaded_files(project, file_upload_ids) # take tasks from url address elif 'application/x-www-form-urlencoded' in request.content_type: # empty url url = request.data.get('url') if not url: raise ValidationError('"url" is not found in request data') # try to load json with task or tasks from url as string json_data = str_to_json(url) if json_data: file_upload = create_file_upload(request, project, SimpleUploadedFile('inplace.json', url.encode())) file_upload_ids.append(file_upload.id) tasks, found_formats, data_keys = FileUpload.load_tasks_from_uploaded_files(project, file_upload_ids) # download file using url and read tasks from it else: if settings.SSRF_PROTECTION_ENABLED and url_is_local(url): raise ImportFromLocalIPError data_keys, found_formats, tasks, file_upload_ids = tasks_from_url( file_upload_ids, project, request, url ) # take one task from request DATA elif 'application/json' in request.content_type and isinstance(request.data, dict): tasks = [request.data] # take many tasks from request DATA elif 'application/json' in request.content_type and isinstance(request.data, list): tasks = request.data # incorrect data source else: raise ValidationError('load_tasks: No data found in DATA or in FILES') # check is data root is list if not isinstance(tasks, list): raise ValidationError('load_tasks: Data root must be list') # empty tasks error if not tasks: raise ValidationError('load_tasks: No tasks added') check_max_task_number(tasks) return tasks, file_upload_ids, could_be_tasks_lists, found_formats, list(data_keys)
label_studio/data_import/uploader.py
478
label-studio
{ "docstring": " Load tasks from different types of request.data / request.files\n ", "language": "en", "n_whitespaces": 13, "n_words": 9, "vocab_size": 9 }
216
Python
108
eb9198e827e0fdab1e10593c7ea91a56af299e8b
uploader.py
177,937
38
292
load_tasks
https://github.com/heartexlabs/label-studio.git
fix: DEV-2235: Fix blind SSRF on add model and import (#2450) * fix: DEV-2235: Fix blind SSRF on add model and import * Fix ip check (DEV-2235) * Disable bandit check (DEV-2235)
552
0
42,547
17
4
18
def synset_from_pos_and_offset(self, pos, offset): # Check to see if the synset is in the cache if offset in self._synset_offset_cache[pos]: return self._synset_offset_cache[pos][offset] data_file = self._data_file(pos) data_file.seek(offset) data_file_line = data_file.readline() # If valid, the offset equals the 8-digit 0-padded integer found at the start of the line: line_offset = data_file_line[:8] if line_offset.isalnum() and offset == int(line_offset): synset = self._synset_from_pos_and_line(pos, data_file_line) assert synset._offset == offset self._synset_offset_cache[pos][offset] = synset else: synset = None warnings.warn(f"No WordNet synset found for pos={pos} at offset={offset}.") data_file.seek(0) return synset
nltk/corpus/reader/wordnet.py
199
nltk
{ "docstring": "\n - pos: The synset's part of speech, matching one of the module level\n attributes ADJ, ADJ_SAT, ADV, NOUN or VERB ('a', 's', 'r', 'n', or 'v').\n - offset: The byte offset of this synset in the WordNet dict file\n for this pos.\n\n >>> from nltk.corpus import wordnet as wn\n >>> print(wn.synset_from_pos_and_offset('n', 1740))\n Synset('entity.n.01')\n ", "language": "en", "n_whitespaces": 114, "n_words": 53, "vocab_size": 45 }
80
Python
53
e081b67f971fa478a98d5734366c602f85d9f7d9
wordnet.py
42,471
16
119
synset_from_pos_and_offset
https://github.com/nltk/nltk.git
Warn about nonexistent synset offsets
230
0
7,557
12
2
8
async def async_unjoin_player(self): sonos_data = self.hass.data[DATA_SONOS] household_id = self.speaker.household_id
homeassistant/components/sonos/media_player.py
40
core
{ "docstring": "Remove this player from any group.\n\n Coalesces all calls within 0.5s to allow use of SonosSpeaker.unjoin_multi()\n which optimizes the order in which speakers are removed from their groups.\n Removing coordinators last better preserves playqueues on the speakers.\n ", "language": "en", "n_whitespaces": 65, "n_words": 37, "vocab_size": 34 }
9
Python
8
4bfdb1433e95dfe504e376ca082def5257c23bcb
media_player.py
314,760
12
89
async_unjoin_player
https://github.com/home-assistant/core.git
Optimize Sonos unjoin behavior when using `media_player.unjoin` (#74086) * Coalesce Sonos unjoins to process together * Refactor for readability * Skip unjoin call if already ungrouped * Store unjoin data in a dedicated dataclass * Revert import adjustment
30
0
113,364
9
3
13
def _key_to_file(self, session_key=None): if session_key is None: session_key = self._get_or_create_session_key() # Make sure we're not vulnerable to directory traversal. Session keys # should always be md5s, so they should never contain directory # components. if not set(session_key).issubset(VALID_KEY_CHARS): raise InvalidSessionKey("Invalid characters in session key") return os.path.join(self.storage_path, self.file_prefix + session_key)
django/contrib/sessions/backends/file.py
96
django
{ "docstring": "\n Get the file associated with this session key.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
48
Python
41
9c19aff7c7561e3a82978a272ecdaad40dda5c00
file.py
204,309
6
56
_key_to_file
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
119
0
50,690
10
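The traversal guard in `_key_to_file` relies on a restricted key alphabet. A standalone sketch of the same idea — the constant below mirrors `VALID_KEY_CHARS` from Django's session base backend as an assumption:

    import string

    VALID_KEY_CHARS = set(string.ascii_lowercase + string.digits)

    def is_safe_session_key(session_key: str) -> bool:
        # rejects "/", "..", null bytes, etc. before the key ever reaches os.path.join
        return set(session_key).issubset(VALID_KEY_CHARS)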
1
18
def test_conversation_chain_errors_bad_variable() -> None: llm = FakeLLM() prompt = PromptTemplate(input_variables=["foo"], template="{foo}") memory = ConversationBufferMemory(dynamic_key="foo") with pytest.raises(ValueError): ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key="foo") @pytest.mark.parametrize( "memory", [ ConversationBufferMemory(dynamic_key="baz"), ConversationSummaryMemory(llm=FakeLLM(), dynamic_key="baz"), ], )
tests/unit_tests/chains/test_conversation.py
161
@pytest.mark.parametrize( "memory", [ ConversationBufferMemory(dynamic_key="baz"), ConversationSummaryMemory(llm=FakeLLM(), dynamic_key="baz"), ], )
langchain
{ "docstring": "Test that conversation chain works in basic setting.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
28
Python
26
a408ed3ea39dfa47e8b522a9e153b259f25df54e
test_conversation.py
191,569
7
60
test_conversation_chain_errors_bad_variable
https://github.com/hwchase17/langchain.git
Samantha/add conversation chain (#166) Add MemoryChain and ConversationChain as chains that take a docstore in addition to the prompt, and use the docstore to stuff context into the prompt. This can be used to have an ongoing conversation with a chatbot. Probably needs a bit of refactoring for code quality Co-authored-by: Harrison Chase <[email protected]>
71
1
46,692
12
1
4
def _dry_run(self, **kwargs) -> bool: ...
jina/clients/base/__init__.py
22
jina
{ "docstring": "Sends a dry run to the Flow to validate if the Flow is ready to receive requests\n\n :param kwargs: potential kwargs received passed from the public interface\n ", "language": "en", "n_whitespaces": 41, "n_words": 27, "vocab_size": 22 }
6
Python
6
ef662b529b2a2eecea7bb99759a9f7b9d86d3062
__init__.py
12,488
6
12
_dry_run
https://github.com/jina-ai/jina.git
feat: add grpc health checking (#4779)
20
0
2,311
6
2
8
def is_fedora(): (osname, osrelease, oscodename) = ( x.strip('"').strip("'") for x in linux_distribution() ) return osname == "Fedora" @real_memoize
salt/utils/platform.py
68
@real_memoize
salt
{ "docstring": "\n Simple function to return if host is Fedora or not\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
18
Python
18
f2a783643de61cac1ff3288b40241e5ce6e1ddc8
platform.py
215,982
5
36
is_fedora
https://github.com/saltstack/salt.git
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <[email protected]>
36
1
54,302
12
2
9
def update(self) -> None: self.data.update() self._times = self.data.info if not self._times: self._state = None else: with suppress(TypeError): self._state = self._times[0][ATTR_DUE_IN]
homeassistant/components/rejseplanen/sensor.py
95
core
{ "docstring": "Get the latest data from rejseplanen.dk and update the states.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
20
Python
17
6f564e4f514b56bce281ec7e82703cfbff87b417
sensor.py
305,791
9
56
update
https://github.com/home-assistant/core.git
Improve entity type hints [r] (#77874)
92
0
104,575
14
4
26
def get_base_info(self) -> pd.DataFrame: regx = r'<a href="(.+?)">|</a>' results = {} for attr in BASE_INFO: info_obj = self.coin.get(attr, {}) if attr == "description": info_obj = info_obj.get("en") info_obj = re.sub(regx, "", info_obj) info_obj = re.sub(r"\r\n\r\n", " ", info_obj) results[attr] = info_obj results.update(self._get_base_market_data_info()) df = pd.Series(results).to_frame().reset_index() df.columns = ["Metric", "Value"] df["Metric"] = df["Metric"].apply( lambda x: lambda_replace_underscores_in_column_names(x) if isinstance(x, str) else x ) return df[df["Value"].notna()]
gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_model.py
262
OpenBBTerminal
{ "docstring": "Get all the base information about given coin. [Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n Base information about coin\n ", "language": "en", "n_whitespaces": 56, "n_words": 17, "vocab_size": 15 }
62
Python
46
fd5821928265429d1ffb6e6d53f019915b3afbbc
pycoingecko_model.py
282,590
26
156
get_base_info
https://github.com/OpenBB-finance/OpenBBTerminal.git
adjusted format of logs (#1292) adjusted format of logs
243
0
84,179
13
2
8
def check_supplier_has_docname_access(supplier): status = True if frappe.form_dict.name not in frappe.db.sql_list( , (supplier,), ): status = False return status
erpnext/templates/pages/rfq.py
57
erpnext
{ "docstring": "select parent from `tabRequest for Quotation Supplier`\n\t\twhere supplier = %s", "language": "en", "n_whitespaces": 9, "n_words": 11, "vocab_size": 11 }
18
Python
15
494bd9ef78313436f0424b918f200dab8fc7c20b
rfq.py
68,093
9
36
check_supplier_has_docname_access
https://github.com/frappe/erpnext.git
style: format code with black
10
0
14,717
9
1
14
def test_format_float_precision(self, st_element, get_proto): values = [3.14, 3.1] display_values = ["3.14", "3.10"] df = pd.DataFrame({"test": values}) st_element(df.style.format({"test": "{:.2f}"})) proto_df = get_proto(self._get_element()) self._assert_column_display_values(proto_df, 0, display_values)
lib/tests/streamlit/legacy_dataframe_styling_test.py
121
streamlit
{ "docstring": "Tests DataFrame.style.format() with floats.\n By default, the frontend will format any unstyled DataFrame float\n with 4 digits after the decimal. If we have any floating point styling\n in a DataFrame, our display_values should be filled in even for\n cells whose display_value == value.\n ", "language": "en", "n_whitespaces": 78, "n_words": 43, "vocab_size": 39 }
24
Python
21
2c153aa179a27539f856e389870161d5a58da213
legacy_dataframe_styling_test.py
118,716
7
75
test_format_float_precision
https://github.com/streamlit/streamlit.git
Pandas 1.4 styler fix (#4316) Change the way we detect custom styling in a DataFrame, to account for changes in Pandas 1.4. Our DataFrame styling support is based on internal Pandas APIs, so they're always subject to change out from underneath us. In general, we'd prefer to only pass `display_value` data to the frontend when a DataFrame cell has been custom-formatted by the user, to save on bandwidth. However, Panda's Styler's internals are private, and it doesn't give us a consistent way of testing whether a cell has a custom `display_value` or not. Prior to Pandas 1.4, we could test whether a cell's `display_value` differed from its `value`, and only stick the `display_value` in the protobuf when that was the case. In 1.4, an unmodified Styler will contain `display_value` strings for all cells, regardless of whether any formatting has been applied to that cell, so we no longer have this ability (or at least I couldn't figure out a reasonable way to test for this). So instead, as of this PR, calling `st._legacy_dataframe(df.styler)` will *always* result in `display_value` strings being written to the dataframe protobuf (even though there isn't any custom formatting). This means that styled DataFrames may result in more data being sent to the frontend now than was the case before. In practice, I don't think this is a big deal - only the legacy DataFrame code has styling support; and often, if you're styling a DataFrame, you're customizing the formatting on most or all of its cells anyway. I also made a number of small type-safety changes as I was working with the dataframe code, and those are all in the PR as well. (I've left a PR comment under the actual logic changes.)
73
0
26,374
12
1
9
def get_asset_categories(filters): return frappe.db.sql( , {"to_date": filters.to_date, "from_date": filters.from_date, "company": filters.company}, as_dict=1, )
erpnext/accounts/report/asset_depreciations_and_balances/asset_depreciations_and_balances.py
64
erpnext
{ "docstring": "\n\t\tSELECT asset_category,\n\t\t\t ifnull(sum(case when purchase_date < %(from_date)s then\n\t\t\t\t\t\t\t case when ifnull(disposal_date, 0) = 0 or disposal_date >= %(from_date)s then\n\t\t\t\t\t\t\t\t\tgross_purchase_amount\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t\t0\n\t\t\t\t\t\t\t end\n\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t0\n\t\t\t\t\t\t end), 0) as cost_as_on_from_date,\n\t\t\t ifnull(sum(case when purchase_date >= %(from_date)s then\n\t\t\t \t\t\t\t\t\tgross_purchase_amount\n\t\t\t \t\t\t\t else\n\t\t\t \t\t\t\t \t\t0\n\t\t\t \t\t\t\t end), 0) as cost_of_new_purchase,\n\t\t\t ifnull(sum(case when ifnull(disposal_date, 0) != 0\n\t\t\t \t\t\t\t\t\tand disposal_date >= %(from_date)s\n\t\t\t \t\t\t\t\t\tand disposal_date <= %(to_date)s then\n\t\t\t\t\t\t\t case when status = \"Sold\" then\n\t\t\t\t\t\t\t \t\tgross_purchase_amount\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t \t\t0\n\t\t\t\t\t\t\t end\n\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t0\n\t\t\t\t\t\t end), 0) as cost_of_sold_asset,\n\t\t\t ifnull(sum(case when ifnull(disposal_date, 0) != 0\n\t\t\t \t\t\t\t\t\tand disposal_date >= %(from_date)s\n\t\t\t \t\t\t\t\t\tand disposal_date <= %(to_date)s then\n\t\t\t\t\t\t\t case when status = \"Scrapped\" then\n\t\t\t\t\t\t\t \t\tgross_purchase_amount\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t \t\t0\n\t\t\t\t\t\t\t end\n\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t0\n\t\t\t\t\t\t end), 0) as cost_of_scrapped_asset\n\t\tfrom `tabAsset`\n\t\twhere docstatus=1 and company=%(company)s and purchase_date <= %(to_date)s\n\t\tgroup by asset_category\n\t", "language": "en", "n_whitespaces": 179, "n_words": 117, "vocab_size": 40 }
13
Python
13
494bd9ef78313436f0424b918f200dab8fc7c20b
asset_depreciations_and_balances.py
65,151
47
39
get_asset_categories
https://github.com/frappe/erpnext.git
style: format code with black
7
0
13,809
10
2
14
def test_get_nodes_for_order_with_int_id(order_list): order_models.Order.objects.update(use_old_id=True) # given global_ids = [to_global_id("Order", order.number) for order in order_list] # Make sure function works even if duplicated ids are provided global_ids.append(to_global_id("Order", order_list[0].number)) # when orders = get_nodes(global_ids, Order) # then assert orders == order_list
saleor/graphql/core/tests/test_graphql.py
105
saleor
{ "docstring": "Ensure that `get_nodes` returns correct nodes, when old id is used\n for orders with the `use_old_id` flag set to True.", "language": "en", "n_whitespaces": 22, "n_words": 20, "vocab_size": 20 }
38
Python
33
41b87559118f560c223f83d405efe9b406701d17
test_graphql.py
26,315
6
62
test_get_nodes_for_order_with_int_id
https://github.com/saleor/saleor.git
Migrate order id from int to UUID (#9324) * Add migration to change order id from int to UUID (#9281) * Change order token to uuid * Migrate order id to uuid * Fix failing tests * Apply code review suggestions * Fix payment migration dependencies * Fix typo in order migration name * Handle old order ids for order queries * Hanlde old order ids for order mutations * Add order relation to GiftCardEvent model * Deprecate order token related queries and fields (#9295) * Deprecate order.token field * Update description of orderByToken query * Update prepare_order_search_document_value method * Update changelog * Update schema file
68
0
4,963
11
1
7
def serving(self, inputs): return self.call(inputs) CONVNEXT_START_DOCSTRING = r CONVNEXT_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare ConvNext model outputting raw features without any specific head on top.", CONVNEXT_START_DOCSTRING, )
src/transformers/models/convnext/modeling_tf_convnext.py
54
@add_start_docstrings( "The bare ConvNext model outputting raw features without any specific head on top.", CONVNEXT_START_DOCSTRING, )
transformers
{ "docstring": "\n Method used for serving the model.\n\n Args:\n inputs (`Dict[str, tf.Tensor]`):\n The input of the saved model as a dictionary of tensors.\n \n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n <Tip>\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n </Tip>\n\n Parameters:\n config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ConvNextFeatureExtractor`]. See\n [`ConvNextFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used\n in eager mode, in graph mode the value will always be set to True.\n", "language": "en", "n_whitespaces": 518, "n_words": 298, "vocab_size": 171 }
27
Python
25
84eaa6acf582206dba33135727dc3bfff05a7e9c
modeling_tf_convnext.py
35,595
2
15
serving
https://github.com/huggingface/transformers.git
Add TFConvNextModel (#15750) * feat: initial implementation of convnext in tensorflow. * fix: sample code for the classification model. * chore: added checked for from the classification model. * chore: set bias initializer in the classification head. * chore: updated license terms. * chore: removed ununsed imports * feat: enabled argument during using drop_path. * chore: replaced tf.identity with layers.Activation(linear). * chore: edited default checkpoint. * fix: minor bugs in the initializations. * partial-fix: tf model errors for loading pretrained pt weights. * partial-fix: call method updated * partial-fix: cross loading of weights (4x3 variables to be matched) * chore: removed unneeded comment. * removed playground.py * rebasing * rebasing and removing playground.py. * fix: renaming TFConvNextStage conv and layer norm layers * chore: added initializers and other minor additions. * chore: added initializers and other minor additions. * add: tests for convnext. * fix: integration tester class. * fix: issues mentioned in pr feedback (round 1). * fix: how output_hidden_states arg is propoagated inside the network. * feat: handling of arg for pure cnn models. * chore: added a note on equal contribution in model docs. * rebasing * rebasing and removing playground.py. * feat: encapsulation for the convnext trunk. * Fix variable naming; Test-related corrections; Run make fixup * chore: added Joao as a contributor to convnext. * rebasing * rebasing and removing playground.py. * rebasing * rebasing and removing playground.py. * chore: corrected copyright year and added comment on NHWC. * chore: fixed the black version and ran formatting. * chore: ran make style. * chore: removed from_pt argument from test, ran make style. * rebasing * rebasing and removing playground.py. * rebasing * rebasing and removing playground.py. * fix: tests in the convnext subclass, ran make style. * rebasing * rebasing and removing playground.py. * rebasing * rebasing and removing playground.py. * chore: moved convnext test to the correct location * fix: locations for the test file of convnext. * fix: convnext tests. * chore: applied sgugger's suggestion for dealing w/ output_attentions. * chore: added comments. * chore: applied updated quality enviornment style. * chore: applied formatting with quality enviornment. * chore: revert to the previous tests/test_modeling_common.py. * chore: revert to the original test_modeling_common.py * chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py * fix: tests for convnext. * chore: removed output_attentions argument from convnext config. * chore: revert to the earlier tf utils. * fix: output shapes of the hidden states * chore: removed unnecessary comment * chore: reverting to the right test_modeling_tf_common.py. * Styling nits Co-authored-by: ariG23498 <[email protected]> Co-authored-by: Joao Gante <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
43
1
6,512
7
1
5
def to_pydict(self, *args, **kwargs): return self.table.to_pydict(*args, **kwargs)
src/datasets/table.py
41
datasets
{ "docstring": "\n Convert the Table to a dict or OrderedDict.\n\n Returns:\n :obj:`dict`\n ", "language": "en", "n_whitespaces": 43, "n_words": 10, "vocab_size": 10 }
7
Python
7
e35be138148333078284b942ccc9ed7b1d826f97
table.py
104,413
2
25
to_pydict
https://github.com/huggingface/datasets.git
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
21
0
21,849
8
6
14
def avatar_url(user, size=50, gravatar_only=False): if ( not gravatar_only and hasattr(user, "wagtail_userprofile") and user.wagtail_userprofile.avatar ): return user.wagtail_userprofile.avatar.url if hasattr(user, "email"): gravatar_url = get_gravatar_url(user.email, size=size) if gravatar_url is not None: return gravatar_url return versioned_static_func("wagtailadmin/images/default-user-avatar.png") @register.simple_tag
wagtail/admin/templatetags/wagtailadmin_tags.py
127
@register.simple_tag
wagtail
{ "docstring": "\n A template tag that receives a user and size and return\n the appropriate avatar url for that user.\n Example usage: {% avatar_url request.user 50 %}\n ", "language": "en", "n_whitespaces": 38, "n_words": 25, "vocab_size": 23 }
33
Python
24
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtailadmin_tags.py
71,246
12
74
avatar_url
https://github.com/wagtail/wagtail.git
Reformat with black
100
1
15,646
11
3
9
def get_position(self, original=False): if original: return self._originalPosition.frozen() else: locator = self.get_axes_locator() if not locator: self.apply_aspect() return self._position.frozen()
lib/matplotlib/axes/_base.py
81
matplotlib
{ "docstring": "\n Return the position of the Axes within the figure as a `.Bbox`.\n\n Parameters\n ----------\n original : bool\n If ``True``, return the original position. Otherwise, return the\n active position. For an explanation of the positions see\n `.set_position`.\n\n Returns\n -------\n `.Bbox`\n\n ", "language": "en", "n_whitespaces": 129, "n_words": 39, "vocab_size": 30 }
17
Python
15
383de519505964ed879c40b23ef36e90c17ebe0d
_base.py
110,326
8
47
get_position
https://github.com/matplotlib/matplotlib.git
[Doc] fix more spelling and grammar
97
0
24,065
12
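A short sketch of the two return modes of `get_position` (public Matplotlib API):

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    active = ax.get_position()                 # active position; applies aspect first if no locator is set
    original = ax.get_position(original=True)  # position before any layout adjustments
    print(active.bounds, original.bounds)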
2
9
def back(self, title, next, name = "Back", active = 1): if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)
python3.10.4/Lib/distutils/command/bdist_msi.py
81
XX-Net
{ "docstring": "Add a back button with a given title, the tab-next button,\n its name in the Control table, possibly initially disabled.\n\n Return the button, so that events can be associated", "language": "en", "n_whitespaces": 42, "n_words": 29, "vocab_size": 25 }
33
Python
27
8198943edd73a363c266633e1aa5b2a9e9c9f526
bdist_msi.py
222,640
6
54
back
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
83
0
56,682
9
1
17
def test_insert_new_client_ip_none_device_id(self) -> None: self.reactor.advance(12345678) user_id = "@user:id" # Add & trigger the storage loop self.get_success( self.store.insert_client_ip( user_id, "access_token", "ip", "user_agent", None ) ) self.reactor.advance(200) self.pump(0) result = self.get_success( self.store.db_pool.simple_select_list( table="user_ips", keyvalues={"user_id": user_id}, retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], desc="get_user_ip_and_agents", ) ) self.assertEqual( result, [ { "access_token": "access_token", "ip": "ip", "user_agent": "user_agent", "device_id": None, "last_seen": 12345678000, } ], ) # Add another & trigger the storage loop self.get_success( self.store.insert_client_ip( user_id, "access_token", "ip", "user_agent", None ) ) self.reactor.advance(10) self.pump(0) result = self.get_success( self.store.db_pool.simple_select_list( table="user_ips", keyvalues={"user_id": user_id}, retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], desc="get_user_ip_and_agents", ) ) # Only one result, has been upserted. self.assertEqual( result, [ { "access_token": "access_token", "ip": "ip", "user_agent": "user_agent", "device_id": None, "last_seen": 12345878000, } ], )
tests/storage/test_client_ips.py
431
synapse
{ "docstring": "\n An insert with a device ID of NULL will not create a new entry, but\n update an existing entry in the user_ips table.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 22 }
116
Python
55
3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b
test_client_ips.py
250,111
61
245
test_insert_new_client_ip_none_device_id
https://github.com/matrix-org/synapse.git
Require types in tests.storage. (#14646) Adds missing type hints to `tests.storage` package and does not allow untyped definitions.
824
0
73,277
14
1
19
def test_api_unset_storage_path(self, m): m.return_value = "OK" response = self.client.post( "/api/documents/bulk_edit/", json.dumps( { "documents": [self.doc1.id], "method": "set_storage_path", "parameters": {"storage_path": None}, }, ), content_type="application/json", ) self.assertEqual(response.status_code, 200) m.assert_called_once() args, kwargs = m.call_args self.assertListEqual(args[0], [self.doc1.id]) self.assertEqual(kwargs["storage_path"], None)
src/documents/tests/test_api.py
182
paperless-ngx
{ "docstring": "\n GIVEN:\n - API data to clear/unset the storage path of a document\n WHEN:\n - API is called\n THEN:\n - set_storage_path is called with correct document IDs and None storage_path\n ", "language": "en", "n_whitespaces": 91, "n_words": 29, "vocab_size": 23 }
34
Python
32
53baed03895f28f24113d376b089e3ef281b34ed
test_api.py
319,783
18
109
test_api_unset_storage_path
https://github.com/paperless-ngx/paperless-ngx.git
Increases test coverage of storage paths
228
0
116,996
15
1
12
def test_update_display_name(self) -> None: # Set new display_name channel = self.make_request( "PUT", self.url, access_token=self.admin_user_tok, content={"display_name": "new displayname"}, ) self.assertEqual(200, channel.code, msg=channel.json_body) # Check new display_name channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("new displayname", channel.json_body["display_name"])
tests/rest/admin/test_device.py
160
synapse
{ "docstring": "\n Tests a normal successful update of display name\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
38
Python
26
c97042f7eef3748e17c90e48a4122389a89c4735
test_device.py
249,078
18
99
test_update_display_name
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13469)
185
0
72,585
12
3
16
def _user_may_move_collection(self, user, instance): if user.is_active and user.is_superuser: return True else: permissions = self.permission_policy._get_permission_objects_for_actions( ["add", "edit", "delete"] ) return not GroupCollectionPermission.objects.filter( group__user=user, permission__in=permissions, collection=instance, ).exists()
wagtail/admin/views/collections.py
104
wagtail
{ "docstring": "\n Is this instance used for assigning GroupCollectionPermissions to the user?\n If so, this user is not allowed do move the collection to a new part of the tree\n ", "language": "en", "n_whitespaces": 50, "n_words": 28, "vocab_size": 24 }
25
Python
24
d10f15e55806c6944827d801cd9c2d53f5da4186
collections.py
72,395
12
64
_user_may_move_collection
https://github.com/wagtail/wagtail.git
Reformat with black
161
0
15,884
14
4
34
def test_predict_proba(loss, global_random_seed): n_samples = 20 y_true, raw_prediction = random_y_true_raw_prediction( loss=loss, n_samples=n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=global_random_seed, ) if hasattr(loss, "predict_proba"): proba = loss.predict_proba(raw_prediction) assert proba.shape == (n_samples, loss.n_classes) assert np.sum(proba, axis=1) == approx(1, rel=1e-11) if hasattr(loss, "gradient_proba"): for grad, proba in ( (None, None), (None, np.empty_like(raw_prediction)), (np.empty_like(raw_prediction), None), (np.empty_like(raw_prediction), np.empty_like(raw_prediction)), ): grad, proba = loss.gradient_proba( y_true=y_true, raw_prediction=raw_prediction, sample_weight=None, gradient_out=grad, proba_out=proba, ) assert proba.shape == (n_samples, loss.n_classes) assert np.sum(proba, axis=1) == approx(1, rel=1e-11) assert_allclose( grad, loss.gradient( y_true=y_true, raw_prediction=raw_prediction, sample_weight=None, gradient_out=None, ), ) @pytest.mark.parametrize("loss", ALL_LOSSES) @pytest.mark.parametrize("sample_weight", [None, "range"]) @pytest.mark.parametrize("dtype", (np.float32, np.float64)) @pytest.mark.parametrize("order", ("C", "F"))
sklearn/_loss/tests/test_loss.py
453
@pytest.mark.parametrize("loss", ALL_LOSSES) @pytest.mark.parametrize("sample_weight", [None, "range"]) @pytest.mark.parametrize("dtype", (np.float32, np.float64)) @pytest.mark.parametrize("order", ("C", "F"))
scikit-learn
{ "docstring": "Test that predict_proba and gradient_proba work as expected.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
93
Python
62
751c5cd05ff545c20ad0b09ac491c07f31e4cd56
test_loss.py
259,259
38
248
test_predict_proba
https://github.com/scikit-learn/scikit-learn.git
TST ensure that sklearn/_loss/tests/test_loss.py is seed insensitive (#22847) Co-authored-by: Christian Lorentzen <[email protected]>
483
1
75,678
14
1
4
def handles(self): self._deprecate("handles") return self._handles
pandas/io/excel/_base.py
31
pandas
{ "docstring": "\n Handles to Excel sheets.\n\n .. deprecated:: 1.5.0\n ", "language": "en", "n_whitespaces": 29, "n_words": 7, "vocab_size": 7 }
5
Python
5
047137ce2619cfe2027e3999dfb92eb614d9a485
_base.py
164,685
3
16
handles
https://github.com/pandas-dev/pandas.git
DEP: Protect some ExcelWriter attributes (#45795) * DEP: Deprecate ExcelWriter attributes * DEP: Deprecate ExcelWriter attributes * Fixup for test * Move tests and restore check_extension y * Deprecate xlwt fm_date and fm_datetime; doc improvements
26
0
39,590
8
2
9
def xreplace(self, rule): new_args = [] for mat, frame in self.args: mat = mat.xreplace(rule) new_args.append([mat, frame]) return Vector(new_args)
sympy/physics/vector/vector.py
71
sympy
{ "docstring": "Replace occurrences of objects within the measure numbers of the\n vector.\n\n Parameters\n ==========\n\n rule : dict-like\n Expresses a replacement rule.\n\n Returns\n =======\n\n Vector\n Result of the replacement.\n\n Examples\n ========\n\n >>> from sympy import symbols, pi\n >>> from sympy.physics.vector import ReferenceFrame\n >>> A = ReferenceFrame('A')\n >>> x, y, z = symbols('x y z')\n >>> ((1 + x*y) * A.x).xreplace({x: pi})\n (pi*y + 1)*A.x\n >>> ((1 + x*y) * A.x).xreplace({x: pi, y: 2})\n (1 + 2*pi)*A.x\n\n Replacements occur only if an entire node in the expression tree is\n matched:\n\n >>> ((x*y + z) * A.x).xreplace({x*y: pi})\n (z + pi)*A.x\n >>> ((x*y*z) * A.x).xreplace({x*y: pi})\n x*y*z*A.x\n\n ", "language": "en", "n_whitespaces": 293, "n_words": 103, "vocab_size": 74 }
18
Python
17
9a3ffc6781bd44c47cf49e128ef154389c32876a
vector.py
197,456
6
44
xreplace
https://github.com/sympy/sympy.git
Some pep8 cleanup of sympy.physics.vector.
68
0
48,559
10
2
8
def call_event(func_obj, *func_args): connection = transaction.get_connection() if connection.in_atomic_block: transaction.on_commit(lambda: func_obj(*func_args)) else: func_obj(*func_args)
saleor/core/utils/events.py
70
saleor
{ "docstring": "Call webhook event with given args.\n\n Ensures that in atomic transaction event is called on_commit.\n ", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 14 }
12
Python
12
89786f24b5296a23c093fcfea90893292473b275
events.py
28,887
6
40
call_event
https://github.com/saleor/saleor.git
[Change] Change the way transactions are handled in mutations (#10606) * refactor account, app, attribute mutations * add checkout refactor * Change transactions on all mutations to context, and use call_event method to trigger webhooks * remove comments * refactor call_event and move app load outside transaction in few places * remove redundant code from merge conflicts * switch calling call_event to more readable way * fix missed event call * refactor and add transaction in permission group * move call_event function to utils, fix few event calls after review * fix one event call after review * fix transaction scope
38
0
5,184
13
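The `call_event` record above encodes the deferred-side-effect pattern: inside an atomic block, work is queued with `transaction.on_commit` so webhooks never fire for rolled-back data. A hedged usage sketch — `create_order` and `send_order_webhook` are hypothetical stand-ins, not from the source:

    from django.db import transaction

    with transaction.atomic():
        order = create_order(...)              # hypothetical mutation
        call_event(send_order_webhook, order)  # runs only after COMMIT succeeds; immediate outside atomic blocks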
3
7
def _cleanup_discovery_on_remove(self) -> None: if self._discovery_data and not self._removed_from_hass: stop_discovery_updates( self.hass, self._discovery_data, self._remove_discovery_updated ) self._removed_from_hass = True
homeassistant/components/mqtt/mixins.py
60
core
{ "docstring": "Stop listening to signal and cleanup discovery data.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
17
Python
17
3b2aae5045f9f08dc8f174c5d975852588e1a132
mixins.py
296,361
7
37
_cleanup_discovery_on_remove
https://github.com/home-assistant/core.git
Refactor MQTT discovery (#67966) * Proof of concept * remove notify platform * remove loose test * Add rework from #67912 (#1) * Move notify serviceupdater to Mixins * Move tag discovery handler to Mixins * fix tests * Add typing for async_load_platform_helper * Add add entry unload support for notify platform * Simplify discovery updates * Remove not needed extra logic * Cleanup inrelevant or duplicate code * reuse update_device and move to mixins * Remove notify platform * revert changes to notify platform * Rename update class * unify tag entry setup * Use shared code for device_trigger `update_device` * PoC shared dispatcher for device_trigger * Fix bugs * Improve typing - remove async_update * Unload config_entry and tests * Release dispatcher after setup and deduplicate * closures to methods, revert `in` to `=`, updates * Re-add update support for tag platform * Re-add update support for device-trigger platform * Cleanup rediscovery code revert related changes * Undo discovery code shift * Update homeassistant/components/mqtt/mixins.py Co-authored-by: Erik Montnemery <[email protected]> * Update homeassistant/components/mqtt/device_trigger.py Co-authored-by: Erik Montnemery <[email protected]> * Update homeassistant/components/mqtt/mixins.py Co-authored-by: Erik Montnemery <[email protected]> * revert doc string changes * move conditions * typing and check config_entry_id * Update homeassistant/components/mqtt/mixins.py Co-authored-by: Erik Montnemery <[email protected]> * cleanup not used attribute * Remove entry_unload code and tests * update comment * add second comment Co-authored-by: Erik Montnemery <[email protected]>
79
0
95,345
10
1
19
def test_keep_media_by_date(self) -> None: # timestamp before upload now_ms = self.clock.time_msec() server_and_media_id = self._create_media() self._access_media(server_and_media_id) channel = self.make_request( "POST", self.url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body["total"]) self._access_media(server_and_media_id) # timestamp after upload now_ms = self.clock.time_msec() channel = self.make_request( "POST", self.url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( server_and_media_id.split("/")[1], channel.json_body["deleted_media"][0], ) self._access_media(server_and_media_id, False)
tests/rest/admin/test_media.py
304
synapse
{ "docstring": "\n Tests that media is not deleted if it is newer than `before_ts`\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
61
Python
35
c97042f7eef3748e17c90e48a4122389a89c4735
test_media.py
249,111
28
188
test_keep_media_by_date
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13469)
282
0
72,618
11
1
5
def external_ray_cluster_activity_hook1(): global ray_cluster_activity_hook_counter ray_cluster_activity_hook_counter += 1 return { "test_component1": TestRayActivityResponse( is_active="ACTIVE", reason=f"Counter: {ray_cluster_activity_hook_counter}", ) }
python/ray/_private/test_utils.py
53
ray
{ "docstring": "\n Example external hook for test_component_activities_hook.\n\n Returns valid response and increments counter in `reason`\n field on each call.\n ", "language": "en", "n_whitespaces": 30, "n_words": 17, "vocab_size": 17 }
16
Python
15
56716a1c1b6f9aae3967b910a799bb6af9f2c5d9
test_utils.py
124,496
9
27
external_ray_cluster_activity_hook1
https://github.com/ray-project/ray.git
[dashboard] Add `RAY_CLUSTER_ACTIVITY_HOOK` to `/api/component_activities` (#26297) Add external hook to /api/component_activities endpoint in dashboard snapshot router Change is_active field of RayActivityResponse to take an enum RayActivityStatus instead of bool. This is a backward incompatible change, but should be ok because [dashboard] Add component_activities API #25996 wasn't included in any branch cuts. RayActivityResponse now supports informing when there was an error getting the activity observation and the reason.
67
0
27,613
12
1
7
async def test_auth_middleware_loaded_by_default(hass): with patch("homeassistant.components.http.async_setup_auth") as mock_setup: await async_setup_component(hass, "http", {"http": {}}) assert len(mock_setup.mock_calls) == 1
tests/components/http/test_auth.py
71
core
{ "docstring": "Test accessing to server from banned IP when feature is off.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
16
Python
16
63f8e437ed0bf79d72286853b7c2e7c01abef91f
test_auth.py
310,495
4
37
test_auth_middleware_loaded_by_default
https://github.com/home-assistant/core.git
Add Home Assistant Content user (#64337)
32
0
109,180
13
1
10
def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None): from dask.array.reductions import sum return sum( self, axis=axis, dtype=dtype, keepdims=keepdims, split_every=split_every, out=out, )
dask/array/core.py
83
dask
{ "docstring": "\n Return the sum of the array elements over the given axis.\n\n Refer to :func:`dask.array.sum` for full documentation.\n\n See Also\n --------\n dask.array.sum : equivalent function\n ", "language": "en", "n_whitespaces": 67, "n_words": 24, "vocab_size": 22 }
20
Python
20
2820bae493a49cb1d0a6e376985c5473b8f04fa8
core.py
156,749
10
60
sum
https://github.com/dask/dask.git
Don't include docs in ``Array`` methods, just refer to module docs (#9244) Co-authored-by: James Bourbeau <[email protected]>
114
0
36,759
8
6
30
def test_limit_without_orderby_excess_groups_pruned(self): for tag, tag_value in (("tag1", "group1"), ("tag1", "group2")): self.store_release_health_metric( name=SessionMRI.SESSION.value, tags={tag: tag_value}, value=10, minutes_before_now=4, ) for tag, tag_value, numbers in ( ("tag1", "group2", list(range(3))), ("tag1", "group3", list(range(3, 6))), ): for value in numbers: self.store_release_health_metric( name=SessionMRI.ERROR.value, tags={tag: tag_value}, value=value, ) for tag, tag_value, numbers in ( ("tag1", "group4", list(range(3))), ("tag1", "group5", list(range(3, 6))), ): for value in numbers: self.store_release_health_metric( name=SessionMRI.DURATION.value, tags={tag: tag_value}, value=value, ) response = self.get_success_response( self.organization.slug, field=[ f"p50({SessionMetricKey.DURATION.value})", SessionMetricKey.ERRORED.value, "sum(sentry.sessions.session)", ], statsPeriod="1h", interval="1h", groupBy="tag1", per_page=3, ) groups = response.data["groups"] assert len(groups) == 3
tests/sentry/api/endpoints/test_organization_metric_data.py
401
sentry
{ "docstring": "\n Test that ensures that when requesting series data that is not ordered, if the limit of\n each query is not met, thereby a limit is not applied to the aueries and we end up with\n more groups than the limit then the excess number of groups should be pruned\n ", "language": "en", "n_whitespaces": 78, "n_words": 49, "vocab_size": 36 }
86
Python
52
c67c560f667e6fc7fee2c6d62ac3987ba54f89d5
test_organization_metric_data.py
86,580
42
254
test_limit_without_orderby_excess_groups_pruned
https://github.com/getsentry/sentry.git
feat(metrics): Standardize tests and fix overall flakiness [TET-437] (#39660)
600
0
18,131
14
1
18
def _select_states() -> Select: return select( literal(value=None, type_=sqlalchemy.Text).label("event_id"), literal(value=EVENT_STATE_CHANGED, type_=sqlalchemy.String).label("event_type"), literal(value=None, type_=sqlalchemy.Text).label("event_data"), States.last_updated.label("time_fired"), States.context_id.label("context_id"), States.context_user_id.label("context_user_id"), States.context_parent_id.label("context_parent_id"), literal(value=None, type_=sqlalchemy.Text).label("shared_data"), *STATE_COLUMNS, NOT_CONTEXT_ONLY, )
homeassistant/components/logbook/queries.py
200
core
{ "docstring": "Generate a states select that formats the states table as event rows.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
21
Python
19
0584e84c30903aae07cf16898138ce4e1e8b6be7
queries.py
300,696
14
124
_select_states
https://github.com/home-assistant/core.git
Add MySQL index hints to logbook (#71864) * Add MySQL index hints to logbook * fix mysql query planner
100
0
99,556
13
2
7
def _prepare_options(self) -> None: super()._prepare_options() if not self.options.restart_cmd_alt: # pragma: no cover raise ValueError("OS option restart_cmd_alt must be set for CentOS.") self.options.restart_cmd_alt[0] = self.options.ctl
certbot-apache/certbot_apache/_internal/override_centos.py
74
certbot
{ "docstring": "\n Override the options dictionary initialization in order to support\n alternative restart cmd used in CentOS.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
24
Python
24
7d9e9a49005de7961e84d2a7c608db57dbab3046
override_centos.py
186,658
9
42
_prepare_options
https://github.com/certbot/certbot.git
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
64
0
45,566
10
1
5
def add_column(self, *args, **kwargs): raise NotImplementedError()
src/datasets/table.py
28
datasets
{ "docstring": "\n Add column to Table at position.\n\n A new table is returned with the column added, the original table\n object is left unchanged.\n\n Args:\n i (:obj:`int`):\n Index to place the column at.\n field_ (:obj:`Union[str, pyarrow.Field]`):\n If a string is passed then the type is deduced from the column\n data.\n column (:obj:`Union[pyarrow.Array, List[pyarrow.Array]]`):\n Column data.\n\n Returns:\n :class:`datasets.table.Table`: New table with the passed column added.\n ", "language": "en", "n_whitespaces": 209, "n_words": 62, "vocab_size": 43 }
6
Python
6
e35be138148333078284b942ccc9ed7b1d826f97
table.py
104,423
2
16
add_column
https://github.com/huggingface/datasets.git
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
20
0
21,859
7
1
16
def write_to_version_file(filename, versions): os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"]))
versioneer.py
118
rembg
{ "docstring": "Write the given version number to the given _version.py file.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
27
Python
26
f0194812568c83585ff09488fe7f67df300938cc
versioneer.py
195,585
7
69
write_to_version_file
https://github.com/danielgatis/rembg.git
add auto tag
74
0
47,300
11
1
2
def minexponent(self): return self["minexponent"]
packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py
22
plotly.py
{ "docstring": "\n Hide SI prefix for 10^n if |n| is below this number. This only\n has an effect when `tickformat` is \"SI\" or \"B\".\n\n The 'minexponent' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 105, "n_words": 46, "vocab_size": 43 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_colorbar.py
228,732
2
11
minexponent
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,405
7
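A short sketch of setting minexponent from user code. The companion setting is spelled exponentformat in plotly's figure API; treat the exact attribute paths here as assumptions rather than a definitive reference:

import plotly.graph_objects as go

fig = go.Figure(
    go.Bar(
        x=["a", "b", "c"],
        y=[1e-7, 5e-7, 9e-7],
        marker=dict(
            color=[1e-7, 5e-7, 9e-7],  # numeric colors so a colorbar applies
            showscale=True,
            colorbar=dict(exponentformat="SI", minexponent=6),  # hide SI prefixes for |n| < 6
        ),
    )
)
fig.show()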
1
10
def bcoo_sum_duplicates(mat, nse=None): data, indices = _bcoo_sum_duplicates(mat.data, mat.indices, spinfo=mat._info, nse=nse) return BCOO((data, indices), shape=mat.shape)
jax/experimental/sparse/bcoo.py
73
jax
{ "docstring": "Sums duplicate indices within a BCOO array, returning an array with sorted indices.\n\n Args:\n mat : BCOO array\n nse : integer (optional). The number of specified elements in the output matrix. This must\n be specified for bcoo_sum_duplicates to be compatible with JIT and other JAX transformations.\n If not specified, the optimal nse will be computed based on the contents of the data and\n index arrays. If specified nse is larger than necessary, data and index arrays will be padded\n with standard fill values. If smaller than necessary, data elements will be dropped from the\n output matrix.\n\n Returns:\n mat_out : BCOO array with sorted indices and no duplicate indices.\n ", "language": "en", "n_whitespaces": 145, "n_words": 108, "vocab_size": 67 }
14
Python
14
edae0ac31f7493bbe3a7f845dd8f48fc9f5b5760
bcoo.py
120,235
3
49
bcoo_sum_duplicates
https://github.com/google/jax.git
[sparse] make bcoo_sum_duplicates a primitive
17
0
26,802
10
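A sketch of the effect on a small matrix, with invented data; it assumes bcoo_sum_duplicates is re-exported under jax.experimental.sparse, which may vary by JAX version:

import jax.numpy as jnp
from jax.experimental import sparse

# Two of the three entries share the index (0, 0); deduplication sums them.
data = jnp.array([1.0, 2.0, 3.0])
indices = jnp.array([[0, 0], [0, 0], [1, 1]])
mat = sparse.BCOO((data, indices), shape=(2, 2))

dedup = sparse.bcoo_sum_duplicates(mat)
print(dedup.todense())  # expected: [[3. 0.] [0. 3.]]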
8
18
def _run_sql(self, sql, params, raw=True, output=False, latest=False): toget = 'source_raw' if raw else 'source' sqlfrom = "history" if output: sqlfrom = "history LEFT JOIN output_history USING (session, line)" toget = "history.%s, output_history.output" % toget if latest: toget += ", MAX(session * 128 * 1024 + line)" this_query = "SELECT session, line, %s FROM %s " % (toget, sqlfrom) + sql cur = self.db.execute(this_query, params) if latest: cur = (row[:-1] for row in cur) if output: # Regroup into 3-tuples, and parse JSON return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur) return cur
IPython/core/history.py
188
ipython
{ "docstring": "Prepares and runs an SQL query for the history database.\n\n Parameters\n ----------\n sql : str\n Any filtering expressions to go after SELECT ... FROM ...\n params : tuple\n Parameters passed to the SQL query (to replace \"?\")\n raw, output : bool\n See :meth:`get_range`\n latest : bool\n Select rows with max (session, line)\n\n Returns\n -------\n Tuples as :meth:`get_range`\n ", "language": "en", "n_whitespaces": 171, "n_words": 57, "vocab_size": 46 }
96
Python
68
dc5bcc1c50892a5128fcf128af28887226144927
history.py
208,718
15
118
_run_sql
https://github.com/ipython/ipython.git
This fixed the mixing of multiple histories seen in #13631. It forces get_tail to put the current session last in the returned results.
224
0
52,477
12
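Because the code above only builds the SELECT prefix, it can help to trace how the final query text is assembled; the snippet below re-derives it for raw=False, output=True, latest=True with a hypothetical caller-supplied WHERE clause:

# Mirrors the string assembly inside _run_sql (values chosen for illustration).
toget = "source"                                                     # raw=False
sqlfrom = "history LEFT JOIN output_history USING (session, line)"  # output=True
toget = "history.%s, output_history.output" % toget
toget += ", MAX(session * 128 * 1024 + line)"                       # latest=True
sql = "WHERE session == ?"                                          # hypothetical tail
query = "SELECT session, line, %s FROM %s " % (toget, sqlfrom) + sql
print(query)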
1
16
async def test_registered_no_pin_required(hass, user_form): with patch(MOCK_API_CONNECT, return_value=True), patch( MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock ) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=False): mock_device_registered.return_value = True await hass.config_entries.flow.async_configure( user_form["flow_id"], user_input=TEST_CREDS )
tests/components/subaru/test_config_flow.py
100
core
{ "docstring": "Test if the device is already registered and PIN not required.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
23
Python
22
ab0abdc988ac101217ba043909c4be8b33101ab3
test_config_flow.py
294,965
8
61
test_registered_no_pin_required
https://github.com/home-assistant/core.git
Add 2FA support for Subaru integration setup (#68753) * Add 2FA support for Subaru integration setup * Update config flow to abort with 2FA request fail
71
0
93,992
12
1
9
def test_model_panels(self): response = self.client.get('/admin/modeladmintest/friend/create/') self.assertEqual( list(response.context['form'].fields), ['first_name', 'phone_number'] )
wagtail/contrib/modeladmin/tests/test_modeladmin_edit_handlers.py
69
wagtail
{ "docstring": "loads the 'create' view and verifies that form fields are returned\n which have been defined via model Friend.panels", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 18 }
10
Python
10
de3fcba9e95818e9634ab7de6bfcb1f4221f2775
test_modeladmin_edit_handlers.py
70,998
6
38
test_model_panels
https://github.com/wagtail/wagtail.git
Fix warnings from flake8-comprehensions.
60
0
15,597
12
2
22
def get_ordered_to_be_billed_data(args): doctype, party = args.get("doctype"), args.get("party") child_tab = doctype + " Item" precision = ( get_field_precision( frappe.get_meta(child_tab).get_field("billed_amt"), currency=get_default_currency() ) or 2 ) project_field = get_project_field(doctype, party) return frappe.db.sql( .format( parent_tab="tab" + doctype, child_tab="tab" + child_tab, precision=precision, party=party, date_field=args.get("date"), project_field=project_field, order=args.get("order"), order_by=args.get("order_by"), ) )
erpnext/accounts/report/non_billed_report.py
208
erpnext
{ "docstring": "\n\t\tSelect\n\t\t\t`{parent_tab}`.name, `{parent_tab}`.{date_field},\n\t\t\t`{parent_tab}`.{party}, `{parent_tab}`.{party}_name,\n\t\t\t`{child_tab}`.item_code,\n\t\t\t`{child_tab}`.base_amount,\n\t\t\t(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)),\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0)),\n\t\t\t(`{child_tab}`.base_amount -\n\t\t\t(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)) -\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))),\n\t\t\t`{child_tab}`.item_name, `{child_tab}`.description,\n\t\t\t{project_field}, `{parent_tab}`.company\n\t\tfrom\n\t\t\t`{parent_tab}`, `{child_tab}`\n\t\twhere\n\t\t\t`{parent_tab}`.name = `{child_tab}`.parent and `{parent_tab}`.docstatus = 1\n\t\t\tand `{parent_tab}`.status not in ('Closed', 'Completed')\n\t\t\tand `{child_tab}`.amount > 0\n\t\t\tand (`{child_tab}`.base_amount -\n\t\t\tround(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1), {precision}) -\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))) > 0\n\t\torder by\n\t\t\t`{parent_tab}`.{order} {order_by}\n\t\t", "language": "en", "n_whitespaces": 47, "n_words": 70, "vocab_size": 48 }
44
Python
35
494bd9ef78313436f0424b918f200dab8fc7c20b
non_billed_report.py
65,288
46
125
get_ordered_to_be_billed_data
https://github.com/frappe/erpnext.git
style: format code with black
22
0
13,841
14
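The args mapping this helper expects is only implied by its .get calls; the sketch below reconstructs a plausible shape, with every key name inferred from the format() call rather than taken from ERPNext documentation:

# Runs inside a Frappe site context; all values are illustrative guesses.
args = {
    "doctype": "Sales Order",     # parent doctype; child table becomes "Sales Order Item"
    "party": "customer",          # column prefix used as {party} and {party}_name
    "date": "transaction_date",   # substituted for {date_field}
    "order": "transaction_date",  # ORDER BY column
    "order_by": "asc",            # ORDER BY direction
}
rows = get_ordered_to_be_billed_data(args)  # a plain dict works, since only .get() is used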
1
11
def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs): tb = treebuilders.getTreeBuilder(treebuilder) p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) return p.parseFragment(doc, container=container, **kwargs)
.venv/lib/python3.8/site-packages/pip/_vendor/html5lib/html5parser.py
84
transferlearning
{ "docstring": "Parse an HTML fragment as a string or file-like object into a tree\n\n :arg doc: the fragment to parse as a string or file-like object\n\n :arg container: the container context to parse the fragment in\n\n :arg treebuilder: the treebuilder to use when parsing\n\n :arg namespaceHTMLElements: whether or not to namespace HTML elements\n\n :returns: parsed tree\n\n Example:\n\n >>> from html5lib.html5libparser import parseFragment\n >>> parseFragment('<b>this is a fragment</b>')\n <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>\n\n ", "language": "en", "n_whitespaces": 100, "n_words": 70, "vocab_size": 46 }
17
Python
16
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
html5parser.py
62,546
4
53
parseFragment
https://github.com/jindongwang/transferlearning.git
upd; format
29
0
12,986
9
3
9
def _preprocess(self, inputs): inputs = self._check_input_text(inputs) self._max_cls_len = 5 num_workers = self.kwargs[ 'num_workers'] if 'num_workers' in self.kwargs else 0 lazy_load = self.kwargs[ 'lazy_load'] if 'lazy_load' in self.kwargs else False # Prompt template: input_text + "是" + "[MASK]" * cls_seq_length prompt_template = ["是"] + ["[MASK]"] * self._max_cls_len
paddlenlp/taskflow/knowledge_mining.py
115
PaddleNLP
{ "docstring": "\n Create the dataset and dataloader for the predict.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 7 }
46
Python
33
621357338437ee420eabbbf5ab19065bc85e73a5
knowledge_mining.py
322,190
26
168
_preprocess
https://github.com/PaddlePaddle/PaddleNLP.git
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * update a readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: tianxin <[email protected]>
117
0
118,085
10
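The prompt construction is easy to miss inside the flattened code above; isolated, it is just this (是 is the Chinese copula "is", and the [MASK] slots hold the predicted class name):

max_cls_len = 5
prompt_template = ["是"] + ["[MASK]"] * max_cls_len
print(prompt_template)
# ['是', '[MASK]', '[MASK]', '[MASK]', '[MASK]', '[MASK]']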
1
22
def test_server_side_settings_are_used_if_present(self, patch_import, tmp_path): d = Deployment( name="TEST", flow_name="fn", description="server-side value", version="server", parameters={"key": "server"}, tags=["server-tag"], work_queue_name="dev", ) assert d.apply() invoke_and_assert( [ "deployment", "build", "fake-path.py:fn", "-n", "TEST", "-o", str(tmp_path / "test.yaml"), ], expected_code=0, temp_dir=tmp_path, ) deployment = Deployment.load_from_yaml(tmp_path / "test.yaml") assert deployment.description == "server-side value" assert deployment.tags == ["server-tag"] assert deployment.parameters == dict(key="server") assert deployment.work_queue_name == "dev"
tests/cli/test_deployment_cli.py
225
prefect
{ "docstring": "\n This only applies to tags, work queue name, description, schedules and default parameter values\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
56
Python
46
451688c6aa1350bb3967d0d72b95e9da311de5d7
test_deployment_cli.py
58,837
29
129
test_server_side_settings_are_used_if_present
https://github.com/PrefectHQ/prefect.git
Further merge CLI and Python code paths
359
0
11,821
12
1
15
def test_transformer_size_gets_corrected(train_persist_load_with_different_settings,): pipeline = [ {"component": WhitespaceTokenizer}, {"component": CountVectorsFeaturizer}, ] config_params = {EPOCHS: 1, NUM_TRANSFORMER_LAYERS: 1} selector = train_persist_load_with_different_settings( pipeline, config_params, False, ) assert selector.component_config[TRANSFORMER_SIZE] == DEFAULT_TRANSFORMER_SIZE @pytest.mark.timeout(120)
tests/nlu/selectors/test_selectors.py
100
@pytest.mark.timeout(120)
rasa
{ "docstring": "Tests that the default value of `transformer_size` which is `None` is\n corrected if transformer layers are enabled in `ResponseSelector`.\n ", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 18 }
28
Python
25
c687960f44e2ad07ccd48ddbccda26cb18a9d1c7
test_selectors.py
159,098
10
54
test_transformer_size_gets_corrected
https://github.com/RasaHQ/rasa.git
correct transformer_size value if needed
69
1
38,124
10
1
2
def valign(self): return self["valign"]
packages/python/plotly/plotly/graph_objs/layout/_annotation.py
22
plotly.py
{ "docstring": "\n Sets the vertical alignment of the `text` within the box. Has\n an effect only if an explicit height is set to override the\n text height.\n\n The 'valign' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['top', 'middle', 'bottom']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 130, "n_words": 49, "vocab_size": 40 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_annotation.py
230,885
2
11
valign
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
62,558
7
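An illustrative annotation using valign; per the docstring it only takes effect once an explicit height overrides the text height, and the coordinates here are arbitrary:

import plotly.graph_objects as go

fig = go.Figure()
fig.add_annotation(
    text="anchored text",
    x=0.5, y=0.5,
    height=120,     # without an explicit height, valign has no visible effect
    valign="top",   # one of 'top', 'middle', 'bottom'
)
fig.show()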
15
26
def check_permissions(cls, context, permissions=None): all_permissions = permissions or cls._meta.permissions if not all_permissions: return True authorization_filters = [ p for p in all_permissions if isinstance(p, AuthorizationFilters) ] permissions = [ p for p in all_permissions if not isinstance(p, AuthorizationFilters) ] granted_by_permissions = False granted_by_authorization_filters = False app = getattr(context, "app", None) if app and permissions and AccountPermissions.MANAGE_STAFF in permissions: # `MANAGE_STAFF` permission for apps is not supported. If apps could use it # they could create a staff user with full access which would be a # permission leak issue. return False requestor = get_user_or_app_from_context(context) if permissions: granted_by_permissions = requestor.has_perms(permissions) if authorization_filters: internal_perm_checks = [] for p in authorization_filters: perm_fn = resolve_authorization_filter_fn(p) if perm_fn: res = perm_fn(context) internal_perm_checks.append(bool(res)) granted_by_authorization_filters = any(internal_perm_checks) return granted_by_permissions or granted_by_authorization_filters
saleor/graphql/core/mutations.py
244
saleor
{ "docstring": "Determine whether user or app has rights to perform this mutation.\n\n Default implementation assumes that account is allowed to perform any\n mutation. By overriding this method or defining required permissions\n in the meta-class, you can restrict access to it.\n\n The `context` parameter is the Context instance associated with the request.\n ", "language": "en", "n_whitespaces": 85, "n_words": 50, "vocab_size": 41 }
124
Python
68
ab45ebda5a14df6806046fd552e2c6d08f025503
mutations.py
26,386
27
152
check_permissions
https://github.com/saleor/saleor.git
Better permissions (#9363) * Better permissions * Add OWNER permission * WIP Add enums to represent function-based permissions * Rename OWNER to IS_OWNER * Add flag to skip autogenerated permission message * Rename InternalPermissions to PermissionFunctions * Add permission descriptions for meta mutations * Better permissions validation * Reuse user checking functions * Rename permission functions enums * Update schema * Rename permission functions enums
418
0
4,979
15
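A sketch of the declaration side of this contract: a hypothetical mutation whose Meta supplies both a model permission and an authorization filter. The class name, filter member, and import paths are guesses for illustration; only the _meta.permissions lookup comes from the code above.

# Import paths and member names below are assumptions about saleor's layout.
from saleor.core.permissions import AccountPermissions, AuthorizationFilters
from saleor.graphql.core.mutations import BaseMutation


class PublishSomething(BaseMutation):  # hypothetical mutation
    class Meta:
        description = "Publish a thing."
        permissions = (
            AccountPermissions.MANAGE_STAFF,  # checked via requestor.has_perms()
            AuthorizationFilters.IS_OWNER,    # resolved to a filter function at runtime
        )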
1
5
def enabled(): which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') return which == 'local'
.venv/lib/python3.8/site-packages/_distutils_hack/__init__.py
42
transferlearning
{ "docstring": "\n Allow selection of distutils by environment variable.\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
10
Python
9
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
__init__.py
60,460
3
21
enabled
https://github.com/jindongwang/transferlearning.git
upd; format
19
0
12,170
9
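Usage is just an environment toggle; a quick sketch (assumes the installed _distutils_hack package exposes enabled as in the file above):

import os
from _distutils_hack import enabled

os.environ["SETUPTOOLS_USE_DISTUTILS"] = "local"   # opt in to setuptools' bundled distutils
print(enabled())  # True
os.environ["SETUPTOOLS_USE_DISTUTILS"] = "stdlib"  # the default
print(enabled())  # False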
1
19
def test_dynamic_prompt_valid() -> None: input_variables = ["question"] example_separator = "\n\n" dynamic_prompt_cls = DynamicPrompt( examples=EXAMPLES, suffix=SUFFIX, input_variables=input_variables, example_separator=example_separator, prefix=PREFIX, ) prompt_cls = Prompt(input_variables=input_variables, template=LONGER_TEMPLATE) dynamic_prompt_template = dynamic_prompt_cls.format(question="foo?") prompt_template = prompt_cls.format(question="foo?") assert dynamic_prompt_template == prompt_template assert dynamic_prompt_cls.input_variables == prompt_cls.input_variables
tests/unit_tests/test_dynamic_prompt.py
140
langchain
{ "docstring": "Test dynamic prompt can be successfully constructed from examples.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
37
Python
28
c636488fe5e144bcf41832d27d64dbed6c9f4997
test_dynamic_prompt.py
191,437
16
84
test_dynamic_prompt_valid
https://github.com/hwchase17/langchain.git
DynamicPrompt class creation (#49) Checking that this structure looks generally ok -- going to sub in logic where the TODO comment is, then add a test.
102
0
46,569
10