Column "text": one FIM-formatted Python sample per row, with string lengths ranging from 67 to 7.88k characters.
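Each row wraps a single Python function in fill-in-the-middle (FIM) style: the code before the hole follows <|fim_prefix|>, the code after it follows <|fim_suffix|>, the held-out span (here, the function or method name) follows <|fim_middle|>, and <|file_separator|> closes the sample. The sketch below is not part of the dataset itself; it shows one way such a row could be reassembled into the original snippet. The helper name and regex are illustrative.

```python
import re

# Token names are taken from the rows themselves; DOTALL covers samples
# whose prefix or suffix spans multiple source lines.
FIM_PATTERN = re.compile(
    r"<\|fim_prefix\|>(?P<prefix>.*?)"
    r"<\|fim_suffix\|>(?P<suffix>.*?)"
    r"<\|fim_middle\|>(?P<middle>.*?)"
    r"<\|file_separator\|>",
    re.DOTALL,
)

def reassemble(row_text: str) -> str:
    """Rebuild the original snippet as prefix + middle + suffix."""
    m = FIM_PATTERN.search(row_text)
    if m is None:
        raise ValueError("row does not match the FIM token layout")
    return m.group("prefix") + m.group("middle") + m.group("suffix")

# For the first row below, prefix "def ", middle "on_204", and suffix
# "(self, session): pass" reassemble to "def on_204(self, session): pass".
```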
<|fim_prefix|>def <|fim_suffix|>(self, session): pass<|fim_middle|>on_204<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(g): g.cmd(b's', b'T05thread:01;')<|fim_middle|>gdb_step<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.check('/admin/default/shell') ws_url = server.base_url.replace('http://', 'ws://') + '/admin/default/webshell-data' ws = create_connection(ws_url) # Python expressions are computed ws.send('1 + 2') eq_(ws.recv(), '3') # Session state is maintained. Gramex can be imported ws.send('import gramex') eq_(ws.recv(), '') ws.send('gramex.__version__') eq_(ast.literal_eval(ws.recv()), gramex.__version__) # handler is available for use ws.send('handler.session') result = ast.literal_eval(ws.recv()) ok_('_t' in result and 'id' in result)<|fim_middle|>test_shell<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, x): self.__buf.write(struct.pack('>L', x))<|fim_middle|>pack_uint<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): action = ChatJoinRequestHandler(self.callback) for attr in action.__slots__: assert getattr(action, attr, "err") != "err", f"got extra slot '{attr}'" assert len(mro_slots(action)) == len(set(mro_slots(action))), "duplicate slot"<|fim_middle|>test_slot_behaviour<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id")<|fim_middle|>id<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(path: Optional[Path] = None) -> Path: if path is None: path = Path.cwd() here = path while here.parent != here: config = here / ".neuro.toml" if config.exists(): return here here = here.parent raise ConfigError(f"Project root is not found for {path}")<|fim_middle|>find_project_root<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): parser = argparse.ArgumentParser( description=USAGE, prog="ddtrace-run", usage="ddtrace-run <your usual python command>", formatter_class=argparse.RawTextHelpFormatter, ) parser.add_argument("command", nargs=argparse.REMAINDER, type=str, help="Command string to execute.") parser.add_argument("-d", "--debug", help="enable debug mode (disabled by default)", action="store_true") parser.add_argument( "-i", "--info", help=( "print library info useful for debugging. Only reflects configurations made via environment " "variables, not those made in code." ), action="store_true", ) parser.add_argument("-p", "--profiling", help="enable profiling (disabled by default)", action="store_true") parser.add_argument("-v", "--version", action="version", version="%(prog)s " + ddtrace.__version__) parser.add_argument("-nc", "--colorless", help="print output of command without color", action="store_true") args = parser.parse_args() if args.profiling: os.environ["DD_PROFILING_ENABLED"] = "true" if args.debug or ddtrace.config._debug_mode: logging.basicConfig(level=logging.DEBUG) os.environ["DD_TRACE_DEBUG"] = "true" if args.info: # Inline imports for performance. from ddtrace.internal.debug import pretty_collect print(pretty_collect(ddtrace.tracer, color=not args.colorless)) sys.exit(0) root_dir = os.path.dirname(ddtrace.__file__) log.debug("ddtrace root: %s", root_dir) bootstrap_dir = os.path.join(root_dir, "bootstrap") log.debug("ddtrace bootstrap: %s", bootstrap_dir) _add_bootstrap_to_pythonpath(bootstrap_dir) log.debug("PYTHONPATH: %s", os.environ["PYTHONPATH"]) log.debug("sys.path: %s", sys.path) if not args.command: parser.print_help() sys.exit(1) # Find the executable path executable = find_executable(args.command[0]) if executable is None: print("ddtrace-run: failed to find executable '%s'.\n" % args.command[0]) parser.print_usage() sys.exit(1) log.debug("program executable: %s", executable) if os.path.basename(executable) == "uwsgi": print( ( "ddtrace-run has known compatibility issues with uWSGI where the " "tracer is not started properly in uWSGI workers which can cause " "broken behavior. It is recommended you remove ddtrace-run and " "update your uWSGI configuration following " "https://ddtrace.readthedocs.io/en/stable/advanced_usage.html#uwsgi." ) ) try: os.execl(executable, executable, *args.command[1:]) except PermissionError: print("ddtrace-run: permission error while launching '%s'" % executable) print("Did you mean `ddtrace-run python %s`?" % executable) sys.exit(1) except Exception: print("ddtrace-run: error launching '%s'" % executable) raise sys.exit(0)<|fim_middle|>main<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(iterable, n): """ Split a interable into chunks of length n with the final element being the remainder len < n if n does not divide evenly """ len_iter = len(iterable) return [iterable[i: min(i + n, len_iter)] for i in range(0, len_iter, n)]<|fim_middle|>take_n<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): log.debug("Loading live event") res = self.request("GET", self.live_url) for event in res.get("events", []): return "event/{sportId}/{propertyId}/{tournamentId}/{id}".format(**event)<|fim_middle|>get_live_id<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(n_servers, i=None): return server_n<|fim_middle|>dist_fcn_1_server<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> 'outputs.PrivateEndpointConnectionPropertiesResponse': """ Resource properties. """ return pulumi.get(self, "properties")<|fim_middle|>properties<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): cli_params = ['application_name', 'config_file', 'eu-west-1', '--destinationTableAutoCreate', '--connection-pre-test', 'False'] config_reader = GlobalConfigParametersReader() default_parameters = config_reader.get_config_key_values_updated_with_cli_args(cli_params) expected_value = True returned_value = default_parameters['destinationTableAutoCreate'] self.assertEqual(expected_value, returned_value)<|fim_middle|>test_global_config_reader_overwritten_default_value<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( staff_api_client, permission_manage_shipping, shipping_method ): # given shipping_method.store_value_in_private_metadata({PUBLIC_KEY: PUBLIC_VALUE}) shipping_method.save(update_fields=["metadata"]) shipping_method_id = graphene.Node.to_global_id( "ShippingMethodType", shipping_method.pk ) # when response = execute_clear_private_metadata_for_item( staff_api_client, permission_manage_shipping, shipping_method_id, "ShippingMethodType", ) # then assert item_without_private_metadata( response["data"]["deletePrivateMetadata"]["item"], shipping_method, shipping_method_id, )<|fim_middle|>test_delete_private_metadata_for_shipping_method<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): if not session.user: raise Forbidden # If the user cannot manage the whole event see if anything gives them # limited management access. if not self.event.can_manage(session.user): urls = sorted(values_from_signal(signals.event_management.management_url.send(self.event), single_value=True)) response = redirect(urls[0]) if urls else None raise Forbidden(response=response) RHManageEventBase.METHOD_NAME(self) # mainly to trigger the legacy "event locked" check<|fim_middle|>check_access<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(cursor) -> List[Tuple[DbTableSchema, str]]: schemas: Dict = {} for row in cursor.fetchall(): table_schema_name: str = row[_TABLE_SCHEMA] table_name: DbTableMeta = DbTableMeta(row[_TABLE_NAME]) table_column: DbColumn = DbColumn( name=row[_COLUMN_NAME], type=row[_UDT_NAME], ordinal_position=row[_ORDINAL_POSITION], ) try: table_database = row[_TABLE_DATABASE] except IndexError: table_database = None # Attempt to get table schema table_key = ".".join( filter(None, [table_database, table_schema_name, table_name.name]) ) # table_key: str = f"{table_schema_name}.{table_name}" table_schema: Optional[DbTableSchema] table_schema, _ = schemas.get(table_key) or (None, None) if table_schema: # Add column to existing table schema. schemas[table_key][0].columns.append(table_column) else: # Create new table schema with column. schemas[table_key] = ( DbTableSchema( schema_name=table_schema_name, table_name=table_name, columns=[table_column], ), table_database, ) return list(schemas.values())<|fim_middle|>parse_query_result<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): # Try again with a target with a stretched y axis. A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float) B_orig = np.array([[3, 40], [1, 0], [3, -40], [5, 0]], dtype=float) A, A_mu = _centered(A_orig) B, B_mu = _centered(B_orig) R, s = orthogonal_procrustes(A, B) scale = s / np.square(norm(A)) B_approx = scale * np.dot(A, R) + B_mu expected = np.array([[3, 21], [-18, 0], [3, -21], [24, 0]], dtype=float) assert_allclose(B_approx, expected, atol=1e-8) # Check disparity symmetry. expected_disparity = 0.4501246882793018 AB_disparity = np.square(norm(B_approx - B_orig) / norm(B)) assert_allclose(AB_disparity, expected_disparity) R, s = orthogonal_procrustes(B, A) scale = s / np.square(norm(B)) A_approx = scale * np.dot(B, R) + A_mu BA_disparity = np.square(norm(A_approx - A_orig) / norm(A)) assert_allclose(BA_disparity, expected_disparity)<|fim_middle|>test_orthogonal_procrustes_stretched_example<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( mock_smb_client: SMBClient, smb_remote_access_client: SMBRemoteAccessClient, ): tags = EXPLOITER_TAGS.copy() smb_remote_access_client.login(FULL_CREDENTIALS[0], set()) smb_remote_access_client.execute_agent(DESTINATION_PATH, tags) assert tags == EXPLOITER_TAGS.union(EXECUTION_TAGS)<|fim_middle|>test_execute_succeeds<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str: """ Gets the workflow trigger callback URL relative path. """ return pulumi.get(self, "relative_path")<|fim_middle|>relative_path<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): form_data = { "name": "Assunto 2", "visible": True, "init_date": datetime.now() + timedelta(days=2), "end_date": datetime.now() + timedelta(days=3), "subscribe_begin": datetime.now(), "subscribe_end": datetime.now() + timedelta(days=1), "category": self.category, "tags": "teste,test,testando" } form = SubjectForm(data=form_data, initial={"category": self.category}) form.save() subject = Subject.objects.latest("id") tags = [str(t) for t in subject.tags.all()] self.assertIn("teste", tags) self.assertIn("test", tags) self.assertIn("testando", tags)<|fim_middle|>test_form_tags<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.deployment_type = "AllAtOnce" self.pre_traffic_hook = "pre_traffic_function_ref" self.post_traffic_hook = "post_traffic_function_ref" self.alarms = ["alarm1ref", "alarm2ref"] self.role = {"Ref": "MyRole"} self.trigger_configurations = { "TriggerEvents": ["DeploymentSuccess", "DeploymentFailure"], "TriggerTargetArn": {"Ref": "MySNSTopic"}, "TriggerName": "TestTrigger", } self.condition = "condition"<|fim_middle|>set_up<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(x, n): c = 0.9 mu = (np.arange(1, n+1) - 0.5)/n return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))<|fim_middle|>f_6<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): args = argsparser() config_parser = ConfigParser(args) args = config_parser.parser() random.seed(args.seed) np.random.seed(args.seed) paddle.seed(args.seed) paddle.device.set_device(args.device) class_name = args.category assert class_name in mvtec.CLASS_NAMES print("Testing model for {}".format(class_name)) # build model model = get_model(args.method)(arch=args.backbone, pretrained=False, k=args.k, method=args.method) model.eval() state = paddle.load(args.model_path) model.model.set_dict(state["params"]) model.load(state["stats"]) model.eval() # build data MVTecDataset = mvtec.MVTecDataset(is_predict=True) transform_x = MVTecDataset.get_transform_x() x = Image.open(args.img_path).convert('RGB') x = transform_x(x).unsqueeze(0) predict(args, model, x)<|fim_middle|>main<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(api_dir, xml_dir): import subprocess, sys try: # We don't generate groups since we create those manually ret = subprocess.call('breathe-apidoc -m -o %s -p openucx %s -g struct,file' % (api_dir, xml_dir), shell=True) if ret < 0: sys.stderr.write('breathe-apidoc error code %s' % (-ret)) except OSError as e: sys.stderr.write('breathe-apidoc execution failed: %s' % e)<|fim_middle|>run_apidoc<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( tmp_path: Path, filename: str, fmt: str | None, data: str, expected: Any, testing_metadata, ): path = tmp_path / filename path.write_text(data) assert ( jinja_context.load_file_data(str(path), fmt, config=testing_metadata.config) == expected )<|fim_middle|>test_load_file_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> int: return hash(self)<|fim_middle|>hash_code<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): """Open preferences dialog""" widgets = gamewidget.getWidgets() preferencesDialog.run(widgets) notebook = widgets["preferences_notebook"] self.assertIsNotNone(preferencesDialog.general_tab) notebook.next_page() self.assertIsNotNone(preferencesDialog.hint_tab) notebook.next_page() self.assertIsNotNone(preferencesDialog.theme_tab) notebook.next_page() self.assertIsNotNone(preferencesDialog.sound_tab) notebook.next_page() self.assertIsNotNone(preferencesDialog.save_tab)<|fim_middle|>test4<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(dataarray) -> None: data_repr = fh.short_data_repr_html(dataarray) assert data_repr.startswith("<pre>array")<|fim_middle|>test_short_data_repr_html<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, fileno, new=False): mask = 0 if self.listeners[self.READ].get(fileno): mask |= self.READ_MASK | self.EXC_MASK if self.listeners[self.WRITE].get(fileno): mask |= self.WRITE_MASK | self.EXC_MASK try: if mask: if new: self.poll.METHOD_NAME(fileno, mask) else: try: self.poll.modify(fileno, mask) except (IOError, OSError): self.poll.METHOD_NAME(fileno, mask) else: try: self.poll.unregister(fileno) except (KeyError, IOError, OSError): # raised if we try to remove a fileno that was # already removed/invalid pass except ValueError: # fileno is bad, issue 74 self.remove_descriptor(fileno) raise<|fim_middle|>register<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): # restart the collectd mapper to use recently set port c8y_mapper_status = self.startProcess( command=self.sudo, arguments=["systemctl", "restart", "tedge-mapper-collectd.service"], stdouterr="collectd_mapper_restart", ) # check the status of the collectd mapper c8y_mapper_status = self.startProcess( command=self.sudo, arguments=["systemctl", "status", "tedge-mapper-collectd.service"], stdouterr="collectd_mapper_status", ) self.assertGrep( "collectd_mapper_status.out", " MQTT connection error: I/O: Connection refused (os error 111)", contains=False, )<|fim_middle|>validate_collectd_mapper<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, collection_name, vectors, top_k): # Search vector in milvus collection try: self.set_collection(collection_name) search_params = { "metric_type": METRIC_TYPE, "params": { "nprobe": 16 } } res = self.collection.search( vectors, anns_field="embedding", param=search_params, limit=top_k) LOGGER.debug(f"Successfully search in collection: {res}") return res except Exception as e: LOGGER.error(f"Failed to search vectors in Milvus: {e}") sys.exit(1)<|fim_middle|>search_vectors<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(q, t, q_len, t_len): """Compute the sliding dot products between a query and a time series. Parameters ---------- q: numpy.array Query. t: numpy.array Time series. q_len: int Length of the query. t_len: int Length of the time series. Output ------ dot_prod: numpy.array Sliding dot products between q and t. """ # Reversing query and padding both query and time series t_padded = np.pad(t, (0, t_len)) q_reversed = np.flipud(q) q_reversed_padded = np.pad(q_reversed, (0, 2 * t_len - q_len)) # Applying FFT to both query and time series t_fft = np.fft.fft(t_padded) q_fft = np.fft.fft(q_reversed_padded) # Applying inverse FFT to obtain the convolution of the time series by # the query element_wise_mult = np.multiply(t_fft, q_fft) inverse_fft = np.fft.ifft(element_wise_mult) # Returns only the valid dot products from inverse_fft dot_prod = inverse_fft[q_len - 1 : t_len].real return dot_prod<|fim_middle|>sliding_dot_products<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(file_path, size=None): """ Turn given picture into a smaller version. """ im = Image.open(file_path) if size is not None: (width, height) = size if height == 0: size = get_full_size_from_width(im, width) else: size = im.size im = make_im_bigger_if_needed(im, size) im = fit_to_target_size(im, size) im.thumbnail(size, Image.Resampling.LANCZOS) if im.mode == "CMYK": im = im.convert("RGBA") final = Image.new("RGBA", size, (0, 0, 0, 0)) final.paste( im, (int((size[0] - im.size[0]) / 2), int((size[1] - im.size[1]) / 2)) ) final.save(file_path, "PNG") return file_path<|fim_middle|>turn_into_thumbnail<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): pass<|fim_middle|>test_future<|file_separator|>
<|fim_prefix|> <|fim_suffix|>(self):<|fim_middle|>contents<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): group_delete_mock = MagicMock(return_value=True) group_info_mock = MagicMock(return_value={"things": "stuff"}) with patch.dict(group.__salt__, {"group.delete": group_delete_mock}), patch.dict( group.__salt__, {"group.info": group_info_mock} ): ret = group.absent("salt", local=True) assert ret == { "changes": {"salt": ""}, "comment": "Removed group salt", "name": "salt", "result": True, } if salt.utils.platform.is_windows(): group_info_mock.assert_called_once_with("salt") group_delete_mock.assert_called_once_with("salt") else: group_info_mock.assert_called_once_with("salt", root="/") group_delete_mock.assert_called_once_with("salt", local=True)<|fim_middle|>test_absent_with_local<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(bin): if type(bin) == type(bytes()): try: return bytes.decode(bin, encoding='utf-8', errors='strict') except: pass # we want a hexdump in \xNN notation. bin.hex only takes a single char, so we replace that later. return "\\x" + bin.hex(':').replace(':', "\\x") return "ERROR: unknown type in bin_dumper(): " + str(type(bin))<|fim_middle|>bin_dumper<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): # One of these environment variables are guaranteed to exist # from our official docker images. # DISPATCH_VERSION is from a tagged release, and DISPATCH_BUILD is from a # a git based image. return "DISPATCH_VERSION" in os.environ or "DISPATCH_BUILD" in os.environ<|fim_middle|>is_docker<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(validate_event_schema): def inner(message, **kwargs): event = serialize({"logentry": {"message": message}}, **kwargs) validate_event_schema(event) return event["logentry"]["message"] return inner<|fim_middle|>message_normalizer<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): s = vaex.string_column(["aap", None, "noot", "mies"]) o = ["aap", None, "noot", np.nan] x = np.arange(4, dtype=np.float64) x[2] = x[3] = np.nan m = np.ma.array(x, mask=[0, 1, 0, 1]) df = vaex.from_arrays(x=x, m=m, s=s, o=o) x = df.x.dropmissing().tolist() assert (9 not in x) assert np.any(np.isnan(x)), "nan is not a missing value" m = df.m.dropmissing().tolist() assert (m[:1] == [0]) assert np.isnan(m[1]) assert len(m) == 2 assert (df.s.dropmissing().tolist() == ["aap", "noot", "mies"]) assert (df.o.dropmissing().tolist()[:2] == ["aap", "noot"]) # this changed in vaex 4, since the np.nan is considered missing, the whole # columns is seen as string # assert np.isnan(df.o.dropmissing().tolist()[2])<|fim_middle|>test_dropmissing<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(A, node_features, k): """ Compute the k-hop adjacency matrix and aggregated features using message passing. Parameters: A (numpy array or scipy sparse matrix): The adjacency matrix of the graph. node_features (numpy array or scipy sparse matrix): The feature matrix of the nodes. k (int): The number of hops for message passing. Returns: A_k (numpy array): The k-hop adjacency matrix. agg_features (numpy array): The aggregated feature matrix for each node in the k-hop neighborhood. """ # Convert input matrices to sparse matrices if they are not already if not sp.issparse(A): A = sp.csr_matrix(A) if not sp.issparse(node_features): node_features = sp.csr_matrix(node_features) # Compute the k-hop adjacency matrix and the aggregated features A_k = A.copy() agg_features = node_features.copy() for i in tqdm(range(k)): # Compute the message passing for the k-hop neighborhood message = A_k.dot(node_features) # Apply a GCN layer to aggregate the messages agg_features = A_k.dot(agg_features) + message # Update the k-hop adjacency matrix by adding new edges A_k += A_k.dot(A) return A_k.toarray(), agg_features.toarray()<|fim_middle|>k_hop_message_passing_sparse<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): if self.options.shared: self.options.rm_safe("fPIC") self.options["trantor"].shared = True if not self.options.with_orm: del self.options.with_postgres del self.options.with_postgres_batch del self.options.with_mysql del self.options.with_sqlite del self.options.with_redis elif not self.options.with_postgres: del self.options.with_postgres_batch<|fim_middle|>configure<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(self): pass<|fim_middle|>async_tear_down<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( self, recipe: BaseRecipe, recipe_conf: PerfRecipeConf, results: List[PerfMeasurementResults], ) -> List[List[PerfMeasurementResults]]: results_by_host = self._divide_results_by_host(results) for host_results in results_by_host.values(): yield host_results<|fim_middle|>group_results<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): aq17 = ThermoFunDatabase("aq17") T = 298.15 P = 1.0e5 #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of H2O@ #------------------------------------------------------------------- species = aq17.species().get("H2O@") assert species.formula().equivalent("H2O") assert species.substance() == "Water HGK" assert species.aggregateState() == AggregateState.Aqueous assert species.charge() == 0 assert species.molarMass() == pytest.approx(0.0180153) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-2.371817e+05) assert props.H0[0] == pytest.approx(-2.858310e+05) assert props.V0[0] == pytest.approx( 1.806862e-05) assert props.Cp0[0] == pytest.approx( 7.532758e+01) #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of CO3-2 #------------------------------------------------------------------- species = aq17.species().get("CO3-2") assert species.formula().equivalent("CO3-2") assert species.substance() == "CO3-2 carbonate ion" assert species.aggregateState() == AggregateState.Aqueous assert species.charge() == -2 assert species.molarMass() == pytest.approx(0.0600100979) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-5.279830e+05) assert props.H0[0] == pytest.approx(-6.752359e+05) assert props.V0[0] == pytest.approx(-6.063738e-06) assert props.Cp0[0] == pytest.approx(-3.228612e+02) #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of Ca+2 #------------------------------------------------------------------- species = aq17.species().get("Ca+2") assert species.formula().equivalent("Ca+2") assert species.substance() == "Ca+2 ion" assert species.aggregateState() == AggregateState.Aqueous assert species.charge() == +2 assert species.molarMass() == pytest.approx(0.040076902) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-5.528210e+05) assert props.H0[0] == pytest.approx(-5.431003e+05) assert props.V0[0] == pytest.approx(-1.844093e-05) assert props.Cp0[0] == pytest.approx(-3.099935e+01) #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of CO2 #------------------------------------------------------------------- species = aq17.species().get("CO2") assert species.formula().equivalent("CO2") assert species.substance() == "Carbon dioxide (CO2)" assert species.aggregateState() == AggregateState.Gas assert species.charge() == 0 assert species.molarMass() == pytest.approx(0.0440096006) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-3.943510e+05) assert props.H0[0] == pytest.approx(-3.935472e+05) assert props.V0[0] == pytest.approx( 0.0000000000) assert props.Cp0[0] == pytest.approx( 3.710812e+01) #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of Calcite #------------------------------------------------------------------- species = aq17.species().get("Calcite") assert species.formula().equivalent("CaCO3") assert species.substance() == "Calcite (cc)" assert species.aggregateState() == AggregateState.CrystallineSolid assert species.charge() == 0 assert species.molarMass() == pytest.approx(0.1000869999) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-1.129195e+06) assert props.H0[0] == pytest.approx(-1.207470e+06) assert props.V0[0] == pytest.approx( 3.689000e-05) assert props.Cp0[0] == pytest.approx( 8.337073e+01) with pytest.raises(RuntimeError): assert ThermoFunDatabase("not-a-valid-file-name") with pytest.raises(RuntimeError): assert ThermoFunDatabase.withName("not-a-valid-file-name") with pytest.raises(RuntimeError): assert ThermoFunDatabase.fromFile("not-a-valid-file-name")<|fim_middle|>test_thermo_fun_database<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(next_link=None): if not next_link: request = build_list_request( subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.list.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: # make call to next link with the client's api-version _parsed_next_link = urllib.parse.urlparse(next_link) _next_request_params = case_insensitive_dict( { key: [urllib.parse.quote(v) for v in value] for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) _next_request_params["api-version"] = self._config.api_version request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request<|fim_middle|>prepare_request<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, record: logging.LogRecord) -> str: levelname = record.levelname if self.use_color and levelname in self.COLORS: levelname_with_color = ( self.COLOR_SEQ % (30 + self.COLORS[levelname]) + levelname + self.RESET_SEQ ) record.levelname = levelname_with_color formated_record = logging.Formatter.METHOD_NAME(self, record) record.levelname = ( levelname # Resetting levelname as `record` might be used elsewhere ) return formated_record else: return logging.Formatter.METHOD_NAME(self, record)<|fim_middle|>format<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): section = self.doc_structure.add_new_section('mysection') section.writeln('section contents') self.doc_structure.hrefs['foo'] = 'www.foo.com' section.hrefs['bar'] = 'www.bar.com' contents = self.doc_structure.flush_structure() self.assertIn(b'.. _foo: www.foo.com', contents) self.assertIn(b'.. _bar: www.bar.com', contents)<|fim_middle|>test_flush_structure_hrefs<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): """ "vendors" notary into docker by copying all of notary into the docker vendor directory - also appending several lines into the Dockerfile because it pulls down notary from github and builds the binaries """ docker_notary_relpath = "vendor/src/github.com/theupdateframework/notary" docker_notary_abspath = os.path.join(DOCKER_DIR, docker_notary_relpath) print("copying notary ({0}) into {1}".format(NOTARY_DIR, docker_notary_abspath)) def ignore_dirs(walked_dir, _): """ Don't vendor everything, particularly not the docker directory recursively, if it happened to be in the notary directory """ if walked_dir == NOTARY_DIR: return [".git", ".cover", "docs", "bin"] elif walked_dir == os.path.join(NOTARY_DIR, "fixtures"): return ["compatibility"] return [] if os.path.exists(docker_notary_abspath): shutil.rmtree(docker_notary_abspath) shutil.copytree( NOTARY_DIR, docker_notary_abspath, symlinks=True, ignore=ignore_dirs) # hack this because docker/docker's Dockerfile checks out a particular version of notary # based on a tag or SHA, and we want to build based on what was vendored in dockerfile_addition = ("\n" "RUN set -x && " "export GO15VENDOREXPERIMENT=1 && " "go build -o /usr/local/bin/notary-server github.com/theupdateframework/notary/cmd/notary-server &&" "go build -o /usr/local/bin/notary github.com/theupdateframework/notary/cmd/notary") with open(os.path.join(DOCKER_DIR, "Dockerfile")) as dockerfile: text = dockerfile.read() if not text.endswith(dockerfile_addition): with open(os.path.join(DOCKER_DIR, "Dockerfile"), 'a+') as dockerfile: dockerfile.write(dockerfile_addition) # hack the makefile so that we tag the built image as something else so we # don't interfere with any other docker test builds with open(os.path.join(DOCKER_DIR, "Makefile"), 'r') as makefile: makefiletext = makefile.read() with open(os.path.join(DOCKER_DIR, "Makefile"), 'wb') as makefile: image_name = os.getenv("DOCKER_TEST_IMAGE_NAME", "notary-docker-vendor-test") text = re.sub("^DOCKER_IMAGE := .+$", "DOCKER_IMAGE := {0}".format(image_name), makefiletext, 1, flags=re.M) makefile.write(text)<|fim_middle|>fake_vendor<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(context, data_dict): return {'success': False, 'msg': 'Not implemented yet in the auth refactor'}<|fim_middle|>revision_undelete<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): if not isRunningAsRoot(): return False if not isMMapSupported(): return False return True<|fim_middle|>is_timing_series_supported<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(filename, line): """ Append one line of text to filename. :param filename: Path to the file. :type filename: str :param line: Line to be written. :type line: str """ append_file(filename, line.rstrip("\n") + "\n")<|fim_middle|>append_one_line<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, assembler): """ Create a list of functions to be tested and their reference values for the problem """ func_list = [ functions.StructuralMass(assembler), functions.Compliance(assembler), functions.KSDisplacement( assembler, ksWeight=ksweight, direction=[0.0, 0.0, 1.0] ), functions.KSFailure(assembler, ksWeight=ksweight, safetyFactor=1.5), ] return func_list, FUNC_REFS<|fim_middle|>setup_funcs<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(request, kube_apis): filtered_ns_1 = create_namespace_with_name_from_yaml(kube_apis.v1, f"filtered-ns-1", f"{TEST_DATA}/common/ns.yaml") filtered_ns_2 = create_namespace_with_name_from_yaml(kube_apis.v1, f"filtered-ns-2", f"{TEST_DATA}/common/ns.yaml") filtered_secret_1 = create_secret_from_yaml( kube_apis.v1, filtered_ns_1, f"{TEST_DATA}/filter-secrets/filtered-secret-1.yaml" ) filtered_secret_2 = create_secret_from_yaml( kube_apis.v1, filtered_ns_2, f"{TEST_DATA}/filter-secrets/filtered-secret-2.yaml" ) nginx_ingress_secret = create_secret_from_yaml( kube_apis.v1, "nginx-ingress", f"{TEST_DATA}/filter-secrets/nginx-ingress-secret.yaml" ) wait_before_test(1) def fin(): if request.config.getoption("--skip-fixture-teardown") == "no": print("Clean up:") if is_secret_present(kube_apis.v1, filtered_secret_1, filtered_ns_1): delete_secret(kube_apis.v1, filtered_secret_1, filtered_ns_1) if is_secret_present(kube_apis.v1, filtered_secret_2, filtered_ns_2): delete_secret(kube_apis.v1, filtered_secret_2, filtered_ns_2) if is_secret_present(kube_apis.v1, nginx_ingress_secret, "nginx-ingress"): delete_secret(kube_apis.v1, nginx_ingress_secret, "nginx-ingress") delete_namespace(kube_apis.v1, filtered_ns_1) delete_namespace(kube_apis.v1, filtered_ns_2) request.addfinalizer(fin)<|fim_middle|>setup_multiple_ns_and_multiple_secrets<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, version: Optional[pulumi.Input[str]] = None, workspace_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDataVersionResult]: """ Azure Resource Manager resource envelope. :param str name: Container name. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str version: Version identifier. :param str workspace_name: Name of Azure Machine Learning workspace. """ ...<|fim_middle|>get_data_version_output<|file_separator|>
<|fim_prefix|> <|fim_suffix|>( self ) :<|fim_middle|>test_copy<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(address: str) -> bytes32: hrpgot, data = bech32_decode(address) if data is None: raise ValueError("Invalid Address") decoded = convertbits(data, 5, 8, False) decoded_bytes = bytes32(decoded) return decoded_bytes<|fim_middle|>decode_puzzle_hash<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(en_vocab): doc = Doc(en_vocab, words=["hello", "world"]) with make_tempdir() as d: file_path = d / "doc" doc.to_disk(file_path) doc_d = Doc(en_vocab).from_disk(file_path) assert doc.to_bytes() == doc_d.to_bytes()<|fim_middle|>test_serialize_doc_roundtrip_disk<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): with self.assertRaises(ValueError): losses.regularization_penalty("l1_l2", 1e-4, [])<|fim_middle|>test_regulaization_missing_scale_value<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): """Parse command line arguments using argparse. """ parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument( '-V', '--version', action='version', version='{0}: v{1} by {2}'.format('%(prog)s', __version__, __author__) ) parser.add_argument( '--always-ok', help='Always returns OK.', dest='ALWAYS_OK', action='store_true', default=False, ) parser.add_argument( '--defaults-file', help='Specifies a cnf file to read parameters like user, host and password from ' '(instead of specifying them on the command line), ' 'for example `/var/spool/icinga2/.my.cnf`. Default: %(default)s', dest='DEFAULTS_FILE', default=DEFAULT_DEFAULTS_FILE, ) parser.add_argument( '--defaults-group', help='Group/section to read from in the cnf file. Default: %(default)s', dest='DEFAULTS_GROUP', default=DEFAULT_DEFAULTS_GROUP, ) parser.add_argument( '--timeout', help='Network timeout in seconds. Default: %(default)s (seconds)', dest='TIMEOUT', type=int, default=DEFAULT_TIMEOUT, ) return parser.METHOD_NAME()<|fim_middle|>parse_args<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( component: ComponentSpec, cross_section: CrossSectionSpec = "strip", port1: str = "o1", port2: str = "o2", straight_length: float | None = None, **kwargs, ) -> ComponentSpec: """Returns double straight. Args: component: for cutback. cross_section: specification (CrossSection, string or dict). port1: name of first optical port. port2: name of second optical port. straight_length: length of straight. kwargs: cross_section settings. """ xs = gf.get_cross_section(cross_section, **kwargs) METHOD_NAME = gf.Component() straight_component = straight( length=straight_length or xs.radius * 2, cross_section=xs ) straight_component2 = straight( length=straight_length or xs.radius * 2, cross_section=xs ) straight_r = METHOD_NAME << straight_component straight_r2 = METHOD_NAME << straight_component2.mirror((1, 0)) straight_r2 = straight_r2.move( origin=(0, 0), destination=(0, -component.ports[port1].y + component.ports[port2].y), ) METHOD_NAME.add_port("o1", port=straight_r.ports["o1"]) METHOD_NAME.add_port("o2", port=straight_r2.ports["o1"]) METHOD_NAME.add_port("o3", port=straight_r2.ports["o2"]) METHOD_NAME.add_port("o4", port=straight_r.ports["o2"]) return METHOD_NAME<|fim_middle|>straight_double<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): """ BaseDirectory with no existence check accepts any pathlib path. """ foo = SimpleBaseDirectory() foo.path = pathlib.Path("!!!") self.assertIsInstance(foo.path, str)<|fim_middle|>test_simple_accepts_any_pathlib<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): fmt = """ # comments are allowed > # big endian (see documentation for struct) # empty lines are allowed: ashort: h along: l abyte: b # a byte achar: c astr: 5s afloat: f; adouble: d # multiple "statements" are allowed afixed: 16.16F abool: ? apad: x """ print("size:", calcsize(fmt)) class foo(object): pass i = foo() i.ashort = 0x7FFF i.along = 0x7FFFFFFF i.abyte = 0x7F i.achar = "a" i.astr = "12345" i.afloat = 0.5 i.adouble = 0.5 i.afixed = 1.5 i.abool = True data = pack(fmt, i) print("data:", repr(data)) print(unpack(fmt, data)) i2 = foo() unpack(fmt, data, i2) print(vars(i2))<|fim_middle|>test<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(tmp_path): outfilename = tmp_path / "vu_tide_hourly_p0.dfs0" ds = mikeio.read("tests/testdata/vu_tide_hourly.dfs1") assert ds.n_elements > 1 ds_0 = ds.isel(0, axis="space") assert ds_0.n_elements == 1 ds_0_0 = ds_0.isel(0) assert ds_0_0.n_timesteps == 1 ds_0_0.to_dfs(outfilename) dsnew = mikeio.read(outfilename) assert dsnew.n_timesteps == 1<|fim_middle|>test_select_point_and_single_step_dfs1<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.window.show_all() self.window.present()<|fim_middle|>show<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>( auth: AcaPyAuth = Depends(acapy_auth),<|fim_middle|>create_did<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): session = requests.Session() make_session_public_only(session, 'demo_domain', src='testing') return session<|fim_middle|>set_up_session<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.assertEqual(build_password("plain"), "plaintext:plain")<|fim_middle|>test_default_plaintext<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( user_id: str ) -> List[learner_group_domain.LearnerGroup]: """Returns a list of learner groups of the given facilitator. Args: user_id: str. The id of the facilitator. Returns: list(LearnerGroup). A list of learner groups of the given facilitator. """ learner_grp_models = ( learner_group_models.LearnerGroupModel.get_by_facilitator_id(user_id)) if not learner_grp_models: return [] return [ learner_group_services.get_learner_group_from_model(model) for model in learner_grp_models ]<|fim_middle|>get_learner_groups_of_facilitator<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, value: Optional[float]) -> None: """When not draining we pass thru to the socket, since when draining we control the timeout. """ if value is not None: self._recv_timeout_sec = value if self._drain_thread is None: socket.socket.METHOD_NAME(self, value)<|fim_middle|>settimeout<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( self, description: str, params: Mapping[str, Any], url: bool | None = False, provider: ExternalProviders | None = None, ) -> str: if self.user: name = self.user.name or self.user.email else: name = "Sentry" issue_name = self.group.qualified_short_id or "an issue" if url and self.group.qualified_short_id: group_url = self.group.get_absolute_url(params={"referrer": "activity_notification"}) issue_name = f"{self.format_url(text=self.group.qualified_short_id, url=group_url, provider=provider)}" context = {"author": name, "an issue": issue_name} context.update(params) return description.format(**context)<|fim_middle|>description_as_text<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, native_face): self._face = native_face self._loops = [RhinoBrepLoop(loop) for loop in native_face.Loops] self._surface = RhinoNurbsSurface.from_rhino(self._face.UnderlyingSurface().ToNurbsSurface())<|fim_middle|>set_face<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, user): return self.get_for_user(user, teammembership__role=TeamMembership.ROLE.OWNER)<|fim_middle|>get_owner_teams<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): column = BigqueryColumn( name="date", field_path="date", ordinal_position=1, data_type="TIMESTAMP", is_partition_column=True, cluster_column_position=None, comment=None, is_nullable=False, ) partition_info = PartitionInfo(type="DAY", field="date", column=column) profiler = BigqueryProfiler(config=BigQueryV2Config(), report=BigQueryV2Report()) test_table = BigqueryTable( name="test_table", comment="test_comment", rows_count=1, size_in_bytes=1, last_altered=datetime.now(timezone.utc), created=datetime.now(timezone.utc), partition_info=partition_info, max_partition_id="20200101", ) query = profiler.generate_partition_profiler_query( project="test_project", schema="test_dataset", table=test_table, ) expected_query = """<|fim_middle|>test_generate_day_partitioned_partition_profiler_query<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, inputs, metric, functional_metric, ref_metric, ignore_index): """Test functional implementation of metric.""" preds, target = inputs if ignore_index is not None: target = inject_ignore_index(target, ignore_index) self.run_functional_metric_test( preds=preds, target=target, metric_functional=functional_metric, reference_metric=partial(_sklearn_ranking, fn=ref_metric, ignore_index=ignore_index), metric_args={ "num_labels": NUM_CLASSES, "ignore_index": ignore_index, }, )<|fim_middle|>test_multilabel_ranking_functional<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, positions: TensorType["bs":..., 3]) -> TensorType["bs":..., 1]: """Returns only the density. Used primarily with the density grid. Args: positions: the origin of the samples/frustums """ # Need to figure out a better way to descibe positions with a ray. ray_samples = RaySamples( frustums=Frustums( origins=positions, directions=torch.ones_like(positions), starts=torch.zeros_like(positions[..., :1]), ends=torch.zeros_like(positions[..., :1]), pixel_area=torch.ones_like(positions[..., :1]), ) ) density, _ = self.get_density(ray_samples) return density<|fim_middle|>density_fn<|file_separator|>
<|fim_prefix|> <|fim_suffix|>(self, old_name, new_name, merge=False):<|fim_middle|>after_rename<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): examinee = create_upgrade_pr( from_ref=cm.ComponentReference( name='c1', componentName='c1', version='1.2.3', ), to_ref=cm.ComponentReference( name='c1', componentName='c1', version='2.0.0', ), ) cref = cm.ComponentReference( name='c1', componentName='c1', version='6.0.0', ) reference_component = cm.Component( name='c1', version='6.6.6', repositoryContexts=(), provider=None, sources=(), resources=(), componentReferences=() ) # test with reference component not declaring this dependency assert not examinee.is_obsolete(reference_component=reference_component) # add differently-named dependency with greater version reference_component.componentReferences = ( dataclasses.replace(cref, componentName='other-name'), ) assert not examinee.is_obsolete(reference_component=reference_component) # add same-named web dependency with lesser version reference_component.componentReferences = ( dataclasses.replace(cref, version='0.0.1'), ) assert not examinee.is_obsolete(reference_component=reference_component) # add same-named resource of greater version but different type # todo: we should actually also test dependencies towards resources of two different types reference_component.resources = ( cm.Resource( name='c1', version='6.0.0', type=cm.ArtefactType.BLOB, access=None, ), ) assert not examinee.is_obsolete(reference_component=reference_component) # finally, add greater dependency of matching type and name reference_component.componentReferences = ( dataclasses.replace(cref, version='9.9.9'), ) assert examinee.is_obsolete(reference_component=reference_component)<|fim_middle|>test_is_obsolete<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(testsystem_names, niterations=5): """ Run sampler stack on named test systems. Parameters ---------- testsystem_names : list of str Names of test systems to run niterations : int, optional, default=5 Number of iterations to run """ for testsystem_name in testsystem_names: import perses.tests.testsystems testsystem_class = getattr(perses.tests.testsystems, testsystem_name) # Instantiate test system. testsystem = testsystem_class() # Test MCMCSampler samplers. for environment in testsystem.environments: mcmc_sampler = testsystem.mcmc_samplers[environment] f = partial(mcmc_sampler.run, niterations) f.description = "Testing MCMC sampler with %s '%s'" % (testsystem_name, environment) yield f # Test ExpandedEnsembleSampler samplers. for environment in testsystem.environments: exen_sampler = testsystem.exen_samplers[environment] f = partial(exen_sampler.run, niterations) f.description = "Testing expanded ensemble sampler with %s '%s'" % (testsystem_name, environment) yield f # Test SAMSSampler samplers. for environment in testsystem.environments: sams_sampler = testsystem.sams_samplers[environment] f = partial(sams_sampler.run, niterations) f.description = "Testing SAMS sampler with %s '%s'" % (testsystem_name, environment) yield f # Test MultiTargetDesign sampler, if present. if hasattr(testsystem, 'designer') and (testsystem.designer is not None): f = partial(testsystem.designer.run, niterations) f.description = "Testing MultiTargetDesign sampler with %s transfer free energy from vacuum -> %s" % (testsystem_name, environment) yield f<|fim_middle|>run_samplers<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> Response: """ Get a list with all of the tabels in TDEngine """ q = 'SHOW TABLES;' return self.native_query(q)<|fim_middle|>get_tables<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( self, configs: List[Config[ModelConfig]], performances: List[Performance], ) -> None: super().METHOD_NAME(configs, performances) # We need to sort by dataset to have the same ordering for each model config ordering = np.argsort([c.dataset.name() for c in configs]) performance_df = Performance.to_dataframe(performances) # Extract all metrics metric_map = defaultdict(list) for i in ordering: metric_map[configs[i].model].append( performance_df.iloc[i][self.objectives].to_numpy(), # type: ignore ) # Build the properties self.metrics = np.stack(list(metric_map.values()), axis=1) self.model_indices = {model: i for i, model in enumerate(metric_map)} # If we are in the multi-objective setting, we have to apply dataset-level quantile # normalization of each objective. Otherwise, we perform standardization. if not self.enforce_single_objective and len(self.objectives) > 1: transformer = QuantileTransformer( n_quantiles=min(1000, self.metrics.shape[0]) ) self.metrics = np.stack( [ transformer.fit_transform(dataset_metrics) for dataset_metrics in self.metrics ] ) else: transformer = StandardScaler() self.metrics = np.stack( [ transformer.fit_transform(dataset_metrics) for dataset_metrics in self.metrics ] )<|fim_middle|>fit<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(mock_iam_client): group = await get_group(EXAMPLE_GROUPNAME, mock_iam_client) assert group["GroupName"] == EXAMPLE_GROUPNAME<|fim_middle|>test_get_group<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, Paramsmulticast): # controle parameters multicast return self.api.SetMulticastMultiSessionParameters(Paramsmulticast)<|fim_middle|>xmlrpc_set_multicast_multi_session_parameters<|file_separator|>
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_process_file<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(m): opt = pyo.SolverFactory('gurobi') res = opt.solve(m) assert_optimal_termination(res)<|fim_middle|>solve_warehouse_location<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str: """ Resource ID. """ return pulumi.get(self, "id")<|fim_middle|>id<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): return self.event.METHOD_NAME + f"/session/{self.id}"<|fim_middle|>site_link<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(colorer, s, i): return colorer.match_seq_regexp(s, i, kind="label", regexp="`[A-z0-9]+[^`]+`_{1,2}")<|fim_middle|>rest_rule17<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): for pos in self: seq = pos.l10n_es_simplified_invoice_sequence_id pos.l10n_es_simplified_invoice_number = ( seq._get_current_sequence().number_next_actual ) pos.l10n_es_simplified_invoice_prefix = seq._get_prefix_suffix()[0] pos.l10n_es_simplified_invoice_padding = seq.padding<|fim_middle|>compute_simplified_invoice_sequence<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): x = tensor.Tensor(np.array([1, 2, 3])) self.assertEqual(x.rank, 1)<|fim_middle|>test_rank_is_one_for_vector<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): assert not np.isnan(atmosphere.get_relative_airmass(10))<|fim_middle|>test_airmass_scalar<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): """Return the default filters (all available filters).""" return dict((name, set(PlayerIter(name))) for name in PlayerIter.filters)<|fim_middle|>get_default_filters<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, token_ids: Sequence[bytes]) -> Sequence[KlerosToken]: queries = [] for token_id in token_ids: queries.append(self.kleros_contract.functions.getTokenInfo(token_id)) # name string, ticker string, addr address, symbolMultihash string, status uint8, numberOfRequests uint256 token_infos = self.ethereum_client.batch_call(queries) return [KlerosToken(*token_info) for token_info in token_infos]<|fim_middle|>get_token_info<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( self, aligned_segment_starting_times: List[List[float]], stub_test: bool = False ): """ Align the individual starting time for each video in this interface relative to the common session start time. Must be in units seconds relative to the common 'session_start_time'. Parameters ---------- aligned_segment_starting_times : list of list of floats The relative starting times of each video. Outer list is over file paths (readers). Inner list is over segments of each recording. """ number_of_files_from_starting_times = len(aligned_segment_starting_times) assert number_of_files_from_starting_times == len(self.readers_list), ( f"The length of the outer list of 'starting_times' ({number_of_files_from_starting_times}) " "does not match the number of files ({len(self.readers_list)})!" ) for file_index, (reader, aligned_segment_starting_times_by_file) in enumerate( zip(self.readers_list, aligned_segment_starting_times) ): number_of_segments = reader.header["nb_segment"][0] assert number_of_segments == len( aligned_segment_starting_times_by_file ), f"The length of starting times index {file_index} does not match the number of segments of that reader!" reader._t_starts = aligned_segment_starting_times_by_file<|fim_middle|>set_aligned_segment_starting_times<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, msg): pass<|fim_middle|>on_inv<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, output, identifier): return self._wrapped.METHOD_NAME(output._lines, identifier)<|fim_middle|>get_value_from_output<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): x = np.zeros((5, 5), dtype=int) array_2d_view_assign(x[::, ::], 9) array_2d_view_assign(x[:2:2, :2:3], 10) array_2d_view_assign(x[3::2, 3::3], 11) array_2d_view_assign(x[1:2, 2:3], 12) array_1d_view_assign(x[0, :], 1) array_1d_view_assign(x[1, ::2], 2) array_1d_view_assign(x[2, 1:4:2], 3) array_1d_view_assign(x[3, 3:4], 4) array_1d_view_assign(x[:, 0], 5) array_1d_view_assign(x[::2, 1], 6) array_1d_view_assign(x[1:4:2, 2], 7) array_1d_view_assign(x[3:4, 3], 8) for i in range(np.shape(x)[0]): for j in range(np.shape(x)[1]): print(x[i][j])<|fim_middle|>array_2d_view<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(iterable): """Test whether visitors properly set the type constraint of the a For node representing for/else statement iterating over a heterogeneous list. """ assume(type(iterable[0]) != type(iterable[1])) val_types = [type(val) for val in iterable] if int in val_types: assume(bool not in val_types) if bool in val_types: assume(int not in val_types) program = f"for elt in {iterable}:\n" f" x = elt\n" module, TypeInferrer = cs._parse_text(program) for_node = list(module.nodes_of_class(nodes.For))[0] local_type_var = module.type_environment.lookup_in_env("x") inferred_type = TypeInferrer.type_constraints.resolve(local_type_var).getValue() assert inferred_type == Any<|fim_middle|>test_for_heterogeneous_list<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(plistpath, content): """A test utility to create a plist file with known content. Ensures that the directory for the file exists, and writes an XML plist with specific content. :param plistpath: The path for the plist file to create. :param content: A dictionary of content that plistlib can use to create the plist file. :returns: The path to the file that was created. """ plistpath.parent.mkdir(parents=True, exist_ok=True) with plistpath.open("wb") as f: plistlib.dump(content, f) return plistpath<|fim_middle|>create_plist_file<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(instance, check, aggregator): del instance['custom_queries'] with mock.patch( 'datadog_checks.ibm_was.IbmWasCheck.make_request', return_value=mock_data('perfservlet-multiple-nodes.xml') ): check = check(instance) check.check(instance) node = 'node:cmhqlvij2a04' for metric_name, metrics in aggregator._metrics.items(): for metric in metrics: if 'server:IJ2Server02' in metric.tags: assert node in metric.tags, "Expected '{}' tag in '{}' tags, found {}".format( node, metric_name, metric.tags )<|fim_middle|>test_right_server_tag<|file_separator|>
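To work with these rows programmatically, the preview can be loaded with the Hugging Face datasets library. The repository id and split name below are placeholders, since this card does not state them; only the column name "text" is taken from the preview above.

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the actual dataset name.
ds = load_dataset("user/fim-python-functions", split="train")  # split assumed

# Each record holds one FIM-formatted sample in the "text" column,
# e.g. the "on_204" row shown at the top of the preview.
print(ds[0]["text"])
```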