Dataset schema (one row per sample):

    code                     string   lengths 86 - 54.5k
    code_codestyle           int64    values 0 - 371
    style_context            string   lengths 87 - 49.2k
    style_context_codestyle  int64    values 0 - 349
    label                    int64    values 0 - 1
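The rows below are shown field by field in row order. As a minimal sketch of how rows with this schema could be loaded for inspection, assuming the data is stored locally as JSON lines (the filename `train.jsonl` is hypothetical; the actual storage location is not given in this dump):

from datasets import load_dataset

# Hypothetical local file holding one JSON object per row with the
# fields listed in the schema above.
ds = load_dataset("json", data_files="train.jsonl", split="train")
print(ds.features)       # code, code_codestyle, style_context, style_context_codestyle, label
print(ds[0]["label"])    # binary label, 0 or 1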
Row 1
code:

from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )

@require_tf
class __A:
    """simple docstring"""

    UpperCamelCase__: int = XGLMConfig
    UpperCamelCase__: Optional[Any] = {}
    UpperCamelCase__: List[str] = "gelu"

    def __init__(self, lowerCamelCase__, lowerCamelCase__=14, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=2, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=0.02):
        """simple docstring"""
        __UpperCamelCase: Tuple = parent
        __UpperCamelCase: List[str] = batch_size
        __UpperCamelCase: str = seq_length
        __UpperCamelCase: Dict = is_training
        __UpperCamelCase: Tuple = use_input_mask
        __UpperCamelCase: List[Any] = use_labels
        __UpperCamelCase: Any = vocab_size
        __UpperCamelCase: List[Any] = d_model
        __UpperCamelCase: Optional[int] = num_hidden_layers
        __UpperCamelCase: List[str] = num_attention_heads
        __UpperCamelCase: Optional[int] = ffn_dim
        __UpperCamelCase: str = activation_function
        __UpperCamelCase: Any = activation_dropout
        __UpperCamelCase: Optional[int] = attention_dropout
        __UpperCamelCase: Optional[int] = max_position_embeddings
        __UpperCamelCase: Any = initializer_range
        __UpperCamelCase: Dict = None
        __UpperCamelCase: Optional[int] = 0
        __UpperCamelCase: Optional[Any] = 2
        __UpperCamelCase: str = 1

    def __lowercase(self):
        """simple docstring"""
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: List[Any] = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        __UpperCamelCase: Union[str, Any] = None
        if self.use_input_mask:
            __UpperCamelCase: Dict = random_attention_mask([self.batch_size, self.seq_length])
        __UpperCamelCase: Any = self.get_config()
        __UpperCamelCase: Optional[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def __lowercase(self):
        """simple docstring"""
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=lowerCamelCase__,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=lowerCamelCase__,
        )

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: List[str] = self.prepare_config_and_inputs()
        ((__UpperCamelCase), (__UpperCamelCase), (__UpperCamelCase), (__UpperCamelCase)): int = config_and_inputs
        __UpperCamelCase: Optional[Any] = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict

@require_tf
class __A(a, a, unittest.TestCase):
    """simple docstring"""

    UpperCamelCase__: Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    UpperCamelCase__: str = (TFXGLMForCausalLM,) if is_tf_available() else ()
    UpperCamelCase__: Optional[Any] = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    UpperCamelCase__: Tuple = False
    UpperCamelCase__: Tuple = False
    UpperCamelCase__: Optional[Any] = False

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: Tuple = TFXGLMModelTester(self)
        __UpperCamelCase: Dict = ConfigTester(self, config_class=lowerCamelCase__, n_embd=37)

    def __lowercase(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    @slow
    def __lowercase(self):
        """simple docstring"""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase: Optional[Any] = TFXGLMModel.from_pretrained(lowerCamelCase__)
            self.assertIsNotNone(lowerCamelCase__)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def __lowercase(self):
        """simple docstring"""
        super().test_resize_token_embeddings()

@require_tf
class __A(unittest.TestCase):
    """simple docstring"""

    @slow
    def __lowercase(self, lowerCamelCase__=True):
        """simple docstring"""
        __UpperCamelCase: int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __UpperCamelCase: List[str] = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.intaa)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        __UpperCamelCase: str = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        __UpperCamelCase: Optional[Any] = model.generate(lowerCamelCase__, do_sample=lowerCamelCase__, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), lowerCamelCase__)

    @slow
    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: List[str] = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __UpperCamelCase: Union[str, Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        __UpperCamelCase: str = tokenizer('Today is a nice day and', return_tensors='tf')
        __UpperCamelCase: Union[str, Any] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            __UpperCamelCase: Any = model.generate(lowerCamelCase__, do_sample=lowerCamelCase__, seed=[7, 0])
        __UpperCamelCase: Tuple = tokenizer.decode(output_ids[0], skip_special_tokens=lowerCamelCase__)
        __UpperCamelCase: List[Any] = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(lowerCamelCase__, lowerCamelCase__)

    @slow
    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __UpperCamelCase: Optional[Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __UpperCamelCase: Optional[Any] = 'left'

        # use different length sentences to test batching
        __UpperCamelCase: Optional[int] = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        __UpperCamelCase: List[Any] = tokenizer(lowerCamelCase__, return_tensors='tf', padding=lowerCamelCase__)
        __UpperCamelCase: Union[str, Any] = inputs['input_ids']
        __UpperCamelCase: Dict = model.generate(input_ids=lowerCamelCase__, attention_mask=inputs['attention_mask'], max_new_tokens=12)
        __UpperCamelCase: List[Any] = tokenizer(sentences[0], return_tensors='tf').input_ids
        __UpperCamelCase: Dict = model.generate(input_ids=lowerCamelCase__, max_new_tokens=12)
        __UpperCamelCase: Any = tokenizer(sentences[1], return_tensors='tf').input_ids
        __UpperCamelCase: Optional[Any] = model.generate(input_ids=lowerCamelCase__, max_new_tokens=12)
        __UpperCamelCase: Optional[int] = tokenizer.batch_decode(lowerCamelCase__, skip_special_tokens=lowerCamelCase__)
        __UpperCamelCase: Union[str, Any] = tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCamelCase__)
        __UpperCamelCase: int = tokenizer.decode(output_padded[0], skip_special_tokens=lowerCamelCase__)
        __UpperCamelCase: Any = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(lowerCamelCase__, lowerCamelCase__)
        self.assertListEqual(lowerCamelCase__, [non_padded_sentence, padded_sentence])
code_codestyle: 71
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() A : Tuple = logging.get_logger(__name__) A : Tuple = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] A : Optional[Any] = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = torch.load(_UpperCamelCase , map_location="cpu" ) return sd def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=rename_keys_prefix ): '''simple docstring''' __lowerCAmelCase = OrderedDict() __lowerCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __lowerCAmelCase = key for name_pair in rename_keys_prefix: __lowerCAmelCase = new_key.replace(name_pair[0] , name_pair[1] ) __lowerCAmelCase = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __lowerCAmelCase = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}." # Get Config if "pre" in checkpoint_path: __lowerCAmelCase = "pretraining" if "vcr" in checkpoint_path: __lowerCAmelCase = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: __lowerCAmelCase = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: __lowerCAmelCase = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: __lowerCAmelCase = {"visual_embedding_dim": 1024} else: raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." 
) else: if "vcr" in checkpoint_path: __lowerCAmelCase = {"visual_embedding_dim": 512} __lowerCAmelCase = "multichoice" elif "vqa_advanced" in checkpoint_path: __lowerCAmelCase = {"visual_embedding_dim": 2048} __lowerCAmelCase = "vqa_advanced" elif "vqa" in checkpoint_path: __lowerCAmelCase = {"visual_embedding_dim": 2048, "num_labels": 3129} __lowerCAmelCase = "vqa" elif "nlvr" in checkpoint_path: __lowerCAmelCase = { "visual_embedding_dim": 1024, "num_labels": 2, } __lowerCAmelCase = "nlvr" __lowerCAmelCase = VisualBertConfig(**_UpperCamelCase ) # Load State Dict __lowerCAmelCase = load_state_dict(_UpperCamelCase ) __lowerCAmelCase = get_new_dict(_UpperCamelCase , _UpperCamelCase ) if model_type == "pretraining": __lowerCAmelCase = VisualBertForPreTraining(_UpperCamelCase ) elif model_type == "vqa": __lowerCAmelCase = VisualBertForQuestionAnswering(_UpperCamelCase ) elif model_type == "nlvr": __lowerCAmelCase = VisualBertForVisualReasoning(_UpperCamelCase ) elif model_type == "multichoice": __lowerCAmelCase = VisualBertForMultipleChoice(_UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) # Save Checkpoints Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") A : Optional[int] = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
style_context_codestyle: 57
label: 0
Row 2
code:

'''simple docstring'''

def __a(UpperCAmelCase, UpperCAmelCase) -> float:
    """simple docstring"""
    _validate_point(__A)
    _validate_point(__A)
    if len(__A) != len(__A):
        raise ValueError("""Both points must be in the same n-dimensional space""")
    return float(sum(abs(a - b) for a, b in zip(__A, __A)))

def __a(UpperCAmelCase) -> None:
    """simple docstring"""
    if point:
        if isinstance(__A, __A):
            for item in point:
                if not isinstance(__A, (int, float)):
                    A = (
                        """Expected a list of numbers as input, found """
                        f"""{type(__A).__name__}"""
                    )
                    raise TypeError(__A)
        else:
            A = f"""Expected a list of numbers as input, found {type(__A).__name__}"""
            raise TypeError(__A)
    else:
        raise ValueError("""Missing an input""")

def __a(UpperCAmelCase, UpperCAmelCase) -> float:
    """simple docstring"""
    _validate_point(__A)
    _validate_point(__A)
    if len(__A) != len(__A):
        raise ValueError("""Both points must be in the same n-dimensional space""")
    return float(sum(abs(x - y) for x, y in zip(__A, __A)))

if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 357
style_context:

'''simple docstring'''

from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging

_lowerCamelCase: List[Any] = logging.get_logger(__name__)

def __a(UpperCAmelCase) -> List[int]:
    """simple docstring"""
    if isinstance(UpperCAmelCase, np.ndarray):
        return list(tensor.shape)
    A = tf.shape(UpperCAmelCase)
    if tensor.shape == tf.TensorShape(UpperCAmelCase):
        return dynamic
    A = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase)]

def __a(UpperCAmelCase, UpperCAmelCase=None, UpperCAmelCase=None) -> tf.Tensor:
    """simple docstring"""
    return tf.nn.softmax(logits=logits + 1e-9, axis=UpperCAmelCase, name=UpperCAmelCase)

def __a(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase=1e-5, UpperCAmelCase=-1) -> str:
    """simple docstring"""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase, UpperCAmelCase):
        raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""")

    # Get mean and variance on the axis to be normalized
    A, A = tf.nn.moments(UpperCAmelCase, axes=[axis], keepdims=UpperCAmelCase)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        A = [1] * inputs.shape.rank
        A = shape_list(UpperCAmelCase)[axis]
        A = tf.reshape(UpperCAmelCase, UpperCAmelCase)
        A = tf.reshape(UpperCAmelCase, UpperCAmelCase)

    # Compute layer normalization using the batch_normalization function.
    A = tf.nn.batch_normalization(
        UpperCAmelCase,
        UpperCAmelCase,
        UpperCAmelCase,
        offset=UpperCAmelCase,
        scale=UpperCAmelCase,
        variance_epsilon=UpperCAmelCase,
    )
    return outputs

def __a(UpperCAmelCase, UpperCAmelCase=0, UpperCAmelCase=-1) -> int:
    """simple docstring"""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    A = tf.shape(UpperCAmelCase)
    A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(UpperCAmelCase, UpperCAmelCase)

def __a(UpperCAmelCase) -> tf.Tensor:
    """simple docstring"""
    if not isinstance(UpperCAmelCase, tf.Tensor):
        A = tf.convert_to_tensor(UpperCAmelCase)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        A = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        A = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    A = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask

def __a(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = "input_ids") -> None:
    """simple docstring"""
    tf.debugging.assert_less(
        UpperCAmelCase,
        tf.cast(UpperCAmelCase, dtype=tensor.dtype),
        message=(
            f"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase)}) must be smaller than the embedding """
            f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
        ),
    )

def __a(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase) -> Optional[Any]:
    """simple docstring"""
    A = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    A = [x for x in data if len(UpperCAmelCase) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            """The following attributes cannot be saved to HDF5 file because """
            f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            f"""bytes: {bad_attributes}"""
        )

    A = np.asarray(UpperCAmelCase)
    A = 1
    A = np.array_split(UpperCAmelCase, UpperCAmelCase)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        A = np.array_split(UpperCAmelCase, UpperCAmelCase)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(UpperCAmelCase):
            A = chunk_data
    else:
        A = data

def __a(UpperCAmelCase, UpperCAmelCase) -> int:
    """simple docstring"""
    if name in group.attrs:
        A = [n.decode("""utf8""") if hasattr(UpperCAmelCase, """decode""") else n for n in group.attrs[name]]
    else:
        A = []
        A = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("""utf8""") if hasattr(UpperCAmelCase, """decode""") else n for n in group.attrs["""%s%d""" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data

def __a(UpperCAmelCase) -> Optional[Any]:
    """simple docstring"""

    def _expand_single_ad_tensor(UpperCAmelCase):
        if isinstance(UpperCAmelCase, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(UpperCAmelCase, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor, UpperCAmelCase)
style_context_codestyle: 337
label: 0
Row 3
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

__UpperCAmelCase = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 29
style_context:

import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock

import torch

from ..state import AcceleratorState, PartialState
from ..utils import (
    gather,
    is_bnb_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_mps_available,
    is_safetensors_available,
    is_tensorboard_available,
    is_torch_version,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)

def lowercase__(__snake_case: List[Any], __snake_case: List[str] = False):
    '''simple docstring'''
    try:
        UpperCAmelCase_: int = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        UpperCAmelCase_: Optional[int] = default
    else:
        # KEY is set, convert it to True or False.
        try:
            UpperCAmelCase_: List[Any] = strtobool(__snake_case)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no.")
    return _value

__UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False)

def lowercase__(__snake_case: int):
    '''simple docstring'''
    return unittest.skip('Test was skipped')(__snake_case)

def lowercase__(__snake_case: Tuple):
    '''simple docstring'''
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(__snake_case)

def lowercase__(__snake_case: List[str]):
    '''simple docstring'''
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(__snake_case)

def lowercase__(__snake_case: Tuple):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(__snake_case)

def lowercase__(__snake_case: List[str]):
    '''simple docstring'''
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(__snake_case)

def lowercase__(__snake_case: str):
    '''simple docstring'''
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(__snake_case)

def lowercase__(__snake_case: Tuple):
    '''simple docstring'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite'
    )(__snake_case)

def lowercase__(__snake_case: str):
    '''simple docstring'''
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(__snake_case)

def lowercase__(__snake_case: Dict):
    '''simple docstring'''
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(__snake_case)

def lowercase__(__snake_case: Tuple):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(__snake_case)

def lowercase__(__snake_case: Dict):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(__snake_case)

def lowercase__(__snake_case: Optional[int]):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(__snake_case)

def lowercase__(__snake_case: int):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(__snake_case)

def lowercase__(__snake_case: Dict):
    '''simple docstring'''
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(__snake_case)

def lowercase__(__snake_case: Tuple):
    '''simple docstring'''
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(__snake_case)

def lowercase__(__snake_case: List[Any]):
    '''simple docstring'''
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(__snake_case)

def lowercase__(__snake_case: Dict = None, __snake_case: Dict = None):
    '''simple docstring'''
    if test_case is None:
        return partial(__snake_case, version=__snake_case)
    return unittest.skipUnless(is_torch_version('>=', __snake_case), F"test requires torch version >= {version}")(__snake_case)

def lowercase__(__snake_case: str):
    '''simple docstring'''
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(__snake_case)

def lowercase__(__snake_case: List[str]):
    '''simple docstring'''
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(__snake_case)

def lowercase__(__snake_case: str):
    '''simple docstring'''
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(__snake_case)

__UpperCAmelCase = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)

def lowercase__(__snake_case: List[Any]):
    '''simple docstring'''
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        'test requires at least one tracker to be available and for `comet_ml` to not be installed',
    )(__snake_case)

class lowerCamelCase(unittest.TestCase):
    '''simple docstring'''

    _snake_case: Union[str, Any] = True

    @classmethod
    def __UpperCAmelCase(cls) -> Union[str, Any]:
        UpperCAmelCase_: List[Any] = tempfile.mkdtemp()

    @classmethod
    def __UpperCAmelCase(cls) -> List[str]:
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def __UpperCAmelCase(self) -> str:
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(_UpperCamelCase)

class lowerCamelCase(unittest.TestCase):
    '''simple docstring'''

    def __UpperCAmelCase(self) -> Optional[int]:
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()

class lowerCamelCase(unittest.TestCase):
    '''simple docstring'''

    def __UpperCAmelCase(self, _UpperCamelCase) -> Any:
        UpperCAmelCase_: List[Any] = mocks if isinstance(_UpperCamelCase, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)

def lowercase__(__snake_case: int):
    '''simple docstring'''
    UpperCAmelCase_: int = AcceleratorState()
    UpperCAmelCase_: str = tensor[None].clone().to(state.device)
    UpperCAmelCase_: List[str] = gather(__snake_case).cpu()
    UpperCAmelCase_: List[Any] = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], __snake_case):
            return False
    return True

class lowerCamelCase:
    '''simple docstring'''

    def __init__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> Any:
        UpperCAmelCase_: str = returncode
        UpperCAmelCase_: Optional[Any] = stdout
        UpperCAmelCase_: Optional[Any] = stderr

async def lowercase__(__snake_case: Optional[Any], __snake_case: Optional[int]):
    '''simple docstring'''
    while True:
        UpperCAmelCase_: Dict = await stream.readline()
        if line:
            callback(__snake_case)
        else:
            break

async def lowercase__(__snake_case: Optional[int], __snake_case: Dict = None, __snake_case: str = None, __snake_case: Dict = None, __snake_case: List[str] = False, __snake_case: Optional[int] = False):
    '''simple docstring'''
    if echo:
        print('\nRunning: ', ' '.join(__snake_case))

    UpperCAmelCase_: Optional[Any] = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=__snake_case,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=__snake_case,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    UpperCAmelCase_: Any = []
    UpperCAmelCase_: str = []

    def tee(__snake_case: Dict, __snake_case: Union[str, Any], __snake_case: Tuple, __snake_case: Optional[int] = ""):
        UpperCAmelCase_: List[str] = line.decode('utf-8').rstrip()
        sink.append(__snake_case)
        if not quiet:
            print(__snake_case, __snake_case, file=__snake_case)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda __snake_case: tee(__snake_case, __snake_case, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda __snake_case: tee(__snake_case, __snake_case, sys.stderr, label='stderr:'))),
        ],
        timeout=__snake_case,
    )
    return _RunOutput(await p.wait(), __snake_case, __snake_case)

def lowercase__(__snake_case: Optional[Any], __snake_case: List[Any] = None, __snake_case: str = None, __snake_case: Tuple = 180, __snake_case: Dict = False, __snake_case: Optional[Any] = True):
    '''simple docstring'''
    UpperCAmelCase_: str = asyncio.get_event_loop()
    UpperCAmelCase_: int = loop.run_until_complete(
        _stream_subprocess(__snake_case, env=__snake_case, stdin=__snake_case, timeout=__snake_case, quiet=__snake_case, echo=__snake_case)
    )

    UpperCAmelCase_: int = ' '.join(__snake_case)
    if result.returncode > 0:
        UpperCAmelCase_: int = '\n'.join(result.stderr)
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}"
        )
    return result

class lowerCamelCase(_snake_case):
    '''simple docstring'''

    pass

def lowercase__(__snake_case: List[str], __snake_case: List[Any] = False):
    '''simple docstring'''
    try:
        UpperCAmelCase_: List[Any] = subprocess.check_output(__snake_case, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(__snake_case, 'decode'):
                UpperCAmelCase_: str = output.decode('utf-8')
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(__snake_case)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
style_context_codestyle: 29
label: 1
Row 4
code:

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    UpperCAmelCase_ = None

UpperCAmelCase_ = logging.get_logger(__name__)

UpperCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

UpperCAmelCase_ = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co./facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co./facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/mbart-large-en-ro': 'https://huggingface.co./facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
        'facebook/mbart-large-cc25': 'https://huggingface.co./facebook/mbart-large-cc25/resolve/main/tokenizer.json',
    },
}

UpperCAmelCase_ = {
    'facebook/mbart-large-en-ro': 1024,
    'facebook/mbart-large-cc25': 1024,
}

# fmt: off
UpperCAmelCase_ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']

class lowercase__(__lowerCamelCase):
    '''simple docstring'''

    a: Dict = VOCAB_FILES_NAMES
    a: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    a: Tuple = ["input_ids", "attention_mask"]
    a: List[str] = MBartTokenizer
    a: List[int] = []
    a: List[int] = []

    def __init__(self, __magic_name__=None, __magic_name__=None, __magic_name__="<s>", __magic_name__="</s>", __magic_name__="</s>", __magic_name__="<s>", __magic_name__="<unk>", __magic_name__="<pad>", __magic_name__="<mask>", __magic_name__=None, __magic_name__=None, __magic_name__=None, **__magic_name__) -> Tuple:
        """simple docstring"""
        UpperCamelCase__: Any = AddedToken(__magic_name__, lstrip=__magic_name__, rstrip=__magic_name__) if isinstance(__magic_name__, __magic_name__) else mask_token
        super().__init__(
            vocab_file=__magic_name__,
            tokenizer_file=__magic_name__,
            bos_token=__magic_name__,
            eos_token=__magic_name__,
            sep_token=__magic_name__,
            cls_token=__magic_name__,
            unk_token=__magic_name__,
            pad_token=__magic_name__,
            mask_token=__magic_name__,
            src_lang=__magic_name__,
            tgt_lang=__magic_name__,
            additional_special_tokens=__magic_name__,
            **__magic_name__,
        )
        UpperCamelCase__: Dict = vocab_file
        UpperCamelCase__: Optional[int] = False if not self.vocab_file else True
        UpperCamelCase__: List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        UpperCamelCase__: Tuple = {
            lang_code: self.convert_tokens_to_ids(__magic_name__) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        UpperCamelCase__: Any = src_lang if src_lang is not None else 'en_XX'
        UpperCamelCase__: int = self.convert_tokens_to_ids(self._src_lang)
        UpperCamelCase__: Union[str, Any] = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def UpperCamelCase__(self) -> str:
        """simple docstring"""
        return self._src_lang

    @src_lang.setter
    def UpperCamelCase__(self, __magic_name__) -> None:
        """simple docstring"""
        UpperCamelCase__: Dict = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def UpperCamelCase__(self, __magic_name__, __magic_name__ = None) -> List[int]:
        """simple docstring"""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def UpperCamelCase__(self, __magic_name__, __magic_name__ = None) -> List[int]:
        """simple docstring"""
        UpperCamelCase__: List[Any] = [self.sep_token_id]
        UpperCamelCase__: List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    def UpperCamelCase__(self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, **__magic_name__) -> Tuple:
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        UpperCamelCase__: List[str] = src_lang
        UpperCamelCase__: Optional[Any] = self(__magic_name__, add_special_tokens=__magic_name__, return_tensors=__magic_name__, **__magic_name__)
        UpperCamelCase__: Optional[Any] = self.convert_tokens_to_ids(__magic_name__)
        UpperCamelCase__: str = tgt_lang_id
        return inputs

    def UpperCamelCase__(self, __magic_name__, __magic_name__ = "en_XX", __magic_name__ = None, __magic_name__ = "ro_RO", **__magic_name__) -> BatchEncoding:
        """simple docstring"""
        UpperCamelCase__: List[Any] = src_lang
        UpperCamelCase__: Any = tgt_lang
        return super().prepare_seqaseq_batch(__magic_name__, __magic_name__, **__magic_name__)

    def UpperCamelCase__(self) -> List[Any]:
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang)

    def UpperCamelCase__(self) -> Union[str, Any]:
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def UpperCamelCase__(self, __magic_name__) -> None:
        """simple docstring"""
        UpperCamelCase__: Dict = self.convert_tokens_to_ids(__magic_name__)
        UpperCamelCase__: Dict = []
        UpperCamelCase__: int = [self.eos_token_id, self.cur_lang_code]
        UpperCamelCase__: str = self.convert_ids_to_tokens(self.prefix_tokens)
        UpperCamelCase__: List[str] = self.convert_ids_to_tokens(self.suffix_tokens)
        UpperCamelCase__: Dict = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def UpperCamelCase__(self, __magic_name__) -> None:
        """simple docstring"""
        UpperCamelCase__: Optional[int] = self.convert_tokens_to_ids(__magic_name__)
        UpperCamelCase__: Any = []
        UpperCamelCase__: Optional[Any] = [self.eos_token_id, self.cur_lang_code]
        UpperCamelCase__: List[str] = self.convert_ids_to_tokens(self.prefix_tokens)
        UpperCamelCase__: List[str] = self.convert_ids_to_tokens(self.suffix_tokens)
        UpperCamelCase__: str = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def UpperCamelCase__(self, __magic_name__, __magic_name__ = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(__magic_name__):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        UpperCamelCase__: Tuple = os.path.join(
            __magic_name__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(__magic_name__):
            copyfile(self.vocab_file, __magic_name__)
        return (out_vocab_file,)
code_codestyle: 355
style_context:

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file

# docstyle-ignore
UpperCAmelCase_ = '\nHuman: <<task>>\n\nAssistant: '

UpperCAmelCase_ = 'huggingface-tools/default-prompts'
UpperCAmelCase_ = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}

def lowerCAmelCase_(__UpperCAmelCase: Optional[Any], __UpperCAmelCase: List[Any], __UpperCAmelCase: Optional[Any] = "run") -> int:
    if prompt_or_repo_id is None:
        UpperCamelCase__: List[Any] = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('''\\s''', __UpperCAmelCase) is not None:
        return prompt_or_repo_id

    UpperCamelCase__: Any = cached_file(
        __UpperCAmelCase, PROMPT_FILES[mode], repo_type='''dataset''', user_agent={'''agent''': agent_name}
    )
    with open(__UpperCAmelCase, '''r''', encoding='''utf-8''') as f:
        return f.read()
style_context_codestyle: 247
label: 0
Row 5
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

__A: List[str] = {
    'configuration_groupvit': [
        'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'GroupViTConfig',
        'GroupViTOnnxConfig',
        'GroupViTTextConfig',
        'GroupViTVisionConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A: str = [
        'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GroupViTModel',
        'GroupViTPreTrainedModel',
        'GroupViTTextModel',
        'GroupViTVisionModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A: str = [
        'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFGroupViTModel',
        'TFGroupViTPreTrainedModel',
        'TFGroupViTTextModel',
        'TFGroupViTVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    __A: List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 138
style_context:

'''simple docstring'''

import datasets

from .evaluate import evaluate

UpperCamelCase__: int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'

UpperCamelCase__: Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'

UpperCamelCase__: Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the SQuAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> squad_metric = datasets.load_metric("squad")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0}\n'

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowerCAmelCase(datasets.Metric):
    """simple docstring"""

    def UpperCAmelCase_(self) -> str:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": {"""id""": datasets.Value("""string"""), """prediction_text""": datasets.Value("""string""")},
                    """references""": {
                        """id""": datasets.Value("""string"""),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string"""),
                                """answer_start""": datasets.Value("""int32"""),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],
            reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],
        )

    def UpperCAmelCase_(self, _lowerCamelCase, _lowerCamelCase) -> List[Any]:
        A_: Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        A_: List[Any] = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        A_: int = evaluate(dataset=_lowerCamelCase, predictions=_lowerCamelCase)
        return score
style_context_codestyle: 344
label: 0
Row 6
code:

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

UpperCamelCase = logging.get_logger(__name__)

def _SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE):
    if isinstance(SCREAMING_SNAKE_CASE, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(SCREAMING_SNAKE_CASE, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(SCREAMING_SNAKE_CASE):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''')

class _lowerCamelCase(UpperCamelCase):
    """simple docstring"""

    snake_case = ["pixel_values"]

    def __init__(self, _SCREAMING_SNAKE_CASE = True, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR, _SCREAMING_SNAKE_CASE = True, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = True, _SCREAMING_SNAKE_CASE = 1 / 255, _SCREAMING_SNAKE_CASE = True, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, **_SCREAMING_SNAKE_CASE) -> None:
        '''simple docstring'''
        super().__init__(**_SCREAMING_SNAKE_CASE)
        A_: str = size if size is not None else {'''shortest_edge''': 224}
        A_: Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE, default_to_square=_SCREAMING_SNAKE_CASE)
        A_: str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        A_: Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE, param_name='''crop_size''')
        A_: Optional[int] = do_resize
        A_: Tuple = size
        A_: Optional[int] = do_center_crop
        A_: List[Any] = crop_size
        A_: Union[str, Any] = resample
        A_: Any = do_rescale
        A_: List[str] = rescale_factor
        A_: Optional[Any] = do_normalize
        A_: Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        A_: List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def _snake_case(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR, _SCREAMING_SNAKE_CASE = None, **_SCREAMING_SNAKE_CASE) -> np.ndarray:
        '''simple docstring'''
        A_: Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE, default_to_square=_SCREAMING_SNAKE_CASE)
        if "shortest_edge" in size:
            A_: List[str] = get_resize_output_image_size(_SCREAMING_SNAKE_CASE, size['''shortest_edge'''], default_to_square=_SCREAMING_SNAKE_CASE)
        elif "height" in size and "width" in size:
            A_: Tuple = (size['''height'''], size['''width'''])
        else:
            raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''')
        return resize(_SCREAMING_SNAKE_CASE, size=_SCREAMING_SNAKE_CASE, resample=_SCREAMING_SNAKE_CASE, data_format=_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE)

    def _snake_case(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = None, **_SCREAMING_SNAKE_CASE) -> np.ndarray:
        '''simple docstring'''
        A_: int = get_size_dict(_SCREAMING_SNAKE_CASE)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''')
        return center_crop(_SCREAMING_SNAKE_CASE, size=(size['''height'''], size['''width''']), data_format=_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE)

    def _snake_case(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = None, **_SCREAMING_SNAKE_CASE) -> Optional[int]:
        '''simple docstring'''
        return rescale(_SCREAMING_SNAKE_CASE, scale=_SCREAMING_SNAKE_CASE, data_format=_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE)

    def _snake_case(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = None, **_SCREAMING_SNAKE_CASE) -> np.ndarray:
        '''simple docstring'''
        return normalize(_SCREAMING_SNAKE_CASE, mean=_SCREAMING_SNAKE_CASE, std=_SCREAMING_SNAKE_CASE, data_format=_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE)

    def _snake_case(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST) -> np.ndarray:
        '''simple docstring'''
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')

        # All transformations expect numpy arrays.
        A_: Union[str, Any] = to_numpy_array(_SCREAMING_SNAKE_CASE)
        if do_resize:
            A_: Optional[Any] = self.resize(image=_SCREAMING_SNAKE_CASE, size=_SCREAMING_SNAKE_CASE, resample=_SCREAMING_SNAKE_CASE)
        if do_center_crop:
            A_: Union[str, Any] = self.center_crop(_SCREAMING_SNAKE_CASE, size=_SCREAMING_SNAKE_CASE)
        if do_rescale:
            A_: Dict = self.rescale(image=_SCREAMING_SNAKE_CASE, scale=_SCREAMING_SNAKE_CASE)
        if do_normalize:
            A_: List[Any] = self.normalize(image=_SCREAMING_SNAKE_CASE, mean=_SCREAMING_SNAKE_CASE, std=_SCREAMING_SNAKE_CASE)
        A_: List[str] = to_channel_dimension_format(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
        return image

    def _snake_case(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = None, _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST, **_SCREAMING_SNAKE_CASE) -> PIL.Image.Image:
        '''simple docstring'''
        A_: Any = do_resize if do_resize is not None else self.do_resize
        A_: Dict = resample if resample is not None else self.resample
        A_: str = do_center_crop if do_center_crop is not None else self.do_center_crop
        A_: List[str] = do_rescale if do_rescale is not None else self.do_rescale
        A_: List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        A_: List[Any] = do_normalize if do_normalize is not None else self.do_normalize
        A_: str = image_mean if image_mean is not None else self.image_mean
        A_: int = image_std if image_std is not None else self.image_std
        A_: List[str] = size if size is not None else self.size
        A_: Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE, default_to_square=_SCREAMING_SNAKE_CASE)
        A_: List[Any] = crop_size if crop_size is not None else self.crop_size
        A_: List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE, param_name='''crop_size''')

        if not valid_images(_SCREAMING_SNAKE_CASE):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )

        A_: List[str] = make_batched(_SCREAMING_SNAKE_CASE)

        A_: Union[str, Any] = [
            [
                self._preprocess_image(
                    image=_SCREAMING_SNAKE_CASE,
                    do_resize=_SCREAMING_SNAKE_CASE,
                    size=_SCREAMING_SNAKE_CASE,
                    resample=_SCREAMING_SNAKE_CASE,
                    do_center_crop=_SCREAMING_SNAKE_CASE,
                    crop_size=_SCREAMING_SNAKE_CASE,
                    do_rescale=_SCREAMING_SNAKE_CASE,
                    rescale_factor=_SCREAMING_SNAKE_CASE,
                    do_normalize=_SCREAMING_SNAKE_CASE,
                    image_mean=_SCREAMING_SNAKE_CASE,
                    image_std=_SCREAMING_SNAKE_CASE,
                    data_format=_SCREAMING_SNAKE_CASE,
                )
                for img in video
            ]
            for video in videos
        ]

        A_: Dict = {'''pixel_values''': videos}
        return BatchFeature(data=_SCREAMING_SNAKE_CASE, tensor_type=_SCREAMING_SNAKE_CASE)
code_codestyle: 65
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge UpperCamelCase = [ """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the""" """ final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe""" """ depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""", """The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal""" """ accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's""" """ founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the""" """ body.""", """Amnesty International releases its annual report on the death penalty. The report catalogs the use of""" """ state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the""" """ world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital""" """ punishment.""", ] UpperCamelCase = [ """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""" """ Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz""" """ had informed his Lufthansa training school of an episode of severe depression, airline says .""", """Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .""" """ Israel and the United States opposed the move, which could open the door to war crimes investigations against""" """ Israelis .""", """Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to""" """ death . Organization claims that governments around the world are using the threat of terrorism to advance""" """ executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death""" """ sentences up by 28% .""", ] def _SCREAMING_SNAKE_CASE ( ): A_ : Dict = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bootstrap_aggregation=SCREAMING_SNAKE_CASE , rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A_ : List[Any] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bootstrap_aggregation=SCREAMING_SNAKE_CASE , rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def _SCREAMING_SNAKE_CASE ( ): A_ : Any = '''rougeLsum''' A_ : List[str] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE , rouge_keys=[k] )[k] A_ : List[str] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE , rouge_keys=[k] )[k] assert score > score_no_sep def _SCREAMING_SNAKE_CASE ( ): A_ : Optional[int] = ['''rouge1''', '''rouge2''', '''rougeL'''] A_ : Optional[int] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE , rouge_keys=SCREAMING_SNAKE_CASE ) A_ : Optional[int] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE , rouge_keys=SCREAMING_SNAKE_CASE ) assert score_sep == score_no_sep def _SCREAMING_SNAKE_CASE ( ): A_ : Union[str, Any] = [ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] A_ : Optional[int] = [ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE ) == calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE ) def _SCREAMING_SNAKE_CASE ( ): A_ : List[Any] = [ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] A_ : Optional[Any] = [ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . 
Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] A_ : List[str] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , rouge_keys=['''rougeLsum'''] , newline_sep=SCREAMING_SNAKE_CASE )['''rougeLsum'''] A_ : Optional[Any] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def _SCREAMING_SNAKE_CASE ( ): A_ : Any = Path('''examples/seq2seq/test_data/wmt_en_ro''' ) A_ : List[Any] = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) ) assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A_ : List[str] = calculate_rouge_path( data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=SCREAMING_SNAKE_CASE ) assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
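# Editorial usage sketch (not part of the original sample): a minimal call to
# the `calculate_rouge` helper the tests above exercise. The import path and
# keyword names are copied from the test file; the inputs are illustrative.
from utils import calculate_rouge

preds = ["the cat sat on the mat"]
refs = ["the cat was sitting on the mat"]

# Per the first test, disabling bootstrap aggregation yields per-example
# scores that pandas can consume; with aggregation on, each key maps to a
# single aggregate score.
scores = calculate_rouge(preds, refs, rouge_keys=["rouge2", "rougeL"])
print(scores)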
65
1
'''simple docstring''' import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py UpperCamelCase__ : str = '.' if __name__ == "__main__": UpperCamelCase__ : List[str] = os.path.join(REPO_PATH, 'utils/documentation_tests.txt') UpperCamelCase__ : Tuple = [] UpperCamelCase__ : List[Any] = [] with open(doctest_file_path) as fp: for line in fp: UpperCamelCase__ : Any = line.strip() UpperCamelCase__ : Optional[int] = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: UpperCamelCase__ : Any = '\n'.join(non_existent_paths) raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}') if all_paths != sorted(all_paths): raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
344
'''simple docstring''' from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def UpperCAmelCase ( a_ ) -> Dict[str, torch.Tensor]: """simple docstring""" A_ : List[str] = [] A_ : Dict = [] A_ : List[Any] = [] for rt in rc.restypes: A_ : Tuple = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) A_ : Union[str, Any] = {name: i for i, name in enumerate(a_ )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 1_4 ) restype_atomaa_to_atomaa_list.append([0] * 3_7 ) restype_atomaa_mask_list.append([0.0] * 1_4 ) A_ : Tuple = torch.tensor( a_ , dtype=torch.intaa , device=protein["""aatype"""].device , ) A_ : Optional[int] = torch.tensor( a_ , dtype=torch.intaa , device=protein["""aatype"""].device , ) A_ : List[Any] = torch.tensor( a_ , dtype=torch.floataa , device=protein["""aatype"""].device , ) A_ : Optional[int] = protein["""aatype"""].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein A_ : Dict = restype_atomaa_to_atomaa[protein_aatype] A_ : Optional[Any] = restype_atomaa_mask[protein_aatype] A_ : Any = residx_atomaa_mask A_ : List[str] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back A_ : Tuple = restype_atomaa_to_atomaa[protein_aatype] A_ : Tuple = residx_atomaa_to_atomaa.long() # create the corresponding mask A_ : Optional[Any] = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein["""aatype"""].device ) for restype, restype_letter in enumerate(rc.restypes ): A_ : Optional[Any] = rc.restype_atoa[restype_letter] A_ : Any = rc.residue_atoms[restype_name] for atom_name in atom_names: A_ : Any = rc.atom_order[atom_name] A_ : Optional[int] = 1 A_ : Optional[int] = restype_atomaa_mask[protein_aatype] A_ : Dict = residx_atomaa_mask return protein def UpperCAmelCase ( a_ ) -> Dict[str, np.ndarray]: """simple docstring""" A_ : Union[str, Any] = tree_map(lambda a_ : torch.tensor(a_ , device=batch["""aatype"""].device ) , a_ , np.ndarray ) A_ : Optional[int] = tensor_tree_map(lambda a_ : np.array(a_ ) , make_atomaa_masks(a_ ) ) return out
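# Editorial usage sketch, heavily hedged: upstream (OpenFold, and the ESM port
# in transformers) these two helpers are `make_atom14_masks` and
# `make_atom14_masks_np`; the anonymized dump above loses those names and the
# output dict keys, so everything below is an assumption about the upstream API.
import torch

protein = {"aatype": torch.zeros(8, dtype=torch.long)}  # eight residues of restype 0
protein = make_atom14_masks(protein)                    # name assumed, see note above
print(protein["atom14_atom_exists"].shape)              # (8, 14) upstream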
344
1
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) lowerCAmelCase: Union[str, Any] = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class a__( unittest.TestCase ): def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Dict , __snake_case : str = None , __snake_case : int = None ): a : Union[str, Any] = None a : Any = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) a : List[str] = os.path.abspath('examples' ) for item in os.listdir(a__ ): if item not in EXCLUDE_EXAMPLES: a : Optional[Any] = os.path.join(a__ , a__ ) if os.path.isfile(a__ ) and ".py" in item_path: with self.subTest( tested_script=a__ , feature_script=a__ , tested_section='main()' if parser_only else 'training_function()' , ): a : Union[str, Any] = compare_against_test( os.path.join(a__ , a__ ) , a__ , a__ , a__ ) a : Any = '\n'.join(a__ ) if special_strings is not None: for string in special_strings: a : List[Any] = diff.replace(a__ , '' ) self.assertEqual(a__ , '' ) def lowercase_ ( self : List[str] ): self.one_complete_example('complete_nlp_example.py' , a__ ) self.one_complete_example('complete_nlp_example.py' , a__ ) def lowercase_ ( self : Optional[int] ): a : Optional[int] = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) a : Optional[int] = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , a__ , a__ , a__ ) self.one_complete_example('complete_cv_example.py' , a__ , a__ , a__ ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class a__( lowerCAmelCase__ ): lowercase__ = False @classmethod def lowercase_ ( cls : Optional[Any] ): super().setUpClass() a : List[str] = tempfile.mkdtemp() a : List[str] = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) a : int = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def lowercase_ ( cls : Any ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def lowercase_ ( self : Optional[Any] ): a : List[Any] = F"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def lowercase_ ( self : Any ): a : int = F"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n """.split() a : int = run_command(self._launch_args + testargs ) 
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def lowercase_ ( self : Tuple ): a : int = F"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n """.split() a : List[str] = run_command(self._launch_args + testargs , return_stdout=a__ ) self.assertNotIn('epoch 0:' , a__ ) self.assertIn('epoch 1:' , a__ ) def lowercase_ ( self : str ): a : Union[str, Any] = F"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n """.split() a : Dict = run_command(self._launch_args + testargs , return_stdout=a__ ) if torch.cuda.is_available(): a : Any = torch.cuda.device_count() else: a : List[str] = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , a__ ) self.assertIn('epoch 1:' , a__ ) else: self.assertIn('epoch 0:' , a__ ) self.assertIn('epoch 1:' , a__ ) @slow def lowercase_ ( self : Optional[int] ): a : Optional[int] = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): a : List[str] = run_command(self._launch_args + testargs , return_stdout=a__ ) a : Optional[Any] = re.findall('({.+})' , a__ ) a : Union[str, Any] = [r for r in results if 'accuracy' in r][-1] a : Optional[int] = ast.literal_eval(a__ ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def lowercase_ ( self : int ): a : Dict = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def lowercase_ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdir: a : int = F"""\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(a__ , 'tracking' ) ) ) def lowercase_ ( self : Optional[int] ): a : int = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def lowercase_ ( self : Dict ): a : Dict = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
364
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets lowerCAmelCase: Dict = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' lowerCAmelCase: Optional[Any] = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' lowerCAmelCase: List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def lowerCamelCase__ ( _A ): def remove_articles(_A ): a : Union[str, Any] = re.compile(r'\b(a|an|the)\b' , re.UNICODE ) return re.sub(_A , ' ' , _A ) def white_space_fix(_A ): return " ".join(text.split() ) def remove_punc(_A ): a : Tuple = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_A ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) ) def lowerCamelCase__ ( _A , _A ): return int(normalize_answer(_A ) == normalize_answer(_A ) ) def lowerCamelCase__ ( _A , _A ): a : List[Any] = [any(compute_exact(_A , _A ) for ref in refs ) for pred, refs in zip(_A , _A )] return (sum(_A ) / len(_A )) * 100 def lowerCamelCase__ ( _A , _A , _A , _A ): a : List[Any] = [rgram for rgrams in rgramslist for rgram in rgrams] a : Any = Counter(_A ) a : Dict = Counter(_A ) a : Tuple = Counter() for sgram, scount in sgramcounter.items(): a : List[str] = scount * numref a : Optional[int] = Counter(_A ) a : Optional[int] = Counter() for cgram, ccount in cgramcounter.items(): a : List[str] = ccount * numref # KEEP a : Optional[Any] = sgramcounter_rep & cgramcounter_rep a : Union[str, Any] = keepgramcounter_rep & rgramcounter a : Any = sgramcounter_rep & rgramcounter a : str = 0 a : Optional[Any] = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += 
keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. a : Tuple = 1 a : Any = 1 if len(_A ) > 0: a : Optional[int] = keeptmpscorea / len(_A ) if len(_A ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) a : Tuple = keeptmpscorea / sum(keepgramcounterall_rep.values() ) a : List[str] = 0 if keepscore_precision > 0 or keepscore_recall > 0: a : Dict = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION a : List[Any] = sgramcounter_rep - cgramcounter_rep a : Any = delgramcounter_rep - rgramcounter a : Union[str, Any] = sgramcounter_rep - rgramcounter a : Tuple = 0 a : str = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. a : str = 1 if len(_A ) > 0: a : Tuple = deltmpscorea / len(_A ) # ADDITION a : Any = set(_A ) - set(_A ) a : Optional[int] = set(_A ) & set(_A ) a : Union[str, Any] = set(_A ) - set(_A ) a : List[str] = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. a : Tuple = 1 a : Optional[Any] = 1 if len(_A ) > 0: a : str = addtmpscore / len(_A ) if len(_A ) > 0: a : Optional[Any] = addtmpscore / len(_A ) a : str = 0 if addscore_precision > 0 or addscore_recall > 0: a : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def lowerCamelCase__ ( _A , _A , _A ): a : List[str] = len(_A ) a : List[str] = ssent.split(' ' ) a : int = csent.split(' ' ) a : Optional[Any] = [] a : Tuple = [] a : Optional[Any] = [] a : Optional[Any] = [] a : Union[str, Any] = [] a : str = [] a : Dict = [] a : Dict = [] a : str = [] a : Any = [] for rsent in rsents: a : List[Any] = rsent.split(' ' ) a : Dict = [] a : Optional[Any] = [] a : Optional[Any] = [] ragramslist.append(_A ) for i in range(0 , len(_A ) - 1 ): if i < len(_A ) - 1: a : List[str] = ragrams[i] + ' ' + ragrams[i + 1] ragrams.append(_A ) if i < len(_A ) - 2: a : Union[str, Any] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] ragrams.append(_A ) if i < len(_A ) - 3: a : Tuple = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3] ragrams.append(_A ) ragramslist.append(_A ) ragramslist.append(_A ) ragramslist.append(_A ) for i in range(0 , len(_A ) - 1 ): if i < len(_A ) - 1: a : Tuple = sagrams[i] + ' ' + sagrams[i + 1] sagrams.append(_A ) if i < len(_A ) - 2: a : Union[str, Any] = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] sagrams.append(_A ) if i < len(_A ) - 3: a : List[str] = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3] sagrams.append(_A ) for i in range(0 , len(_A ) - 1 ): if i < len(_A ) - 1: a : Any = cagrams[i] + ' ' + cagrams[i + 1] cagrams.append(_A ) if i < len(_A ) - 2: a : Optional[Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] cagrams.append(_A ) if i < len(_A ) - 3: a : 
Optional[Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3] cagrams.append(_A ) ((a) , (a) , (a)) : int = SARIngram(_A , _A , _A , _A ) ((a) , (a) , (a)) : Optional[int] = SARIngram(_A , _A , _A , _A ) ((a) , (a) , (a)) : Union[str, Any] = SARIngram(_A , _A , _A , _A ) ((a) , (a) , (a)) : int = SARIngram(_A , _A , _A , _A ) a : Dict = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 a : Any = sum([delascore, delascore, delascore, delascore] ) / 4 a : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4 a : str = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def lowerCamelCase__ ( _A , _A = True , _A = "13a" , _A = True ): # Normalization is required for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: a : Dict = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: a : List[str] = sacrebleu.metrics.bleu._get_tokenizer(_A )()(_A ) else: a : str = sacrebleu.TOKENIZERS[tokenizer]()(_A ) elif tokenizer == "moses": a : List[Any] = sacremoses.MosesTokenizer().tokenize(_A , return_str=_A , escape=_A ) elif tokenizer == "penn": a : Tuple = sacremoses.MosesTokenizer().penn_tokenize(_A , return_str=_A ) else: a : List[Any] = sentence if not return_str: a : Optional[Any] = normalized_sent.split() return normalized_sent def lowerCamelCase__ ( _A , _A , _A ): if not (len(_A ) == len(_A ) == len(_A )): raise ValueError('Sources length must match predictions and references lengths.'
) a : Tuple = 0 for src, pred, refs in zip(_A , _A , _A ): sari_score += SARIsent(normalize(_A ) , normalize(_A ) , [normalize(_A ) for sent in refs] ) a : Tuple = sari_score / len(_A ) return 100 * sari_score def lowerCamelCase__ ( _A , _A , _A="exp" , _A=None , _A=False , _A=False , _A=False , ): a : Optional[int] = len(references[0] ) if any(len(_A ) != references_per_prediction for refs in references ): raise ValueError('Sacrebleu requires the same number of references for each prediction' ) a : List[Any] = [[refs[i] for refs in references] for i in range(_A )] a : Optional[Any] = sacrebleu.corpus_bleu( _A , _A , smooth_method=_A , smooth_value=_A , force=_A , lowercase=_A , use_effective_order=_A , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__( datasets.Metric ): def lowercase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ), } ) , codebase_urls=[ 'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py', 'https://github.com/cocoxu/simplification/blob/master/SARI.py', 'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py', 'https://github.com/mjpost/sacreBLEU', ] , reference_urls=[ 'https://www.aclweb.org/anthology/Q16-1029.pdf', 'https://github.com/mjpost/sacreBLEU', 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def lowercase_ ( self : str , __snake_case : Optional[Any] , __snake_case : int , __snake_case : str ): a : int = {} result.update({'sari': compute_sari(sources=__snake_case , predictions=__snake_case , references=__snake_case )} ) result.update({'sacrebleu': compute_sacrebleu(predictions=__snake_case , references=__snake_case )} ) result.update({'exact': compute_em(predictions=__snake_case , references=__snake_case )} ) return result
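# Editorial sketch: the metric docstring above already shows the end-to-end
# `datasets.load_metric("wiki_split")` usage; `_compute` also names the three
# standalone helpers (`compute_sari`, `compute_sacrebleu`, `compute_em`), whose
# definitions are anonymized in this dump, so the calls below assume the
# upstream module.
sources = ["About 95 species are currently accepted ."]
predictions = ["About 95 you now get in ."]
references = [["About 95 species are currently known ."]]

print(compute_sari(sources=sources, predictions=predictions, references=references))  # ~21.81 per the docstring
print(compute_em(predictions=predictions, references=references))                     # 0.0 per the docstring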
96
0
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __a = logging.get_logger(__name__) __a = { '''deepmind/language-perceiver''': '''https://huggingface.co./deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co./models?filter=perceiver } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'perceiver' def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Any = num_latents lowercase : Union[str, Any] = d_latents lowercase : str = d_model lowercase : int = num_blocks lowercase : str = num_self_attends_per_block lowercase : List[str] = num_self_attention_heads lowercase : List[str] = num_cross_attention_heads lowercase : int = qk_channels lowercase : List[Any] = v_channels lowercase : int = cross_attention_shape_for_attention lowercase : Tuple = self_attention_widening_factor lowercase : Dict = cross_attention_widening_factor lowercase : Any = hidden_act lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : Union[str, Any] = initializer_range lowercase : Any = layer_norm_eps lowercase : Any = use_query_residual # masked language modeling attributes lowercase : List[str] = vocab_size lowercase : Dict = max_position_embeddings # image classification attributes lowercase : int = image_size # flow attributes lowercase : List[Any] = train_size # multimodal autoencoding attributes lowercase : List[Any] = num_frames lowercase : Union[str, Any] = audio_samples_per_frame lowercase : int = samples_per_patch lowercase : Optional[int] = output_shape class __SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCamelCase ( self ): if self.task == "multiple-choice": lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def __lowerCamelCase ( self ): return 1E-4 def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # If dynamic axis (-1) we forward 
with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : str = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ ) # Generate dummy inputs according to compute batch and sequence lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''input_ids''' ) return inputs elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch ) lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
337
import logging import os from .state import PartialState class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ): @staticmethod def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ ) if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ): if self._should_log(SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) elif in_order: lowercase : List[Any] = PartialState() for i in range(state.num_processes ): if i == state.process_index: lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) state.wait_for_everyone() def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]: """simple docstring""" if log_level is None: lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase ) lowercase : str = logging.getLogger(_UpperCamelCase ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_UpperCamelCase, {} )
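# Editorial usage sketch for the logging helper above (exposed as `get_logger`
# in the public accelerate API): the shared state must be initialized first,
# otherwise `log` raises the RuntimeError shown in the adapter. The
# `main_process_only` and `in_order` kwargs are the ones popped in `log` above.
from accelerate import Accelerator

accelerator = Accelerator()                      # initializes the shared PartialState
logger = get_logger(__name__, log_level="INFO")  # the function defined above

logger.info("emitted once, by the main process", main_process_only=True)
logger.info("emitted by every rank, in order", main_process_only=False, in_order=True)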
337
1
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging _A = logging.get_logger(__name__) _A = { 'speechbrain/m-ctc-t-large': 'https://huggingface.co./speechbrain/m-ctc-t-large/resolve/main/config.json', # See all M-CTC-T models at https://huggingface.co./models?filter=mctct } class _lowercase ( __UpperCAmelCase ): lowercase_ = 'mctct' def __init__( self , UpperCAmelCase_=8065 , UpperCAmelCase_=1536 , UpperCAmelCase_=36 , UpperCAmelCase_=6144 , UpperCAmelCase_=4 , UpperCAmelCase_=384 , UpperCAmelCase_=920 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=0.3 , UpperCAmelCase_="relu" , UpperCAmelCase_=0.02 , UpperCAmelCase_=0.3 , UpperCAmelCase_=0.3 , UpperCAmelCase_=1 , UpperCAmelCase_=0 , UpperCAmelCase_=2 , UpperCAmelCase_=1 , UpperCAmelCase_=0.3 , UpperCAmelCase_=1 , UpperCAmelCase_=(7,) , UpperCAmelCase_=(3,) , UpperCAmelCase_=80 , UpperCAmelCase_=1 , UpperCAmelCase_=None , UpperCAmelCase_="sum" , UpperCAmelCase_=False , **UpperCAmelCase_ , ) -> int: super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ ) lowerCamelCase : str = vocab_size lowerCamelCase : List[Any] = hidden_size lowerCamelCase : Dict = num_hidden_layers lowerCamelCase : Optional[int] = intermediate_size lowerCamelCase : Tuple = num_attention_heads lowerCamelCase : Dict = attention_head_dim lowerCamelCase : str = max_position_embeddings lowerCamelCase : Optional[int] = layer_norm_eps lowerCamelCase : Dict = layerdrop lowerCamelCase : Any = hidden_act lowerCamelCase : List[Any] = initializer_range lowerCamelCase : List[Any] = hidden_dropout_prob lowerCamelCase : Any = attention_probs_dropout_prob lowerCamelCase : Any = pad_token_id lowerCamelCase : List[Any] = bos_token_id lowerCamelCase : Dict = eos_token_id lowerCamelCase : Any = conv_glu_dim lowerCamelCase : str = conv_dropout lowerCamelCase : Union[str, Any] = num_conv_layers lowerCamelCase : Tuple = input_feat_per_channel lowerCamelCase : List[str] = input_channels lowerCamelCase : str = conv_channels lowerCamelCase : Any = ctc_loss_reduction lowerCamelCase : Optional[Any] = ctc_zero_infinity # prevents config testing fail with exporting to json lowerCamelCase : int = list(UpperCAmelCase_ ) lowerCamelCase : str = list(UpperCAmelCase_ ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ' F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """ F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
354
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co./asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co./models?filter=sew-d } class _lowercase ( __UpperCAmelCase ): lowercase_ = 'sew-d' def __init__( self , UpperCAmelCase_=32 , UpperCAmelCase_=768 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=3072 , UpperCAmelCase_=2 , UpperCAmelCase_=512 , UpperCAmelCase_=256 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=("p2c", "c2p") , UpperCAmelCase_="layer_norm" , UpperCAmelCase_="gelu_python" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-7 , UpperCAmelCase_=1E-5 , UpperCAmelCase_="group" , UpperCAmelCase_="gelu" , UpperCAmelCase_=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCAmelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase_=False , UpperCAmelCase_=128 , UpperCAmelCase_=16 , UpperCAmelCase_=True , UpperCAmelCase_=0.05 , UpperCAmelCase_=10 , UpperCAmelCase_=2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=10 , UpperCAmelCase_=0 , UpperCAmelCase_="mean" , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=256 , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , **UpperCAmelCase_ , ) -> Optional[Any]: super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ ) lowerCamelCase : Any = hidden_size lowerCamelCase : Any = feat_extract_norm lowerCamelCase : List[str] = feat_extract_activation lowerCamelCase : str = list(UpperCAmelCase_ ) lowerCamelCase : Any = list(UpperCAmelCase_ ) lowerCamelCase : str = list(UpperCAmelCase_ ) lowerCamelCase : List[Any] = conv_bias lowerCamelCase : Optional[int] = num_conv_pos_embeddings lowerCamelCase : str = num_conv_pos_embedding_groups lowerCamelCase : Optional[int] = len(self.conv_dim ) lowerCamelCase : Optional[int] = num_hidden_layers lowerCamelCase : Union[str, Any] = intermediate_size lowerCamelCase : str = squeeze_factor lowerCamelCase : Any = max_position_embeddings lowerCamelCase : List[Any] = position_buckets lowerCamelCase : Union[str, Any] = share_att_key lowerCamelCase : Optional[int] = relative_attention lowerCamelCase : Tuple = norm_rel_ebd lowerCamelCase : Union[str, Any] = list(UpperCAmelCase_ ) lowerCamelCase : List[Any] = hidden_act lowerCamelCase : Optional[Any] = num_attention_heads lowerCamelCase : Tuple = hidden_dropout lowerCamelCase : List[Any] = attention_dropout lowerCamelCase : Optional[Any] = activation_dropout lowerCamelCase : List[str] = feat_proj_dropout lowerCamelCase : List[str] = final_dropout lowerCamelCase : str = layer_norm_eps lowerCamelCase : int = feature_layer_norm_eps lowerCamelCase : Optional[Any] = initializer_range lowerCamelCase : int = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase : Any = apply_spec_augment lowerCamelCase : Optional[int] = mask_time_prob lowerCamelCase : Optional[Any] = mask_time_length lowerCamelCase : str = mask_time_min_masks lowerCamelCase : List[Any] = mask_feature_prob lowerCamelCase : int = mask_feature_length lowerCamelCase : List[Any] = mask_feature_min_masks # ctc loss lowerCamelCase : Optional[Any] = ctc_loss_reduction lowerCamelCase : Union[str, Any] = ctc_zero_infinity # sequence classification lowerCamelCase : Optional[Any] = use_weighted_layer_sum lowerCamelCase : Dict = classifier_proj_size @property def _UpperCamelCase ( self ) -> Optional[Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
205
0
"""simple docstring""" def _snake_case ( UpperCamelCase : str ): UpperCAmelCase : str = [0] * len(UpperCamelCase ) for i in range(1 , len(UpperCamelCase ) ): # use last results for better performance - dynamic programming UpperCAmelCase : int = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: UpperCAmelCase : List[str] = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 UpperCAmelCase : Optional[int] = j return prefix_result def _snake_case ( UpperCamelCase : str ): return max(prefix_function(UpperCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod()
109
"""simple docstring""" import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , **lowercase_ ) -> List[Any]: A__ = AutoConfig.from_pretrained(lowercase_ , **lowercase_ ) A__ = AutoModelForSeqaSeqLM.from_config(lowercase_ ) model.save_pretrained(lowercase_ ) AutoTokenizer.from_pretrained(lowercase_ ).save_pretrained(lowercase_ ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
247
0
'''simple docstring''' import string from math import logaa def _lowerCamelCase ( lowercase : str , lowercase : str ) -> int: _a = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) _a = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def _lowerCamelCase ( lowercase : str , lowercase : str ) -> tuple[int, int]: _a = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' _a = corpus_without_punctuation.split("\n" ) _a = term.lower() return (len([doc for doc in docs if term in doc] ), len(lowercase )) def _lowerCamelCase ( lowercase : int , lowercase : int , lowercase : Tuple=False ) -> float: if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def _lowerCamelCase ( lowercase : int , lowercase : int ) -> float: return round(tf * idf , 3 )
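# Editorial worked example for the helpers above (term frequency, document
# frequency, idf, tf-idf); the first helper is restated with a descriptive
# name because the dump anonymizes all four to the same identifier.
import string
from math import log10

def term_frequency(term: str, document: str) -> int:
    cleaned = document.translate(str.maketrans("", "", string.punctuation)).replace("\n", "")
    return len([w for w in cleaned.split(" ") if w.lower() == term.lower()])

print(term_frequency("to", "To be or not to be"))  # 2
print(round(log10(2 / 1), 3))                      # idf for n=2 docs, df=1 -> 0.301
print(round(2 * 0.301, 3))                         # tf * idf -> 0.602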
346
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __a =42 # [batch_size x 3] __a =42 # [batch_size x 3] __a =42 # [batch_size x 3] __a =42 # [batch_size x 3] __a =42 __a =42 __a =42 __a =42 __a =42 def UpperCamelCase__ ( self : str ): assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def UpperCamelCase__ ( self : List[str] ): return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def UpperCamelCase__ ( self : Union[str, Any] ): return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def UpperCamelCase__ ( self : Union[str, Any] ): _a = torch.arange(self.height * self.width ) _a = torch.stack( [ pixel_indices % self.width, torch.div(__a , self.width , rounding_mode="trunc" ), ] , axis=1 , ) return coords @property def UpperCamelCase__ ( self : List[Any] ): _a , *_a = self.shape _a = int(np.prod(__a ) ) _a = self.get_image_coords() _a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) _a = self.get_camera_rays(__a ) _a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ): _a , *_a , _a = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] _a = coords.view(__a , -1 , 2 ) _a = self.resolution() _a = self.fov() _a = (flat.float() / (res - 1)) * 2 - 1 _a = fracs * torch.tan(fov / 2 ) _a = fracs.view(__a , -1 , 2 ) _a = ( self.z.view(__a , 1 , 3 ) + self.x.view(__a , 1 , 3 ) * fracs[:, :, :1] + self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:] ) _a = directions / directions.norm(dim=-1 , keepdim=__a ) _a = torch.stack( [ torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(__a , *__a , 2 , 3 ) def UpperCamelCase__ ( self : Dict , __a : int , __a : int ): assert width * self.height == height * self.width, "The aspect ratio should not change." return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , ) def _lowerCamelCase ( lowercase : int ) -> DifferentiableProjectiveCamera: _a = [] _a = [] _a = [] _a = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): _a = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) _a = -z * 4 _a = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] ) _a = np.cross(lowercase , lowercase ) origins.append(lowercase ) xs.append(lowercase ) ys.append(lowercase ) zs.append(lowercase ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
346
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class A ( unittest.TestCase ): __UpperCAmelCase : List[str] = StableDiffusionLDMaDPipeline __UpperCAmelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS def lowercase_ (self : Any ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) UpperCAmelCase__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) UpperCAmelCase__ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) UpperCAmelCase__ = CLIPTextModel(__UpperCAmelCase ) UpperCAmelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase__ = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowercase_ (self : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict=0 ) -> Optional[Any]: """simple docstring""" if str(__UpperCAmelCase ).startswith("mps" ): UpperCAmelCase__ = torch.manual_seed(__UpperCAmelCase ) else: UpperCAmelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) UpperCAmelCase__ = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def lowercase_ (self : List[str] ) -> str: """simple docstring""" UpperCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = StableDiffusionLDMaDPipeline(**__UpperCAmelCase ) UpperCAmelCase__ = ldmad_pipe.to(__UpperCAmelCase ) ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase ) UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth UpperCAmelCase__ = rgb[0, -3:, -3:, -1] UpperCAmelCase__ = depth[0, -3:, -1] assert rgb.shape == (1, 6_4, 6_4, 3) assert depth.shape == (1, 6_4, 6_4) UpperCAmelCase__ = np.array( [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] ) 
UpperCAmelCase__ = np.array([103.46727, 85.812004, 87.849236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2 def lowercase_ (self : int ) -> List[str]: """simple docstring""" UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = StableDiffusionLDMaDPipeline(**__UpperCAmelCase ) UpperCAmelCase__ = ldmad_pipe.to(__UpperCAmelCase ) ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase ) UpperCAmelCase__ = 3 * [inputs["prompt"]] # forward UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth UpperCAmelCase__ = rgb_slice_a[0, -3:, -3:, -1] UpperCAmelCase__ = depth_slice_a[0, -3:, -1] UpperCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase ) UpperCAmelCase__ = 3 * [inputs.pop("prompt" )] UpperCAmelCase__ = ldmad_pipe.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors="pt" , ) UpperCAmelCase__ = text_inputs["input_ids"].to(__UpperCAmelCase ) UpperCAmelCase__ = ldmad_pipe.text_encoder(__UpperCAmelCase )[0] UpperCAmelCase__ = prompt_embeds # forward UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth UpperCAmelCase__ = rgb_slice_a[0, -3:, -3:, -1] UpperCAmelCase__ = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4 def lowercase_ (self : List[Any] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) UpperCAmelCase__ = StableDiffusionLDMaDPipeline(**__UpperCAmelCase ) UpperCAmelCase__ = ldmad_pipe.to(__UpperCAmelCase ) ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase ) UpperCAmelCase__ = "french fries" UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth UpperCAmelCase__ = rgb[0, -3:, -3:, -1] UpperCAmelCase__ = depth[0, -3:, -1] assert rgb.shape == (1, 6_4, 6_4, 3) assert depth.shape == (1, 6_4, 6_4) UpperCAmelCase__ = np.array( [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] ) UpperCAmelCase__ = np.array([107.84738, 84.62802, 89.962135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2 @slow @require_torch_gpu class A ( unittest.TestCase ): def lowercase_ (self : List[Any] ) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ (self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple="cpu" , __UpperCAmelCase : Tuple=torch.floataa , __UpperCAmelCase : Optional[int]=0 ) -> int: """simple docstring""" UpperCAmelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) UpperCAmelCase__ = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) ) UpperCAmelCase__ = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) 
UpperCAmelCase__ = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowercase_ (self : Tuple ) -> Dict: """simple docstring""" UpperCAmelCase__ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ) UpperCAmelCase__ = ldmad_pipe.to(__UpperCAmelCase ) ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase__ = self.get_inputs(__UpperCAmelCase ) UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth UpperCAmelCase__ = rgb[0, -3:, -3:, -1].flatten() UpperCAmelCase__ = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 5_1_2, 5_1_2, 3) assert depth.shape == (1, 5_1_2, 5_1_2) UpperCAmelCase__ = np.array( [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] ) UpperCAmelCase__ = np.array( [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3 @nightly @require_torch_gpu class A ( unittest.TestCase ): def lowercase_ (self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ (self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any]="cpu" , __UpperCAmelCase : Optional[int]=torch.floataa , __UpperCAmelCase : Optional[int]=0 ) -> str: """simple docstring""" UpperCAmelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) UpperCAmelCase__ = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) ) UpperCAmelCase__ = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) UpperCAmelCase__ = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 5_0, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowercase_ (self : Any ) -> Any: """simple docstring""" UpperCAmelCase__ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(__UpperCAmelCase ) ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase__ = self.get_inputs(__UpperCAmelCase ) UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth UpperCAmelCase__ = 0.495586 UpperCAmelCase__ = 0.33795515 UpperCAmelCase__ = 112.48518 UpperCAmelCase__ = 98.489746 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3 def lowercase_ (self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase__ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(__UpperCAmelCase ) ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase__ = self.get_inputs(__UpperCAmelCase ) UpperCAmelCase__ = ldmad_pipe(**__UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = output.rgb, output.depth UpperCAmelCase__ = 0.4194127 UpperCAmelCase__ = 0.35375586 UpperCAmelCase__ = 0.5638502 UpperCAmelCase__ = 0.34686103 assert rgb.shape == (1, 5_1_2, 5_1_2, 3) assert depth.shape == (1, 5_1_2, 5_1_2, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - 
rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3
65
from __future__ import annotations from scipy.special import comb # type: ignore class A : def __init__(self : List[Any] , __UpperCAmelCase : list[tuple[float, float]] ) -> List[str]: """simple docstring""" UpperCAmelCase__ = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. UpperCAmelCase__ = len(__UpperCAmelCase ) - 1 def lowercase_ (self : int , __UpperCAmelCase : float ) -> list[float]: """simple docstring""" assert 0 <= t <= 1, "Time t must be between 0 and 1." UpperCAmelCase__ = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , __UpperCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(__UpperCAmelCase ) , 5 ) == 1 return output_values def lowercase_ (self : Dict , __UpperCAmelCase : float ) -> tuple[float, float]: """simple docstring""" assert 0 <= t <= 1, "Time t must be between 0 and 1." UpperCAmelCase__ = self.basis_function(__UpperCAmelCase ) UpperCAmelCase__ = 0.0 UpperCAmelCase__ = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def lowercase_ (self : Optional[int] , __UpperCAmelCase : float = 0.01 ) -> Optional[int]: """simple docstring""" from matplotlib import pyplot as plt # type: ignore UpperCAmelCase__ = [] # x coordinates of points to plot UpperCAmelCase__ = [] # y coordinates of points to plot UpperCAmelCase__ = 0.0 while t <= 1: UpperCAmelCase__ = self.bezier_curve_function(__UpperCAmelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size UpperCAmelCase__ = [i[0] for i in self.list_of_points] UpperCAmelCase__ = [i[1] for i in self.list_of_points] plt.plot( __UpperCAmelCase , __UpperCAmelCase , color="blue" , label="Curve of Degree " + str(self.degree ) , ) plt.scatter(__UpperCAmelCase , __UpperCAmelCase , color="red" , label="Control Points" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
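# Editorial numeric check of the curve class above, kept free of matplotlib:
# a degree-1 Bezier curve is plain linear interpolation, so evaluating the
# first demo curve [(1, 2), (3, 5)] at t = 0.5 must give the midpoint.
from scipy.special import comb

def bezier_point(points: list[tuple[float, float]], t: float) -> tuple[float, float]:
    n = len(points) - 1
    basis = [float(comb(n, i)) * (1 - t) ** (n - i) * t**i for i in range(n + 1)]
    x = sum(b * p[0] for b, p in zip(basis, points))
    y = sum(b * p[1] for b, p in zip(basis, points))
    return (x, y)

print(bezier_point([(1, 2), (3, 5)], 0.5))  # (2.0, 3.5)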
65
1
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging a : Dict = logging.get_logger(__name__) a : List[Any] = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co./huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co./models?filter=informer } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "informer" __lowerCamelCase = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ): '''simple docstring''' # time series specific configuration lowercase__ : Dict= prediction_length lowercase__ : List[Any]= context_length or prediction_length lowercase__ : Dict= distribution_output lowercase__ : Dict= loss lowercase__ : str= input_size lowercase__ : Any= num_time_features lowercase__ : int= lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] lowercase__ : int= scaling lowercase__ : List[Any]= num_dynamic_real_features lowercase__ : Dict= num_static_real_features lowercase__ : Dict= num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(snake_case__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) lowercase__ : Union[str, Any]= cardinality else: lowercase__ : Any= [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(snake_case__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) lowercase__ : Any= embedding_dimension else: lowercase__ : Tuple= [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowercase__ : Union[str, Any]= num_parallel_samples # Transformer architecture configuration lowercase__ : Optional[int]= input_size * len(self.lags_sequence ) + self._number_of_features lowercase__ : Dict= d_model lowercase__ : Dict= encoder_attention_heads lowercase__ : str= decoder_attention_heads lowercase__ : List[Any]= encoder_ffn_dim lowercase__ : Tuple= decoder_ffn_dim lowercase__ : Union[str, Any]= encoder_layers lowercase__ : Optional[int]= decoder_layers lowercase__ : Tuple= dropout lowercase__ : Optional[Any]= attention_dropout lowercase__ : Optional[int]= activation_dropout lowercase__ : Optional[Any]= encoder_layerdrop lowercase__ : List[Any]= decoder_layerdrop lowercase__ : List[Any]= activation_function lowercase__ : Dict= init_std lowercase__ : List[str]= use_cache # Informer lowercase__ : Union[str, Any]= attention_type lowercase__ : Any= sampling_factor 
lowercase__ : Optional[Any]= distil super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
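For reference, a minimal usage sketch of the `InformerConfig` class this file defines (it assumes a transformers release that ships Informer; the values passed are arbitrary illustrations):

```python
from transformers import InformerConfig

# Only prediction_length is strictly required; the rest fall back to the
# defaults visible in the signature above (d_model=64, attention_type="prob").
config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
print(config.d_model, config.attention_type)  # 64 prob
```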
150
"""simple docstring""" from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge a : Optional[Any] = [ """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the""" """ final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe""" """ depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""", """The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal""" """ accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's""" """ founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the""" """ body.""", """Amnesty International releases its annual report on the death penalty. The report catalogs the use of""" """ state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the""" """ world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital""" """ punishment.""", ] a : str = [ """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""" """ Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz""" """ had informed his Lufthansa training school of an episode of severe depression, airline says .""", """Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .""" """ Israel and the United States opposed the move, which could open the door to war crimes investigations against""" """ Israelis .""", """Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to""" """ death . Organization claims that governments around the world are using the threat of terrorism to advance""" """ executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death""" """ sentences up by 28% .""", ] def lowercase__() ->List[Any]: """simple docstring""" lowercase__ : str= calculate_rouge(A , A , bootstrap_aggregation=A , rouge_keys=["rouge2", "rougeL"] ) assert isinstance(A , A ) lowercase__ : Optional[int]= calculate_rouge(A , A , bootstrap_aggregation=A , rouge_keys=["rouge2"] ) assert ( pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean() ) def lowercase__() ->int: """simple docstring""" lowercase__ : Optional[int]= "rougeLsum" lowercase__ : str= calculate_rouge(A , A , newline_sep=A , rouge_keys=[k] )[k] lowercase__ : Union[str, Any]= calculate_rouge(A , A , newline_sep=A , rouge_keys=[k] )[k] assert score > score_no_sep def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Tuple= ["rouge1", "rouge2", "rougeL"] lowercase__ : Optional[Any]= calculate_rouge(A , A , newline_sep=A , rouge_keys=A ) lowercase__ : Dict= calculate_rouge(A , A , newline_sep=A , rouge_keys=A ) assert score_sep == score_no_sep def lowercase__() ->Optional[int]: """simple docstring""" lowercase__ : int= [ "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.", "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .", ] lowercase__ : Dict= [ "Margot Frank, died in 1945, a month earlier than previously thought.", "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of" " the final seconds on board Flight 9525.", ] assert calculate_rouge(A , A , newline_sep=A ) == calculate_rouge(A , A , newline_sep=A ) def lowercase__() ->Dict: """simple docstring""" lowercase__ : List[str]= [ "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" " ] lowercase__ : Union[str, Any]= [ " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ." ] lowercase__ : List[Any]= calculate_rouge(A , A , rouge_keys=["rougeLsum"] , newline_sep=A )["rougeLsum"] lowercase__ : Optional[Any]= calculate_rouge(A , A , rouge_keys=["rougeLsum"] )["rougeLsum"] assert new_score > prev_score def lowercase__() ->Optional[Any]: """simple docstring""" lowercase__ : Optional[Any]= Path("examples/seq2seq/test_data/wmt_en_ro" ) lowercase__ : Union[str, Any]= calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) ) assert isinstance(A , A ) lowercase__ : List[Any]= calculate_rouge_path( data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=A ) assert isinstance(A , A )
150
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) snake_case_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""LayoutLMv2FeatureExtractor"""] snake_case_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
"""simple docstring""" from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record lowercase__ = """\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } """ lowercase__ = """\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. """ lowercase__ = """ Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for 'record': list of question-answer dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'prediction_text': the predicted answer text - for 'multirc': list of question-answer dictionaries with the following keys: - 'idx': index of the question-answer pair as specified by the dataset - 'prediction': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for 'record': list of question-answers dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'answers': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for 'record': - 'exact_match': Exact match between answer and gold answer - 'f1': F1 score - for 'multirc': - 'exact_match': Exact match between answer and gold answer - 'f1_m': Per-question macro-F1 score - 'f1_a': Average F1 score over all answers - for 'axb': 'matthews_correlation': Matthew Correlation - for 'cb': - 'accuracy': Accuracy - 'f1': F1 score - for all others: - 'accuracy': Accuracy Examples: >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'cb') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'record') >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}] >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc') >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'axb') >>> references = [0, 1] >>> predictions = [0, 1] >>> 
results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'matthews_correlation': 1.0} """ def _snake_case ( lowercase__ , lowercase__ ): return float((preds == labels).mean() ) def _snake_case ( lowercase__ , lowercase__ , lowercase__="binary" ): _lowerCamelCase : str = simple_accuracy(lowercase__ , lowercase__ ) _lowerCamelCase : Any = float(fa_score(y_true=lowercase__ , y_pred=lowercase__ , average=lowercase__ ) ) return { "accuracy": acc, "f1": fa, } def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Any = {} for id_pred, label in zip(lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}''' _lowerCamelCase : Union[str, Any] = id_pred['prediction'] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _lowerCamelCase : Optional[Any] = [(pred, label)] _lowerCamelCase, _lowerCamelCase : Optional[int] = [], [] for question, preds_labels in question_map.items(): _lowerCamelCase, _lowerCamelCase : Tuple = zip(*lowercase__ ) _lowerCamelCase : List[str] = fa_score(y_true=lowercase__ , y_pred=lowercase__ , average='macro' ) fas.append(lowercase__ ) _lowerCamelCase : int = int(sum(pred == label for pred, label in preds_labels ) == len(lowercase__ ) ) ems.append(lowercase__ ) _lowerCamelCase : Optional[Any] = float(sum(lowercase__ ) / len(lowercase__ ) ) _lowerCamelCase : Optional[int] = sum(lowercase__ ) / len(lowercase__ ) _lowerCamelCase : List[Any] = float(fa_score(y_true=lowercase__ , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def A_ ( self ): if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , ) def A_ ( self ): if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "prediction_text": datasets.Value('string' ), }, "references": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "answers": datasets.Sequence(datasets.Value('string' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('int64' ), "paragraph": datasets.Value('int64' ), "question": datasets.Value('int64' ), }, "prediction": datasets.Value('int64' ), }, "references": datasets.Value('int64' ), } else: return { "predictions": datasets.Value('int64' ), "references": datasets.Value('int64' ), } def A_ ( self , lowercase , lowercase ): if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )} elif self.config_name == "cb": return acc_and_fa(lowercase , lowercase , fa_avg='macro' ) elif self.config_name == "record": _lowerCamelCase : List[str] = [ { 'qas': [ {'id': ref['idx']['query'], 'answers': 
[{'text': ans} for ans in ref['answers']]} for ref in references ] } ] _lowerCamelCase : Union[str, Any] = {pred['idx']['query']: pred['prediction_text'] for pred in predictions} return evaluate_record(lowercase , lowercase )[0] elif self.config_name == "multirc": return evaluate_multirc(lowercase , lowercase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(lowercase , lowercase )} else: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
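A standalone sketch of the per-question grouping that `evaluate_multirc` performs above: exact match credits a question only if every one of its answers is predicted correctly (the toy data below is made up):

```python
from collections import defaultdict

preds = [
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 1},
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
    {"idx": {"paragraph": 0, "question": 1}, "prediction": 1},
]
labels = [1, 0, 0]  # the last prediction is wrong

by_question = defaultdict(list)
for pred, label in zip(preds, labels):
    key = (pred["idx"]["paragraph"], pred["idx"]["question"])
    by_question[key].append(pred["prediction"] == label)

exact_match = sum(all(hits) for hits in by_question.values()) / len(by_question)
print(exact_match)  # 0.5: question 0 fully correct, question 1 not
```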
96
0
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def snake_case () -> Generator[int, None, None]: UpperCamelCase_: dict[int, int] = {} UpperCamelCase_: Union[str, Any] = 2 while True: UpperCamelCase_: Dict = factor_map.pop(UpperCAmelCase__ , UpperCAmelCase__ ) if factor: UpperCamelCase_: Optional[int] = factor + prime while x in factor_map: x += factor UpperCamelCase_: Union[str, Any] = factor else: UpperCamelCase_: str = prime yield prime prime += 1 def snake_case (UpperCAmelCase__ = 1E10 ) -> int: UpperCamelCase_: List[str] = sieve() UpperCamelCase_: Union[str, Any] = 1 while True: UpperCamelCase_: Dict = next(UpperCAmelCase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(UpperCAmelCase__ ) n += 2 if __name__ == "__main__": print(solution())
360
import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" a : Optional[int] =RoFormerTokenizer a : int =RoFormerTokenizerFast a : int =True a : Optional[int] =True def _a ( self ): super().setUp() def _a ( self , **_lowerCamelCase ): return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **_lowerCamelCase ) def _a ( self , **_lowerCamelCase ): return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **_lowerCamelCase ) def _a ( self ): UpperCamelCase_: Optional[int] = '永和服装饰品有限公司,今天天气非常好' UpperCamelCase_: Any = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好' return input_text, output_text def _a ( self ): UpperCamelCase_: int = self.get_tokenizer() UpperCamelCase_ ,UpperCamelCase_: int = self.get_chinese_input_output_texts() UpperCamelCase_: Tuple = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , output_text.split() ) UpperCamelCase_: Dict = tokens + [tokenizer.unk_token] UpperCamelCase_: Dict = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) def _a ( self ): UpperCamelCase_: Optional[Any] = self.get_rust_tokenizer() UpperCamelCase_ ,UpperCamelCase_: Tuple = self.get_chinese_input_output_texts() UpperCamelCase_: Optional[Any] = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , output_text.split() ) UpperCamelCase_: str = tokens + [tokenizer.unk_token] UpperCamelCase_: Optional[Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) def _a ( self ): pass def _a ( self ): pass def _a ( self ): pass
292
0
def naive_cut_rod_recursive(n: int, prices: list):
    # Exhaustive recursion: maximum revenue from cutting a rod of length n.
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
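As a compact cross-check of the bottom-up recurrence, the same DP run standalone on the classic CLRS price table (these prices are illustrative, not the ones in `main`):

```python
prices = [1, 5, 8, 9, 10, 17, 17, 20]  # price[i] = value of a rod of length i + 1
max_rev = [0] * (len(prices) + 1)
for i in range(1, len(prices) + 1):
    # best revenue for length i: first piece of length j plus best for the rest
    max_rev[i] = max(prices[j - 1] + max_rev[i - j] for j in range(1, i + 1))
print(max_rev[len(prices)])  # 22 for the CLRS example (cut as 2 + 6)
```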
127
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase_ = 1_6 lowercase_ = 3_2 def a ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ) -> Optional[int]: """simple docstring""" _lowercase =AutoTokenizer.from_pretrained(A__ ) _lowercase =load_dataset('glue' , 'mrpc' ) def tokenize_function(A__ : Optional[int] ): # max_length=None => use the model max length (it's actually the default) _lowercase =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _lowercase =datasets.map( A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=A__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowercase =tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(A__ : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(A__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. _lowercase =DataLoader( tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) _lowercase =DataLoader( tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) return train_dataloader, eval_dataloader def a ( A__ : Optional[Any] , A__ : Optional[int] , A__ : List[str] , A__ : Dict ) -> Dict: """simple docstring""" model.eval() _lowercase =0 for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _lowercase =model(**A__ ) _lowercase =outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _lowercase , _lowercase =accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(A__ ) - 1: _lowercase =predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowercase =references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=A__ , references=A__ , ) _lowercase =metric.compute() return eval_metric["accuracy"] def a ( A__ : str , A__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" _lowercase =Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowercase =config['lr'] _lowercase =int(config['num_epochs'] ) _lowercase =int(config['seed'] ) _lowercase =int(config['batch_size'] ) _lowercase =args.model_name_or_path set_seed(A__ ) _lowercase , _lowercase =get_dataloaders(A__ , A__ , A__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowercase =AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ ) # Instantiate optimizer _lowercase =( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _lowercase =optimizer_cls(params=model.parameters() , lr=A__ ) if accelerator.state.deepspeed_plugin is not None: _lowercase =accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: _lowercase =1 _lowercase =(len(A__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _lowercase =get_linear_schedule_with_warmup( optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , ) else: _lowercase =DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =accelerator.prepare( A__ , A__ , A__ , A__ , A__ ) # We need to keep track of how many total steps we have iterated over _lowercase =0 # We also need to keep track of the stating epoch so files are named properly _lowercase =0 _lowercase =evaluate.load('glue' , 'mrpc' ) _lowercase =num_epochs if args.partial_train_epoch is not None: _lowercase =args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) _lowercase =args.resume_from_checkpoint.split('epoch_' )[1] _lowercase ='' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break _lowercase =int(A__ ) + 1 _lowercase =evaluation_loop(A__ , A__ , A__ , A__ ) accelerator.print('resumed checkpoint performance:' , A__ ) accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] ) accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] ) with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , 'r' ) as f: _lowercase =json.load(A__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model _lowercase ={} for epoch in range(A__ , A__ ): model.train() for step, batch in enumerate(A__ ): _lowercase =model(**A__ ) _lowercase =outputs.loss _lowercase =loss / gradient_accumulation_steps accelerator.backward(A__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 _lowercase =F'''epoch_{epoch}''' _lowercase =os.path.join(args.output_dir , A__ ) accelerator.save_state(A__ ) _lowercase =evaluation_loop(A__ , A__ , A__ , A__ ) _lowercase =accuracy _lowercase =lr_scheduler.get_lr()[0] _lowercase =optimizer.param_groups[0]['lr'] _lowercase =epoch _lowercase =overall_step accelerator.print(F'''epoch {epoch}:''' , A__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , 'w' ) as f: json.dump(A__ , A__ ) def a ( ) -> Tuple: """simple docstring""" _lowercase =argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=A__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=A__ , ) parser.add_argument( '--output_dir' , type=A__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=A__ , default=A__ , help='If the training should continue from a checkpoint folder.' , ) parser.add_argument( '--partial_train_epoch' , type=A__ , default=A__ , help='If passed, the training will stop after this number of epochs.' , ) parser.add_argument( '--num_epochs' , type=A__ , default=2 , help='Number of train epochs.' 
, ) _lowercase =parser.parse_args() _lowercase ={'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(A__ , A__ ) if __name__ == "__main__": main()
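One fragile spot above is recovering the starting epoch by splitting the checkpoint folder name on `'epoch_'` and collecting digits by hand. A regex-based sketch of the same parsing (folder naming like `epoch_3` matches what `accelerator.save_state` is given above):

```python
import re

def starting_epoch(checkpoint_dir: str) -> int:
    """Return the epoch to resume from, given a folder like 'output/epoch_3'."""
    match = re.search(r"epoch_(\d+)", checkpoint_dir)
    if match is None:
        raise ValueError(f"no epoch number in {checkpoint_dir!r}")
    return int(match.group(1)) + 1

print(starting_epoch("output/epoch_3"))  # 4
```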
205
0
def optimal_merge_pattern(files: list) -> float:
    # Greedy two-way merge: repeatedly merge the two smallest files.
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
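The list-based greedy above rescans the list for each `min`/`index`/`pop`, which is O(n²) overall. An equivalent heap-based sketch (same merge order, same total cost, O(n log n)):

```python
import heapq

def optimal_merge_cost(sizes: list[int]) -> int:
    """Heap-based equivalent of the greedy above: always merge the two smallest."""
    heap = list(sizes)
    heapq.heapify(heap)
    total = 0
    while len(heap) > 1:
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        total += merged
        heapq.heappush(heap, merged)
    return total

print(optimal_merge_cost([2, 3, 4]))  # 14: merge 2+3 (cost 5), then 5+4 (cost 9)
```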
369
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" def a ( self : Any , SCREAMING_SNAKE_CASE__ : Any ) -> int: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ): lowerCAmelCase__ = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def a ( self : Optional[Any] ) -> Any: lowerCAmelCase__ = "sshleifer/tiny-gpt2" lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : int ) -> Optional[Any]: lowerCAmelCase__ = "sgugger/tiny-distilbert-classification" lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , only_pretrain_model=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Optional[Any] ) -> int: lowerCAmelCase__ = "sshleifer/tiny-gpt2" lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , torchscript=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def a ( self : Dict ) -> Optional[Any]: lowerCAmelCase__ = "sshleifer/tiny-gpt2" lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Union[str, Any] ) -> Tuple: lowerCAmelCase__ = "sshleifer/tiny-gpt2" lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) # set architectures equal to `None` lowerCAmelCase__ = None lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] ) lowerCAmelCase__ = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Any ) -> Optional[Any]: lowerCAmelCase__ = "sshleifer/tiny-gpt2" lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision" ) def a ( self : int ) -> Dict: lowerCAmelCase__ = "sshleifer/tiny-gpt2" lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=SCREAMING_SNAKE_CASE__ , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Optional[int] ) -> Union[str, Any]: lowerCAmelCase__ = "sshleifer/tiny-gpt2" lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Optional[Any] ) -> Optional[Any]: lowerCAmelCase__ = "sshleifer/tinier_bart" lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : List[str] ) -> Dict: lowerCAmelCase__ = "sshleifer/tiny-gpt2" lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Optional[int] ) -> Optional[int]: lowerCAmelCase__ = "sshleifer/tinier_bart" lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = 
PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] ) lowerCAmelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : List[Any] ) -> Optional[int]: lowerCAmelCase__ = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , save_to_csv=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "train_time.csv" ) , env_info_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "env.csv" ) , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ ) benchmark.run() self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "inf_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "train_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "train_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "env.csv" ) ).exists() ) def a ( self : Optional[Any] ) -> Any: lowerCAmelCase__ = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(SCREAMING_SNAKE_CASE__ : List[Any] ): self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "sequential" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "cumulative" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "current" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(SCREAMING_SNAKE_CASE__ , "log.txt" ) , log_print=SCREAMING_SNAKE_CASE__ , trace_memory_line_by_line=SCREAMING_SNAKE_CASE__ , multi_process=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "log.txt" ) ).exists() )
221
0
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class PolybiusCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # Return the (row, column) pair (1-indexed) of a letter in the square.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        # Return the letter at a (row, column) pair (1-indexed).
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
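To make the square lookup explicit without numpy, here is a minimal dictionary round trip over the same 5×5 layout ('j' folds into 'i' exactly as above). It shows only the letter-to-pair mapping, not the pair repacking the class layers on top:

```python
square = "abcdefghiklmnopqrstuvwxyz"  # the 5x5 square row by row, no 'j'
to_pair = {ch: divmod(i, 5) for i, ch in enumerate(square)}
from_pair = {pair: ch for ch, pair in to_pair.items()}

message = "test message".replace(" ", "").replace("j", "i")
pairs = [to_pair[ch] for ch in message]           # letter -> (row, col)
assert "".join(from_pair[p] for p in pairs) == message  # and back again
```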
346
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co./google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co./models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
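The defaults above fully determine the video tokenization; a quick sketch of how many tubelet tokens they imply per clip:

```python
num_frames, image_size = 32, 224
t, h, w = 2, 16, 16  # tubelet_size from the config defaults
num_patches = (num_frames // t) * (image_size // h) * (image_size // w)
print(num_patches)  # 16 * 14 * 14 = 3136 tokens per video
```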
346
1
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset __UpperCamelCase : int = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __magic_name__ ( nn.Module): def __init__( self : Any , lowerCamelCase__ : int ) -> Any: '''simple docstring''' super().__init__() UpperCamelCase__ : Any = torchvision.models.resnetaaa(pretrained=lowerCamelCase__ ) UpperCamelCase__ : List[str] = list(model.children() )[:-2] UpperCamelCase__ : Optional[int] = nn.Sequential(*lowerCamelCase__ ) UpperCamelCase__ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def UpperCAmelCase__ ( self : str , lowerCamelCase__ : Optional[Any] ) -> Any: '''simple docstring''' UpperCamelCase__ : Optional[Any] = self.pool(self.model(lowerCamelCase__ ) ) UpperCamelCase__ : int = torch.flatten(lowerCamelCase__ , start_dim=2 ) UpperCamelCase__ : int = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class __magic_name__ ( __lowerCAmelCase): def __init__( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ : Optional[Any] = [json.loads(lowerCamelCase__ ) for l in open(lowerCamelCase__ )] UpperCamelCase__ : Any = os.path.dirname(lowerCamelCase__ ) UpperCamelCase__ : int = tokenizer UpperCamelCase__ : int = labels UpperCamelCase__ : Optional[Any] = len(lowerCamelCase__ ) UpperCamelCase__ : Union[str, Any] = max_seq_length UpperCamelCase__ : Optional[int] = transforms def __len__( self : Optional[int] ) -> Any: '''simple docstring''' return len(self.data ) def __getitem__( self : Optional[int] , lowerCamelCase__ : Dict ) -> List[str]: '''simple docstring''' UpperCamelCase__ : List[Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=lowerCamelCase__ ) ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = sentence[0], sentence[1:-1], sentence[-1] UpperCamelCase__ : List[str] = sentence[: self.max_seq_length] UpperCamelCase__ : Tuple = torch.zeros(self.n_classes ) UpperCamelCase__ : int = 1 UpperCamelCase__ : Tuple = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' ) UpperCamelCase__ : List[str] = self.transforms(lowerCamelCase__ ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def UpperCAmelCase__ ( self : List[Any] ) -> int: '''simple docstring''' UpperCamelCase__ : Any = Counter() for row in self.data: label_freqs.update(row['''label'''] ) return label_freqs def _a ( SCREAMING_SNAKE_CASE : int ): """simple docstring""" UpperCamelCase__ : Tuple = [len(row['''sentence'''] ) for row in batch] UpperCamelCase__ , UpperCamelCase__ : List[str] = len(SCREAMING_SNAKE_CASE ), max(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[str] = torch.zeros(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=torch.long ) UpperCamelCase__ : str = torch.zeros(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ): UpperCamelCase__ : Tuple = input_row['''sentence'''] UpperCamelCase__ : Optional[int] = 1 UpperCamelCase__ : str = torch.stack([row['''image'''] for 
row in batch] ) UpperCamelCase__ : Union[str, Any] = torch.stack([row['''label'''] for row in batch] ) UpperCamelCase__ : int = torch.stack([row['''image_start_token'''] for row in batch] ) UpperCamelCase__ : str = torch.stack([row['''image_end_token'''] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def _a ( ): """simple docstring""" return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def _a ( ): """simple docstring""" return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ), ] )
51
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __magic_name__ ( unittest.TestCase): def __init__( self : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple=7 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Optional[int]=18 , lowerCamelCase__ : Any=30 , lowerCamelCase__ : int=400 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int=False , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase__ : str=[0.5, 0.5, 0.5] , ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : Optional[Any] = parent UpperCamelCase__ : Dict = batch_size UpperCamelCase__ : List[Any] = num_channels UpperCamelCase__ : int = image_size UpperCamelCase__ : str = min_resolution UpperCamelCase__ : str = max_resolution UpperCamelCase__ : Tuple = do_resize UpperCamelCase__ : str = size if size is not None else {'''height''': 18, '''width''': 20} UpperCamelCase__ : Optional[Any] = do_thumbnail UpperCamelCase__ : int = do_align_axis UpperCamelCase__ : List[Any] = do_pad UpperCamelCase__ : List[Any] = do_normalize UpperCamelCase__ : Dict = image_mean UpperCamelCase__ : List[Any] = image_std def UpperCAmelCase__ ( self : List[Any] ) -> Any: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __magic_name__ ( __lowerCAmelCase , unittest.TestCase): A: Tuple = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : str ) -> int: '''simple docstring''' UpperCamelCase__ : int = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''size''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''do_thumbnail''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''do_align_long_axis''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''do_pad''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''image_std''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' UpperCamelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} ) UpperCamelCase__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) # Previous config had dimensions in 
(width, height) order UpperCamelCase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} ) def UpperCAmelCase__ ( self : Any ) -> str: '''simple docstring''' pass @is_flaky() def UpperCAmelCase__ ( self : Optional[int] ) -> Any: '''simple docstring''' UpperCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input UpperCamelCase__ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase__ : List[str] = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' UpperCamelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input UpperCamelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase__ : List[Any] = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def UpperCAmelCase__ ( self : str ) -> Tuple: '''simple docstring''' UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input UpperCamelCase__ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase__ : List[str] = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
51
1
"""simple docstring""" def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Any ) -> Optional[int]: """simple docstring""" return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any]=0 ) -> Optional[int]: """simple docstring""" return sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x[column] ) def lowerCAmelCase__ ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any]=float('inf' ) ) -> Optional[Any]: """simple docstring""" for i in range(points_counts - 1 ): for j in range(i + 1 , _UpperCamelCase ): snake_case = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: snake_case = current_dis return min_dis def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int=float('inf' ) ) -> Optional[int]: """simple docstring""" for i in range(min(6 , points_counts - 1 ) , _UpperCamelCase ): for j in range(max(0 , i - 6 ) , _UpperCamelCase ): snake_case = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: snake_case = current_dis return min_dis def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] ) -> int: """simple docstring""" if points_counts <= 3: return dis_between_closest_pair(_UpperCamelCase , _UpperCamelCase ) # recursion snake_case = points_counts // 2 snake_case = closest_pair_of_points_sqr( _UpperCamelCase , points_sorted_on_y[:mid] , _UpperCamelCase ) snake_case = closest_pair_of_points_sqr( _UpperCamelCase , points_sorted_on_y[mid:] , points_counts - mid ) snake_case = min(_UpperCamelCase , _UpperCamelCase ) snake_case = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(_UpperCamelCase ) snake_case = dis_between_closest_in_strip( _UpperCamelCase , len(_UpperCamelCase ) , _UpperCamelCase ) return min(_UpperCamelCase , _UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> Optional[Any]: """simple docstring""" snake_case = column_based_sort(_UpperCamelCase , column=0 ) snake_case = column_based_sort(_UpperCamelCase , column=1 ) return ( closest_pair_of_points_sqr( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ) ** 0.5 if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print("Distance:", closest_pair_of_points(points, len(points)))
150
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.json"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "mgp-str": "https://huggingface.co./alibaba-damo/mgp-str-base/blob/main/vocab.json", } } SCREAMING_SNAKE_CASE__ = {"mgp-str": 27} class lowerCAmelCase_ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase : Tuple = VOCAB_FILES_NAMES _lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowerCAmelCase , lowerCAmelCase="[GO]" , lowerCAmelCase="[GO]" , lowerCAmelCase="[s]" , lowerCAmelCase="[GO]" , **lowerCAmelCase ): """simple docstring""" super().__init__( unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase , ) with open(lowerCAmelCase , encoding='utf-8' ) as vocab_handle: snake_case = json.load(lowerCAmelCase ) snake_case = {v: k for k, v in self.vocab.items()} @property def snake_case ( self ): """simple docstring""" return len(self.vocab ) def snake_case ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = [] for s in text: char_tokens.extend(lowerCAmelCase ) return char_tokens def snake_case ( self , lowerCAmelCase ): """simple docstring""" return self.vocab.get(lowerCAmelCase , self.vocab.get(self.unk_token ) ) def snake_case ( self , lowerCAmelCase ): """simple docstring""" return self.decoder.get(lowerCAmelCase ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error('Vocabulary path ({}) should be a directory'.format(lowerCAmelCase ) ) return snake_case = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase ) + '\n' ) return (vocab_file,)
150
1
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _SCREAMING_SNAKE_CASE () -> Dict: '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join lowercase_ = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , _lowerCAmelCase ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: '''simple docstring''' assert _test_patching.open is open lowercase_ = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , _lowerCAmelCase ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _SCREAMING_SNAKE_CASE () -> Any: '''simple docstring''' lowercase_ = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , _lowerCAmelCase ): pass def _SCREAMING_SNAKE_CASE () -> Dict: '''simple docstring''' lowercase_ = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , _lowerCAmelCase ) is None with patch_submodule(_test_patching , """len""" , _lowerCAmelCase ): assert _test_patching.len is mock assert _test_patching.len is len def _SCREAMING_SNAKE_CASE () -> 
Optional[int]: '''simple docstring''' lowercase_ = """__test_patch_submodule_start_and_stop_mock__""" lowercase_ = patch_submodule(_test_patching , """open""" , _lowerCAmelCase ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _SCREAMING_SNAKE_CASE () -> List[str]: '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join lowercase_ = """__test_patch_submodule_successive_join__""" lowercase_ = """__test_patch_submodule_successive_dirname__""" lowercase_ = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , _lowerCAmelCase ): with patch_submodule(_test_patching , """os.rename""" , _lowerCAmelCase ): with patch_submodule(_test_patching , """os.path.dirname""" , _lowerCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , _lowerCAmelCase ): with patch_submodule(_test_patching , """os.path.join""" , _lowerCAmelCase ): with patch_submodule(_test_patching , """os.path.dirname""" , _lowerCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _SCREAMING_SNAKE_CASE () -> Any: '''simple docstring''' lowercase_ = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , _lowerCAmelCase ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , _lowerCAmelCase ): pass
371
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ): lowercase__ = BarthezTokenizer lowercase__ = BarthezTokenizerFast lowercase__ = True lowercase__ = True def _UpperCAmelCase ( self : List[Any]): """simple docstring""" super().setUp() lowercase_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""") tokenizer.save_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase_) lowercase_ = tokenizer def _UpperCAmelCase ( self : Any): """simple docstring""" lowercase_ = """<pad>""" lowercase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_) def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(vocab_keys[-1] , """<mask>""") self.assertEqual(len(lowerCAmelCase_) , 1_0_1_1_2_2) def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2) @require_torch def _UpperCAmelCase ( self : List[str]): """simple docstring""" lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowercase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] lowercase_ = self.tokenizer( lowerCAmelCase_ , max_length=len(lowerCAmelCase_) , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""") self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_) self.assertEqual((2, 6) , batch.input_ids.shape) self.assertEqual((2, 6) , batch.attention_mask.shape) lowercase_ = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_) def _UpperCAmelCase ( self : List[Any]): """simple docstring""" if not self.test_rust_tokenizer: return lowercase_ = self.get_tokenizer() lowercase_ = self.get_rust_tokenizer() lowercase_ = """I was born in 92000, and this is falsé.""" lowercase_ = tokenizer.tokenize(lowerCAmelCase_) lowercase_ = rust_tokenizer.tokenize(lowerCAmelCase_) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_) lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) lowercase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_) lowercase_ = self.get_rust_tokenizer() lowercase_ = tokenizer.encode(lowerCAmelCase_) lowercase_ = rust_tokenizer.encode(lowerCAmelCase_) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_) @slow def _UpperCAmelCase ( self : int): """simple docstring""" lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 
8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowercase_ = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
313
0
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: UpperCamelCase = s_dict.pop(_SCREAMING_SNAKE_CASE ) elif "subsample" in key: UpperCamelCase = s_dict.pop(_SCREAMING_SNAKE_CASE ) def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase , UpperCamelCase = emb.weight.shape UpperCamelCase = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) UpperCamelCase = emb.weight.data return lin_layer def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" ) UpperCamelCase = mam_aaa["args"] UpperCamelCase = mam_aaa["model"] UpperCamelCase = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(_SCREAMING_SNAKE_CASE ) rename_keys(_SCREAMING_SNAKE_CASE ) UpperCamelCase = state_dict["decoder.embed_tokens.weight"].shape[0] UpperCamelCase = args.share_decoder_input_output_embed UpperCamelCase = [int(_SCREAMING_SNAKE_CASE ) for i in args.conv_kernel_sizes.split("," )] UpperCamelCase = SpeechaTextConfig( vocab_size=_SCREAMING_SNAKE_CASE , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(_SCREAMING_SNAKE_CASE ) , conv_channels=args.conv_channels , conv_kernel_sizes=_SCREAMING_SNAKE_CASE , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=_SCREAMING_SNAKE_CASE , num_beams=5 , max_length=200 , use_cache=_SCREAMING_SNAKE_CASE , decoder_start_token_id=2 , early_stopping=_SCREAMING_SNAKE_CASE , ) UpperCamelCase = SpeechaTextForConditionalGeneration(_SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase = model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0 and not set(_SCREAMING_SNAKE_CASE ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F" but all the following weights are missing {missing}" ) if tie_embeds: UpperCamelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: UpperCamelCase = lm_head_weights model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) 
file.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCAmelCase__ = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
153
"""simple docstring""" class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ): A = name A = val def __str__( self :str ): return f"{self.__class__.__name__}({self.name}, {self.val})" def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ): return self.val < other.val class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ): A = {} A = {} A = self.build_heap(__UpperCamelCase ) def __getitem__( self :int , __UpperCamelCase :Optional[int] ): return self.get_value(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ): return (idx - 1) // 2 def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): return idx * 2 + 1 def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ): return idx * 2 + 2 def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ): return self.heap_dict[key] def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): A = len(__UpperCamelCase ) - 1 A = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): A = idx A = i.val for i in range(__UpperCamelCase , -1 , -1 ): self.sift_down(__UpperCamelCase , __UpperCamelCase ) return array def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ): while True: A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 A = self.get_right_child_idx(__UpperCamelCase ) A = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: A = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: A = r if smallest != idx: A, A = array[smallest], array[idx] ( ( A ), ( A ), ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) A = smallest else: break def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ): A = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: A, A = self.heap[idx], self.heap[p] A, A = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) A = p A = self.get_parent_idx(__UpperCamelCase ) def lowerCamelCase ( self :Any ): return self.heap[0] def lowerCamelCase ( self :Tuple ): A, A = self.heap[-1], self.heap[0] A, A = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) A = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ): self.heap.append(__UpperCamelCase ) A = len(self.heap ) - 1 A = node.val self.sift_up(len(self.heap ) - 1 ) def lowerCamelCase ( self :Tuple ): return len(self.heap ) == 0 def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" A = new_value A = new_value self.sift_up(self.idx_of_element[node] ) _snake_case : Optional[int] = Node('R', -1) _snake_case : Tuple = Node('B', 6) _snake_case : Tuple = Node('A', 3) _snake_case : Optional[int] = Node('X', 1) _snake_case : List[Any] = Node('E', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array _snake_case : Tuple = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('Min Heap - before decrease key') for i in my_min_heap.heap: print(i) print('Min Heap - After decrease key of node [B -> 
-17]') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
292
0
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
222
"""Project Euler problem 20 (https://projecteuler.net/problem=20): sum of the digits in 100!."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
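# Quick checks for the function above; both values are easy to confirm,
# and 648 is the published Project Euler 20 answer.
assert solution(10) == 27  # 10! = 3628800 -> 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27
assert solution(100) == 648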
222
1
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _snake_case = logging.get_logger(__name__) _snake_case = { "microsoft/conditional-detr-resnet-50": ( "https://huggingface.co./microsoft/conditional-detr-resnet-50/resolve/main/config.json" ), } class UpperCAmelCase_ ( a): lowerCamelCase__ = 'conditional_detr' lowerCamelCase__ = ['past_key_values'] lowerCamelCase__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self, __a=True, __a=None, __a=3, __a=300, __a=6, __a=2048, __a=8, __a=6, __a=2048, __a=8, __a=0.0, __a=0.0, __a=True, __a="relu", __a=256, __a=0.1, __a=0.0, __a=0.0, __a=0.02, __a=1.0, __a=False, __a="sine", __a="resnet50", __a=True, __a=False, __a=2, __a=5, __a=2, __a=1, __a=1, __a=2, __a=5, __a=2, __a=0.25, **__a, ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.") if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") _lowerCAmelCase : str = CONFIG_MAPPING["resnet"](out_features=["stage4"]) elif isinstance(__a, __a): _lowerCAmelCase : Union[str, Any] = backbone_config.get("model_type") _lowerCAmelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type] _lowerCAmelCase : Dict = config_class.from_dict(__a) _lowerCAmelCase : Dict = use_timm_backbone _lowerCAmelCase : Optional[int] = backbone_config _lowerCAmelCase : Union[str, Any] = num_channels _lowerCAmelCase : int = num_queries _lowerCAmelCase : Tuple = d_model _lowerCAmelCase : Dict = encoder_ffn_dim _lowerCAmelCase : Any = encoder_layers _lowerCAmelCase : int = encoder_attention_heads _lowerCAmelCase : str = decoder_ffn_dim _lowerCAmelCase : Tuple = decoder_layers _lowerCAmelCase : Optional[Any] = decoder_attention_heads _lowerCAmelCase : Tuple = dropout _lowerCAmelCase : Any = attention_dropout _lowerCAmelCase : List[str] = activation_dropout _lowerCAmelCase : Dict = activation_function _lowerCAmelCase : Union[str, Any] = init_std _lowerCAmelCase : str = init_xavier_std _lowerCAmelCase : Optional[Any] = encoder_layerdrop _lowerCAmelCase : List[str] = decoder_layerdrop _lowerCAmelCase : Dict = encoder_layers _lowerCAmelCase : List[str] = auxiliary_loss _lowerCAmelCase : List[Any] = position_embedding_type _lowerCAmelCase : Union[str, Any] = backbone _lowerCAmelCase : Optional[Any] = use_pretrained_backbone _lowerCAmelCase : int = dilation # Hungarian matcher _lowerCAmelCase : Dict = class_cost _lowerCAmelCase : str = bbox_cost _lowerCAmelCase : List[Any] = giou_cost # Loss coefficients _lowerCAmelCase : Optional[int] = mask_loss_coefficient _lowerCAmelCase : Optional[int] = dice_loss_coefficient _lowerCAmelCase : Tuple = cls_loss_coefficient _lowerCAmelCase : Dict = bbox_loss_coefficient _lowerCAmelCase : List[Any] = giou_loss_coefficient _lowerCAmelCase : Union[str, Any] = focal_alpha super().__init__(is_encoder_decoder=__a, **__a) @property def snake_case__ ( self): '''simple docstring''' return self.encoder_attention_heads @property def snake_case__ ( self): '''simple docstring''' return self.d_model def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = copy.deepcopy(self.__dict__) if self.backbone_config is not None: _lowerCAmelCase : List[str] 
= self.backbone_config.to_dict() _lowerCAmelCase : int = self.__class__.model_type return output class UpperCAmelCase_ ( a): lowerCamelCase__ = version.parse('1.11') @property def snake_case__ ( self): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ]) @property def snake_case__ ( self): '''simple docstring''' return 1E-5 @property def snake_case__ ( self): '''simple docstring''' return 12
36
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCamelCase = { "configuration_chinese_clip": [ "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ChineseCLIPConfig", "ChineseCLIPOnnxConfig", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig", ], "processing_chinese_clip": ["ChineseCLIPProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = ["ChineseCLIPFeatureExtractor"] __lowerCamelCase = ["ChineseCLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys __lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
221
0
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __A = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __A = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __A = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = len([g for position, g in enumerate(_lowercase ) if g == main_target[position]] ) return (item, float(_lowercase )) def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = random.randint(0 , len(_lowercase ) - 1 ) _A = parent_a[:random_slice] + parent_a[random_slice:] _A = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = list(_lowercase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: _A = random.choice(_lowercase ) return "".join(_lowercase ) def __A ( _lowercase , _lowercase , _lowercase , ): '''simple docstring''' _A = [] # Generate more children proportionally to the fitness score. _A = int(parent_a[1] * 1_00 ) + 1 _A = 10 if child_n >= 10 else child_n for _ in range(_lowercase ): _A = population_score[random.randint(0 , _lowercase )][0] _A ,_A = crossover(parent_a[0] , _lowercase ) # Append new string to the population list. pop.append(mutate(_lowercase , _lowercase ) ) pop.append(mutate(_lowercase , _lowercase ) ) return pop def __A ( _lowercase , _lowercase , _lowercase = True ): '''simple docstring''' if N_POPULATION < N_SELECTED: _A = f"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(_lowercase ) # Verify that the target contains no genes besides the ones inside genes variable. _A = sorted({c for c in target if c not in genes} ) if not_in_genes_list: _A = f"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(_lowercase ) # Generate random starting population. _A = [] for _ in range(_lowercase ): population.append(''''''.join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) ) # Just some logs to know what the algorithms is doing. _A ,_A = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_lowercase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. _A = [evaluate(_lowercase , _lowercase ) for item in population] # Check if there is a matching evolution. _A = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. 
if debug and generation % 10 == 0: print( f"""\nGeneration: {generation}""" f"""\nTotal Population:{total_population}""" f"""\nBest score: {population_score[0][1]}""" f"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. _A = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_lowercase ) # Normalize population score to be between 0 and 1. _A = [ (item, score / len(_lowercase )) for item, score in population_score ] # This is selection for i in range(_lowercase ): population.extend(select(population_score[int(_lowercase )] , _lowercase , _lowercase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_lowercase ) > N_POPULATION: break if __name__ == "__main__": __A = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) __A = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) __A , __A , __A = basic(target_str, genes_list) print( f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}' )
352
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
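# Since the class above only exists to emit a deprecation warning, new code
# should construct the replacement directly. Minimal migration sketch; the
# checkpoint name is an assumption, any DPT checkpoint works.
from transformers import DPTImageProcessor

image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")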
75
0
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration snake_case_ : int = pytest.mark.integration snake_case_ : str = {"comet"} snake_case_ : Optional[Any] = importlib.util.find_spec("fairseq") is not None snake_case_ : List[Any] = {"code_eval"} snake_case_ : str = os.name == "nt" snake_case_ : Any = {"bertscore", "frugalscore", "perplexity"} snake_case_ : str = importlib.util.find_spec("transformers") is not None def A (__A : Dict ) -> List[Any]: """simple docstring""" @wraps(__A ) def wrapper(self : Optional[int] , __A : Tuple ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''"test requires Fairseq"''' ) else: test_case(self , __A ) return wrapper def A (__A : Tuple ) -> List[str]: """simple docstring""" @wraps(__A ) def wrapper(self : str , __A : int ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''"test requires transformers"''' ) else: test_case(self , __A ) return wrapper def A (__A : int ) -> List[Any]: """simple docstring""" @wraps(__A ) def wrapper(self : List[Any] , __A : str ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''"test not supported on Windows"''' ) else: test_case(self , __A ) return wrapper def A () -> int: """simple docstring""" UpperCAmelCase_ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( a , a , a ) @local class __snake_case ( parameterized.TestCase ): UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : List[str] = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''') @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''') def lowerCamelCase ( self : Union[str, Any] , _snake_case : str): """simple docstring""" UpperCAmelCase_ = '''[...]''' UpperCAmelCase_ = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , _snake_case)).module_path) UpperCAmelCase_ = datasets.load.import_main_class(metric_module.__name__ , dataset=_snake_case) # check parameters UpperCAmelCase_ = inspect.signature(metric._compute).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs # run doctest with self.patch_intensive_calls(_snake_case , metric_module.__name__): with self.use_local_metrics(): try: UpperCAmelCase_ = doctest.testmod(_snake_case , verbose=_snake_case , raise_on_error=_snake_case) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0) self.assertGreater(results.attempted , 1) @slow def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = '''[...]''' UpperCAmelCase_ = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , _snake_case)).module_path) # run doctest with self.use_local_metrics(): UpperCAmelCase_ = doctest.testmod(_snake_case , verbose=_snake_case , raise_on_error=_snake_case) self.assertEqual(results.failed , 0) 
self.assertGreater(results.attempted , 1) @contextmanager def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any] , _snake_case : List[str]): """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](_snake_case): yield else: yield @contextmanager def lowerCamelCase ( self : Optional[Any]): """simple docstring""" def load_local_metric(_snake_case : List[str] , *_snake_case : Union[str, Any] , **_snake_case : List[Any]): return load_metric(os.path.join('''metrics''' , _snake_case) , *_snake_case , **_snake_case) with patch('''datasets.load_metric''') as mock_load_metric: UpperCAmelCase_ = load_local_metric yield @classmethod def lowerCamelCase ( cls : List[str] , _snake_case : List[Any]): """simple docstring""" def wrapper(_snake_case : Optional[Any]): UpperCAmelCase_ = contextmanager(_snake_case) UpperCAmelCase_ = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def A (__A : Any ) -> List[str]: """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags class __snake_case ( a ): def lowerCamelCase ( self : int , _snake_case : Optional[int]): """simple docstring""" assert len(input_dict['''input_ids''']) == 2 return np.array([1.0_3, 1.0_4]) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: UpperCAmelCase_ = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def A (__A : int ) -> Tuple: """simple docstring""" import torch def bert_cos_score_idf(__A : str , __A : Optional[Any] , *__A : List[Any] , **__A : List[str] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__A ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: UpperCAmelCase_ = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def A (__A : List[str] ) -> Optional[Any]: """simple docstring""" def load_from_checkpoint(__A : str ): class __snake_case : def lowerCamelCase ( self : Any , _snake_case : Optional[int] , *_snake_case : str , **_snake_case : Optional[int]): """simple docstring""" assert len(_snake_case) == 2 UpperCAmelCase_ = [0.1_9, 0.9_2] return scores, sum(_snake_case) / len(_snake_case) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: UpperCAmelCase_ = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: UpperCAmelCase_ = load_from_checkpoint yield def A () -> int: """simple docstring""" UpperCAmelCase_ = load_metric(os.path.join('''metrics''' , '''seqeval''' ) ) UpperCAmelCase_ = '''ERROR''' UpperCAmelCase_ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(__A , match=re.escape(__A ) ): metric.compute(predictions=[] , references=[] , scheme=__A )
51
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __snake_case ( a , a , a , unittest.TestCase ): UpperCAmelCase__ : List[Any] = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase__ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} ) UpperCAmelCase__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCamelCase ( self : int): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0) UpperCAmelCase_ = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0) UpperCAmelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCAmelCase_ = CLIPTextModel(_snake_case) UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') UpperCAmelCase_ = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCamelCase ( self : Union[str, Any] , _snake_case : Any , _snake_case : Dict=0): """simple docstring""" if str(_snake_case).startswith('''mps'''): UpperCAmelCase_ = torch.manual_seed(_snake_case) else: UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case) 
UpperCAmelCase_ = 2 UpperCAmelCase_ = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ) UpperCAmelCase_ = floats_tensor(control_image.shape , rng=random.Random(_snake_case)).to(_snake_case) UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0] UpperCAmelCase_ = Image.fromarray(np.uinta(_snake_case)).convert('''RGB''').resize((64, 64)) UpperCAmelCase_ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def lowerCamelCase ( self : Any): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def lowerCamelCase ( self : Any): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2e-3) class __snake_case ( a , a , unittest.TestCase ): UpperCAmelCase__ : str = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase__ : str = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def lowerCamelCase ( self : str): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0) def init_weights(_snake_case : Optional[int]): if isinstance(_snake_case , torch.nn.Convad): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) UpperCAmelCase_ = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(_snake_case) torch.manual_seed(0) UpperCAmelCase_ = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(_snake_case) torch.manual_seed(0) UpperCAmelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
UpperCAmelCase_ = CLIPTextModel(_snake_case) UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') UpperCAmelCase_ = MultiControlNetModel([controlneta, controlneta]) UpperCAmelCase_ = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCamelCase ( self : int , _snake_case : Union[str, Any] , _snake_case : str=0): """simple docstring""" if str(_snake_case).startswith('''mps'''): UpperCAmelCase_ = torch.manual_seed(_snake_case) else: UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case) UpperCAmelCase_ = 2 UpperCAmelCase_ = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ), ] UpperCAmelCase_ = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case)).to(_snake_case) UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0] UpperCAmelCase_ = Image.fromarray(np.uinta(_snake_case)).convert('''RGB''').resize((64, 64)) UpperCAmelCase_ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.get_dummy_components() UpperCAmelCase_ = self.pipeline_class(**_snake_case) pipe.to(_snake_case) UpperCAmelCase_ = 1_0.0 UpperCAmelCase_ = 4 UpperCAmelCase_ = self.get_dummy_inputs(_snake_case) UpperCAmelCase_ = steps UpperCAmelCase_ = scale UpperCAmelCase_ = pipe(**_snake_case)[0] UpperCAmelCase_ = self.get_dummy_inputs(_snake_case) UpperCAmelCase_ = steps UpperCAmelCase_ = scale UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2)[0] UpperCAmelCase_ = self.get_dummy_inputs(_snake_case) UpperCAmelCase_ = steps UpperCAmelCase_ = scale UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0] UpperCAmelCase_ = self.get_dummy_inputs(_snake_case) UpperCAmelCase_ = steps UpperCAmelCase_ = scale UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a)) > 1e-3 assert np.sum(np.abs(output_a - output_a)) > 1e-3 assert np.sum(np.abs(output_a - output_a)) > 1e-3 def lowerCamelCase ( self : Dict): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def lowerCamelCase ( self : int): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def lowerCamelCase ( self : int): """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2e-3) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.get_dummy_components() UpperCAmelCase_ = self.pipeline_class(**_snake_case) 
pipe.to(_snake_case) pipe.set_progress_bar_config(disable=_snake_case) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(_snake_case) except NotImplementedError: pass @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[int]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''') UpperCAmelCase_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , safety_checker=_snake_case , controlnet=_snake_case) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_snake_case) UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0) UpperCAmelCase_ = '''evil space-punk bird''' UpperCAmelCase_ = load_image( '''https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''').resize((512, 512)) UpperCAmelCase_ = load_image( '''https://huggingface.co./lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''').resize((512, 512)) UpperCAmelCase_ = pipe( _snake_case , _snake_case , control_image=_snake_case , generator=_snake_case , output_type='''np''' , num_inference_steps=50 , strength=0.6 , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) UpperCAmelCase_ = load_numpy( '''https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''') assert np.abs(expected_image - image).max() < 9e-2
51
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class snake_case : def __init__( self : Optional[Any] , a__ : Any ) -> List[str]: '''simple docstring''' _A = data _A = None class snake_case : def __init__( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' _A = None _A = None def __iter__( self : str ) -> Iterator[Any]: '''simple docstring''' _A = self.head while self.head: yield node.data _A = node.next if node == self.head: break def __len__( self : List[Any] ) -> int: '''simple docstring''' return sum(1 for _ in self ) def __repr__( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' return "->".join(str(a__ ) for item in iter(self ) ) def a_ ( self : List[Any] , a__ : Any ) -> None: '''simple docstring''' self.insert_nth(len(self ) , a__ ) def a_ ( self : Optional[Any] , a__ : Any ) -> None: '''simple docstring''' self.insert_nth(0 , a__ ) def a_ ( self : Optional[Any] , a__ : int , a__ : Any ) -> None: '''simple docstring''' if index < 0 or index > len(self ): raise IndexError("list index out of range." ) _A = Node(a__ ) if self.head is None: _A = new_node # first node points itself _A = _A = new_node elif index == 0: # insert at head _A = self.head _A = _A = new_node else: _A = self.head for _ in range(index - 1 ): _A = temp.next _A = temp.next _A = new_node if index == len(self ) - 1: # insert at tail _A = new_node def a_ ( self : str ) -> List[Any]: '''simple docstring''' return self.delete_nth(0 ) def a_ ( self : Tuple ) -> Any: '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def a_ ( self : List[str] , a__ : int = 0 ) -> Any: '''simple docstring''' if not 0 <= index < len(self ): raise IndexError("list index out of range." ) _A = self.head if self.head == self.tail: # just one node _A = _A = None elif index == 0: # delete head node _A = self.tail.next.next _A = self.head.next else: _A = self.head for _ in range(index - 1 ): _A = temp.next _A = temp.next _A = temp.next.next if index == len(self ) - 1: # delete at tail _A = temp return delete_node.data def a_ ( self : List[str] ) -> bool: '''simple docstring''' return len(self ) == 0 def a__ ( ) -> None: _A = CircularLinkedList() assert len(__lowercase ) == 0 assert circular_linked_list.is_empty() is True assert str(__lowercase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(__lowercase ) == i circular_linked_list.insert_nth(__lowercase , i + 1 ) assert str(__lowercase ) == "->".join(str(__lowercase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(__lowercase ) == "->".join(str(__lowercase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(__lowercase ) == "->".join(str(__lowercase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(__lowercase ) == "->".join(str(__lowercase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert 
str(__lowercase ) == "->".join(str(__lowercase ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 163
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig a_ = logging.get_logger(__name__) # General docstring a_ = "PoolFormerConfig" # Base docstring a_ = "sail/poolformer_s12" a_ = [1, 5_12, 7, 7] # Image classification docstring a_ = "sail/poolformer_s12" a_ = "tabby, tabby cat" a_ = [ "sail/poolformer_s12", # See all PoolFormer models at https://huggingface.co./models?filter=poolformer ] def a__ ( __lowercase , __lowercase = 0.0 , __lowercase = False ) -> Dict: if drop_prob == 0.0 or not training: return input _A = 1 - drop_prob _A = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets _A = keep_prob + torch.rand(__lowercase , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize _A = input.div(__lowercase ) * random_tensor return output class snake_case ( nn.Module): def __init__( self : Any , a__ : Optional[float] = None ) -> None: '''simple docstring''' super().__init__() _A = drop_prob def a_ ( self : Optional[Any] , a__ : torch.Tensor ) -> torch.Tensor: '''simple docstring''' return drop_path(a__ , self.drop_prob , self.training ) def a_ ( self : List[str] ) -> str: '''simple docstring''' return "p={}".format(self.drop_prob ) class snake_case ( nn.Module): def __init__( self : Union[str, Any] , a__ : List[Any] , a__ : Any , a__ : List[Any] , a__ : Optional[int] , a__ : Dict , a__ : str=None ) -> Optional[Any]: '''simple docstring''' super().__init__() _A = patch_size if isinstance(a__ , collections.abc.Iterable ) else (patch_size, patch_size) _A = stride if isinstance(a__ , collections.abc.Iterable ) else (stride, stride) _A = padding if isinstance(a__ , collections.abc.Iterable ) else (padding, padding) _A = nn.Convad(a__ , a__ , kernel_size=a__ , stride=a__ , padding=a__ ) _A = norm_layer(a__ ) if norm_layer else nn.Identity() def a_ ( self : Dict , a__ : Any ) -> List[str]: '''simple docstring''' _A = self.projection(a__ ) _A = self.norm(a__ ) return embeddings class snake_case ( nn.GroupNorm): def __init__( self : Dict , a__ : Optional[int] , **a__ : Dict ) -> Optional[Any]: '''simple docstring''' super().__init__(1 , a__ , **a__ ) class snake_case ( nn.Module): def __init__( self : int , a__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' super().__init__() _A = nn.AvgPoolad(a__ , stride=1 , padding=pool_size // 2 , count_include_pad=a__ ) def a_ ( self : List[str] , a__ : int ) -> str: '''simple docstring''' return self.pool(a__ ) - hidden_states class snake_case ( nn.Module): def __init__( self : Tuple , a__ : Optional[int] , a__ : Optional[Any] , a__ : List[str] , a__ : Optional[int] ) -> Any: '''simple docstring''' super().__init__() _A = nn.Convad(a__ , a__ , 1 ) _A = nn.Convad(a__ , a__ , 1 ) _A = PoolFormerDropPath(a__ ) if isinstance(config.hidden_act , a__ ): _A = ACTaFN[config.hidden_act] else: _A = config.hidden_act def a_ ( self : List[Any] , a__ : int ) -> Dict: '''simple docstring''' _A = self.conva(a__ ) _A = self.act_fn(a__ ) _A = self.drop(a__ ) _A = self.conva(a__ ) _A = 
self.drop(a__ ) return hidden_states class snake_case ( nn.Module): def __init__( self : Union[str, Any] , a__ : str , a__ : List[str] , a__ : List[Any] , a__ : List[str] , a__ : Optional[Any] , a__ : Tuple ) -> Dict: '''simple docstring''' super().__init__() _A = PoolFormerPooling(a__ ) _A = PoolFormerOutput(a__ , a__ , a__ , a__ ) _A = PoolFormerGroupNorm(a__ ) _A = PoolFormerGroupNorm(a__ ) # Useful for training neural nets _A = PoolFormerDropPath(a__ ) if drop_path > 0.0 else nn.Identity() _A = config.use_layer_scale if config.use_layer_scale: _A = nn.Parameter( config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ ) _A = nn.Parameter( config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ ) def a_ ( self : Union[str, Any] , a__ : Optional[int] ) -> Tuple: '''simple docstring''' if self.use_layer_scale: _A = self.pooling(self.before_norm(a__ ) ) _A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection _A = hidden_states + self.drop_path(a__ ) _A = () _A = self.output(self.after_norm(a__ ) ) _A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection _A = hidden_states + self.drop_path(a__ ) _A = (output,) + outputs return outputs else: _A = self.drop_path(self.pooling(self.before_norm(a__ ) ) ) # First residual connection _A = pooling_output + hidden_states _A = () # Second residual connection inside the PoolFormerOutput block _A = self.drop_path(self.output(self.after_norm(a__ ) ) ) _A = hidden_states + layer_output _A = (output,) + outputs return outputs class snake_case ( nn.Module): def __init__( self : str , a__ : int ) -> Any: '''simple docstring''' super().__init__() _A = config # stochastic depth decay rule _A = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings _A = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) _A = nn.ModuleList(a__ ) # Transformer blocks _A = [] _A = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers _A = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( a__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(a__ ) ) _A = nn.ModuleList(a__ ) def a_ ( self : Tuple , a__ : Union[str, Any] , a__ : Tuple=False , a__ : List[str]=True ) -> List[Any]: '''simple docstring''' _A = () if output_hidden_states else None _A = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): _A , _A = layers # Get patch embeddings from hidden_states _A = embedding_layer(a__ ) # Send the embeddings through the blocks for _, blk in enumerate(a__ ): _A = blk(a__ ) _A = layer_outputs[0] if output_hidden_states: _A = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__ ) class snake_case ( _UpperCamelCase): __UpperCamelCase = PoolFormerConfig __UpperCamelCase = 'poolformer' __UpperCamelCase = 'pixel_values' __UpperCamelCase = True 
def a_ ( self : Tuple , a__ : Dict ) -> Any: '''simple docstring''' if isinstance(a__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(a__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def a_ ( self : int , a__ : Dict , a__ : int=False ) -> str: '''simple docstring''' if isinstance(a__ , a__ ): _A = value a_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" a_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n" @add_start_docstrings( 'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , _UpperCamelCase , ) class snake_case ( _UpperCamelCase): def __init__( self : int , a__ : Dict ) -> str: '''simple docstring''' super().__init__(a__ ) _A = config _A = PoolFormerEncoder(a__ ) # Initialize weights and apply final processing self.post_init() def a_ ( self : Union[str, Any] ) -> str: '''simple docstring''' return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(a__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a_ ( self : Tuple , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[bool] = None , a__ : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: '''simple docstring''' _A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _A = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) _A = self.encoder( a__ , output_hidden_states=a__ , return_dict=a__ , ) _A = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=a__ , hidden_states=encoder_outputs.hidden_states , ) class snake_case ( nn.Module): def __init__( self : List[str] , a__ : Dict ) -> Optional[Any]: '''simple docstring''' super().__init__() _A = nn.Linear(config.hidden_size , config.hidden_size ) def a_ ( self : int , a__ : Tuple ) -> str: '''simple docstring''' _A = self.dense(a__ ) return output @add_start_docstrings( '\n PoolFormer Model transformer with an image classification head on top\n ' , _UpperCamelCase , ) class snake_case ( _UpperCamelCase): def __init__( self : Tuple , a__ : str ) -> Optional[int]: '''simple docstring''' super().__init__(a__ ) _A = config.num_labels _A = PoolFormerModel(a__ ) # Final norm _A = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head _A = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final 
processing self.post_init() @add_start_docstrings_to_model_forward(a__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a_ ( self : int , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.LongTensor] = None , a__ : Optional[bool] = None , a__ : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: '''simple docstring''' _A = return_dict if return_dict is not None else self.config.use_return_dict _A = self.poolformer( a__ , output_hidden_states=a__ , return_dict=a__ , ) _A = outputs[0] _A = self.classifier(self.norm(a__ ).mean([-2, -1] ) ) _A = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _A = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _A = "single_label_classification" else: _A = "multi_label_classification" if self.config.problem_type == "regression": _A = MSELoss() if self.num_labels == 1: _A = loss_fct(logits.squeeze() , labels.squeeze() ) else: _A = loss_fct(a__ , a__ ) elif self.config.problem_type == "single_label_classification": _A = CrossEntropyLoss() _A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _A = BCEWithLogitsLoss() _A = loss_fct(a__ , a__ ) if not return_dict: _A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states )
style_context_codestyle: 163
label: 1
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 35
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
style_context_codestyle: 313
label: 0
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
code_codestyle: 365
from sklearn.metrics import recall_score

import datasets


_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""

_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
    - `0`: If there is a zero division, the return value is `0`.
    - `1`: If there is a zero division, the return value is `1`.

Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.

Examples:

    Example 1-A simple example with some errors
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
        >>> print(results)
        {'recall': 0.6666666666666666}

    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
        >>> print(results)
        {'recall': 0.5}

    Example 3-The same example as Example 1, but with `sample_weight` included.
        >>> recall_metric = datasets.load_metric('recall')
        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
        >>> print(results)
        {'recall': 0.55}

    Example 4-A multiclass example, using different averages.
        >>> recall_metric = datasets.load_metric('recall')
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'recall': array([1., 0., 0.])}
"""

_CITATION = """
@article{scikit-learn,
    title={Scikit-learn: Machine Learning in {P}ython},
    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
    journal={Journal of Machine Learning Research},
    volume={12},
    pages={2825--2830},
    year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
style_context_codestyle: 47
label: 0
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
code_codestyle: 222
def reverse_long_words(sentence: str) -> str:
    """Reverse all words that are longer than 4 characters in a sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
style_context_codestyle: 222
label: 1
"""simple docstring""" from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCAmelCase__ = logging.get_logger(__name__) def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Any ) -> str: return [ int(10_00 * (box[0] / width) ), int(10_00 * (box[1] / height) ), int(10_00 * (box[2] / width) ), int(10_00 * (box[3] / height) ), ] def _UpperCAmelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : Optional[str] , __lowerCamelCase : Optional[str] = None ) -> Optional[int]: _snake_case = tesseract_config if tesseract_config is not None else '''''' # apply OCR _snake_case = to_pil_image(__lowerCamelCase ) _snake_case , _snake_case = pil_image.size _snake_case = pytesseract.image_to_data(__lowerCamelCase , lang=__lowerCamelCase , output_type='''dict''' , config=__lowerCamelCase ) _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates _snake_case = [idx for idx, word in enumerate(__lowerCamelCase ) if not word.strip()] _snake_case = [word for idx, word in enumerate(__lowerCamelCase ) if idx not in irrelevant_indices] _snake_case = [coord for idx, coord in enumerate(__lowerCamelCase ) if idx not in irrelevant_indices] _snake_case = [coord for idx, coord in enumerate(__lowerCamelCase ) if idx not in irrelevant_indices] _snake_case = [coord for idx, coord in enumerate(__lowerCamelCase ) if idx not in irrelevant_indices] _snake_case = [coord for idx, coord in enumerate(__lowerCamelCase ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _snake_case = [] for x, y, w, h in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): _snake_case = [x, y, x + w, y + h] actual_boxes.append(__lowerCamelCase ) # finally, normalize the bounding boxes _snake_case = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCAmelCase__ ( A_ ): __a = ["""pixel_values"""] def __init__( self : Optional[int] , _lowerCamelCase : bool = True , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCamelCase : bool = True , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[str] = "" , **_lowerCamelCase : Optional[int] , ): super().__init__(**_lowerCamelCase ) _snake_case = size if size is not None else {'''height''': 224, '''width''': 224} _snake_case = get_size_dict(_lowerCamelCase ) _snake_case = do_resize _snake_case = size _snake_case = resample _snake_case = apply_ocr _snake_case = ocr_lang _snake_case = tesseract_config def lowercase ( self : List[Any] , _lowerCamelCase : np.ndarray , _lowerCamelCase : Dict[str, int] , 
_lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : Any , ): _snake_case = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) _snake_case = (size['''height'''], size['''width''']) return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def lowercase ( self : Tuple , _lowerCamelCase : ImageInput , _lowerCamelCase : bool = None , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : PILImageResampling = None , _lowerCamelCase : bool = None , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , _lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **_lowerCamelCase : List[str] , ): _snake_case = do_resize if do_resize is not None else self.do_resize _snake_case = size if size is not None else self.size _snake_case = get_size_dict(_lowerCamelCase ) _snake_case = resample if resample is not None else self.resample _snake_case = apply_ocr if apply_ocr is not None else self.apply_ocr _snake_case = ocr_lang if ocr_lang is not None else self.ocr_lang _snake_case = tesseract_config if tesseract_config is not None else self.tesseract_config _snake_case = make_list_of_images(_lowerCamelCase ) if not valid_images(_lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) # All transformations expect numpy arrays. _snake_case = [to_numpy_array(_lowerCamelCase ) for image in images] if apply_ocr: requires_backends(self , '''pytesseract''' ) _snake_case = [] _snake_case = [] for image in images: _snake_case , _snake_case = apply_tesseract(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) words_batch.append(_lowerCamelCase ) boxes_batch.append(_lowerCamelCase ) if do_resize: _snake_case = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) _snake_case = [flip_channel_order(_lowerCamelCase ) for image in images] _snake_case = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images] _snake_case = BatchFeature(data={'''pixel_values''': images} , tensor_type=_lowerCamelCase ) if apply_ocr: _snake_case = words_batch _snake_case = boxes_batch return data
code_codestyle: 352
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=13 , _lowerCamelCase : int=32 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[int]=[10, 20, 30, 40] , _lowerCamelCase : Dict=[2, 2, 3, 2] , _lowerCamelCase : Dict=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : Tuple=37 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : Optional[Any]=10 , _lowerCamelCase : Any=0.0_2 , _lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , _lowerCamelCase : Any=[2, 3, 4] , _lowerCamelCase : Any=None , ): _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = num_stages _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = intermediate_size _snake_case = hidden_act _snake_case = num_labels _snake_case = initializer_range _snake_case = out_features _snake_case = out_indices _snake_case = scope def lowercase ( self : Dict ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase ( self : str ): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowercase ( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : List[str] ): _snake_case = ConvNextVaModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() _snake_case = model(_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase ( self : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] ): _snake_case = ConvNextVaForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : 
List[Any] , _lowerCamelCase : Tuple ): _snake_case = ConvNextVaBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() _snake_case = model(_lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case = None _snake_case = ConvNextVaBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() _snake_case = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowercase ( self : str ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict def lowercase ( self : int ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) __a = ( {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification} if is_torch_available() else {} ) __a = False __a = False __a = False __a = False __a = False def lowercase ( self : str ): _snake_case = ConvNextVaModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def lowercase ( self : List[str] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase ( self : Dict ): return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def lowercase ( self : Dict ): pass @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def lowercase ( self : int ): pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def lowercase ( self : int ): pass def lowercase ( self : Union[str, Any] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case = True if model_class.__name__ in [ *get_values(_lowerCamelCase ), *get_values(_lowerCamelCase ), ]: continue _snake_case = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.train() _snake_case = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) _snake_case = model(**_lowerCamelCase ).loss loss.backward() def lowercase ( self : 
Dict ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case = False _snake_case = True if ( model_class.__name__ in [*get_values(_lowerCamelCase ), *get_values(_lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue _snake_case = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.gradient_checkpointing_enable() model.train() _snake_case = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) _snake_case = model(**_lowerCamelCase ).loss loss.backward() def lowercase ( self : Optional[Any] ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowercase ( self : Optional[Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Optional[int] ): def check_hidden_states_output(_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] ): _snake_case = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowercase ( self : List[str] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def lowercase ( self : str ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = ConvNextVaModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Optional[Any]: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : List[Any] ): return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def lowercase ( self : Optional[Any] ): _snake_case = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_lowerCamelCase ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = 
preprocessor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): _snake_case = model(**_lowerCamelCase ) # verify the logits _snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) _snake_case = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
style_context_codestyle: 40
label: 0
"""simple docstring""" from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class __magic_name__ ( lowerCamelCase__ ): '''simple docstring''' def __init__( self , _a = None , _a = None , _a = None , _a = None , _a = False , _a = False , _a = None , **_a , ): """simple docstring""" lowerCamelCase = path_or_paths lowerCamelCase = split if split or isinstance(_a , _a ) else """train""" lowerCamelCase = features lowerCamelCase = cache_dir lowerCamelCase = keep_in_memory lowerCamelCase = streaming lowerCamelCase = num_proc lowerCamelCase = kwargs @abstractmethod def _lowerCAmelCase ( self ): """simple docstring""" pass class __magic_name__ ( lowerCamelCase__ ): '''simple docstring''' def __init__( self , _a = None , _a = None , _a = False , _a = False , _a = None , **_a , ): """simple docstring""" lowerCamelCase = features lowerCamelCase = cache_dir lowerCamelCase = keep_in_memory lowerCamelCase = streaming lowerCamelCase = num_proc lowerCamelCase = kwargs @abstractmethod def _lowerCAmelCase ( self ): """simple docstring""" pass
code_codestyle: 291
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
style_context_codestyle: 75
label: 0
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class StableDiffusionOnnxInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
code_codestyle: 142
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co./google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co./google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co./google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co./google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co./google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co./google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
142
1
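The BigBird tokenizer in the row above builds "[CLS] A [SEP] B [SEP]" inputs. Here is a minimal, dependency-free sketch of just that layout; the ids 65 and 66 are placeholders for the example, not BigBird's real special-token ids:

def build_inputs_with_special_tokens(token_ids_0, token_ids_1=None, cls_id=65, sep_id=66):
    # Single sequence: [CLS] X [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
    cls, sep = [cls_id], [sep_id]
    if token_ids_1 is None:
        return cls + token_ids_0 + sep
    return cls + token_ids_0 + sep + token_ids_1 + sep


assert build_inputs_with_special_tokens([1, 2]) == [65, 1, 2, 66]
assert build_inputs_with_special_tokens([1, 2], [3]) == [65, 1, 2, 66, 3, 66]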
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 255 , _lowerCamelCase=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p UpperCAmelCase__ : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Optional[int] = min_resolution UpperCAmelCase__ : Any = max_resolution UpperCAmelCase__ : Optional[int] = do_resize UpperCAmelCase__ : Optional[Any] = size UpperCAmelCase__ : Any = do_normalize UpperCAmelCase__ : Optional[Any] = image_mean UpperCAmelCase__ : Optional[Any] = image_std UpperCAmelCase__ : str = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_factor UpperCAmelCase__ : List[str] = do_pad def snake_case__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=False): if not batched: UpperCAmelCase__ : Optional[int] = image_inputs[0] if isinstance(_lowerCamelCase , Image.Image): UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Dict = int(self.size["""shortest_edge"""] * h / w) UpperCAmelCase__ : List[Any] = self.size["""shortest_edge"""] elif w > h: UpperCAmelCase__ : List[Any] = self.size["""shortest_edge"""] UpperCAmelCase__ : Any = int(self.size["""shortest_edge"""] * w / h) else: UpperCAmelCase__ : Dict = self.size["""shortest_edge"""] UpperCAmelCase__ : Any = self.size["""shortest_edge"""] else: UpperCAmelCase__ : Dict = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) UpperCAmelCase__ : List[str] = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[0])[0] UpperCAmelCase__ : Any = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[1])[1] return expected_height, expected_width @require_torch @require_vision class _snake_case ( a__ , unittest.TestCase ): lowerCAmelCase :Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None def snake_case__ ( self): UpperCAmelCase__ : Dict = ConditionalDetrImageProcessingTester(self) @property def snake_case__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self): UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_lowerCamelCase , 
"""image_mean""")) self.assertTrue(hasattr(_lowerCamelCase , """image_std""")) self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""")) self.assertTrue(hasattr(_lowerCamelCase , """do_resize""")) self.assertTrue(hasattr(_lowerCamelCase , """size""")) def snake_case__ ( self): UpperCAmelCase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333}) self.assertEqual(image_processor.do_pad , _lowerCamelCase) UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCamelCase) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84}) self.assertEqual(image_processor.do_pad , _lowerCamelCase) def snake_case__ ( self): pass def snake_case__ ( self): # Initialize image_processing UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PIL images UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase) UpperCAmelCase__ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self): # Initialize image_processing UpperCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors UpperCAmelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : Tuple = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self): # Initialize image_processing UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase) for image in image_inputs: 
self.assertIsInstance(_lowerCamelCase , torch.Tensor) # Test not batched input UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : int = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case__ ( self): # prepare image and target UpperCAmelCase__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""") as f: UpperCAmelCase__ : Dict = json.loads(f.read()) UpperCAmelCase__ : Tuple = {"""image_id""": 3_9769, """annotations""": target} # encode them UpperCAmelCase__ : int = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""") UpperCAmelCase__ : str = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors="""pt""") # verify pixel values UpperCAmelCase__ : List[Any] = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase) UpperCAmelCase__ : Any = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4)) # verify area UpperCAmelCase__ : int = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase)) # verify boxes UpperCAmelCase__ : Optional[Any] = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase) UpperCAmelCase__ : List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3)) # verify image_id UpperCAmelCase__ : Any = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase)) # verify is_crowd UpperCAmelCase__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase)) # verify class_labels UpperCAmelCase__ : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase)) # verify orig_size UpperCAmelCase__ : str = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase)) # verify size UpperCAmelCase__ : str = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase)) @slow def snake_case__ ( self): # prepare image, target and masks_path UpperCAmelCase__ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f: UpperCAmelCase__ : Optional[int] = json.loads(f.read()) UpperCAmelCase__ : Union[str, Any] = {"""file_name""": 
"""000000039769.png""", """image_id""": 3_9769, """segments_info""": target} UpperCAmelCase__ : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""") # encode them UpperCAmelCase__ : Dict = ConditionalDetrImageProcessor(format="""coco_panoptic""") UpperCAmelCase__ : Union[str, Any] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors="""pt""") # verify pixel values UpperCAmelCase__ : List[Any] = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase) UpperCAmelCase__ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4)) # verify area UpperCAmelCase__ : int = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase)) # verify boxes UpperCAmelCase__ : Optional[int] = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase) UpperCAmelCase__ : Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3)) # verify image_id UpperCAmelCase__ : List[str] = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase)) # verify is_crowd UpperCAmelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase)) # verify class_labels UpperCAmelCase__ : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase)) # verify masks UpperCAmelCase__ : Union[str, Any] = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _lowerCamelCase) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase)) # verify size UpperCAmelCase__ : List[str] = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase))
163
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co./google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co./models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
163
1
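The expected-size helper in the image-processor test of the row above encodes the usual shortest-edge resize rule. A minimal sketch of just that rule, ignoring the longest_edge cap the real processor also applies:

def shortest_edge_resize(height, width, shortest_edge=18):
    # Scale so the shorter side lands on `shortest_edge`, preserving aspect ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


print(shortest_edge_resize(400, 200))  # (36, 18)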
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
361
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A__ : def __init__( self , UpperCamelCase__ , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=2 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=36 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=6 , UpperCamelCase__=6 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , UpperCamelCase__=1000 , ) -> Optional[int]: '''simple docstring''' A_ = parent A_ = batch_size A_ = num_channels A_ = image_size A_ = patch_size A_ = text_seq_length A_ = is_training A_ = use_input_mask A_ = use_token_type_ids A_ = use_labels A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = type_vocab_size A_ = type_sequence_label_size A_ = initializer_range A_ = coordinate_size A_ = shape_size A_ = num_labels A_ = num_choices A_ = scope A_ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) A_ = text_seq_length A_ = (image_size // patch_size) ** 2 + 1 A_ = self.text_seq_length + self.image_seq_length def snake_case_ ( self ) -> Any: '''simple docstring''' A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) A_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A_ = bbox[i, j, 3] A_ = bbox[i, j, 1] A_ = t if bbox[i, j, 2] < bbox[i, j, 0]: A_ = bbox[i, j, 2] A_ = bbox[i, j, 0] A_ = t A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.text_seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) A_ = LayoutLMvaConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str: '''simple docstring''' A_ = LayoutLMvaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() # text + image A_ = model(UpperCamelCase__ , pixel_values=UpperCamelCase__ ) A_ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) A_ = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) A_ = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only A_ = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only A_ = model(pixel_values=UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' A_ = self.num_labels A_ = LayoutLMvaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A_ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str: '''simple docstring''' A_ = self.num_labels A_ = LayoutLMvaForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A_ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: '''simple docstring''' A_ = LayoutLMvaForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A_ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , 
start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case_ ( self ) -> int: '''simple docstring''' A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class A__ ( _snake_case , _snake_case , unittest.TestCase ): lowercase = False lowercase = False lowercase = False lowercase = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) lowercase = ( {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel} if is_torch_available() else {} ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: '''simple docstring''' # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def snake_case_ ( self ) -> str: '''simple docstring''' A_ = LayoutLMvaModelTester(self ) A_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Union[str, Any]: '''simple docstring''' A_ = copy.deepcopy(UpperCamelCase__ ) if model_class in get_values(UpperCamelCase__ ): A_ = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(UpperCamelCase__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCamelCase__ ): A_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) elif model_class in get_values(UpperCamelCase__ ): A_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) A_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) elif model_class in [ *get_values(UpperCamelCase__ ), ]: A_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) elif model_class in [ *get_values(UpperCamelCase__ ), ]: A_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCamelCase__ , ) return inputs_dict def snake_case_ ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def snake_case_ ( self ) -> List[str]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) def snake_case_ ( self ) -> Tuple: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) @slow def snake_case_ ( self ) -> Union[str, Any]: '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = LayoutLMvaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def UpperCAmelCase__ ( ) -> Dict: A_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class A__ ( unittest.TestCase ): @cached_property def snake_case_ ( self ) -> List[Any]: '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None @slow def snake_case_ ( self ) -> Optional[Any]: '''simple docstring''' A_ = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(UpperCamelCase__ ) A_ = self.default_image_processor A_ = prepare_img() A_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).pixel_values.to(UpperCamelCase__ ) A_ = torch.tensor([[1, 2]] ) A_ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass A_ = model( input_ids=input_ids.to(UpperCamelCase__ ) , bbox=bbox.to(UpperCamelCase__ ) , pixel_values=pixel_values.to(UpperCamelCase__ ) , ) # verify the logits A_ = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ ) A_ = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
101
0
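A quick worked trace of why Brian Kernighan's loop in the row above runs exactly popcount(n) times: each n &= n - 1 clears the lowest set bit and nothing else.

# 25 = 0b11001 has three set bits: 25 -> 24 -> 16 -> 0 in three steps.
n = 25
steps = []
while n:
    n &= n - 1
    steps.append(n)
print(steps)               # [24, 16, 0]
print(bin(25).count("1"))  # 3, built-in cross-check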
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co./MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
12
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE =botoa.client('iam' ) _SCREAMING_SNAKE_CASE ={ 'Version': '2012-10-17', 'Statement': [ {'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=_UpperCamelCase , AssumeRolePolicyDocument=json.dumps(_UpperCamelCase , indent=2 ) ) _SCREAMING_SNAKE_CASE ={ 'Version': '2012-10-17', 'Statement': [ { 'Effect': 'Allow', 'Action': [ 'sagemaker:*', 'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage', 'ecr:BatchCheckLayerAvailability', 'ecr:GetAuthorizationToken', 'cloudwatch:PutMetricData', 'cloudwatch:GetMetricData', 'cloudwatch:GetMetricStatistics', 'cloudwatch:ListMetrics', 'logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:DescribeLogStreams', 'logs:PutLogEvents', 'logs:GetLogEvents', 's3:CreateBucket', 's3:ListBucket', 's3:GetBucketLocation', 's3:GetObject', 's3:PutObject', ], 'Resource': '*', } ], } # attach policy to role iam_client.put_role_policy( RoleName=_UpperCamelCase , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(_UpperCamelCase , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(f"role {role_name} already exists. Using existing one" ) def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =botoa.client('iam' ) return iam_client.get_role(RoleName=_UpperCamelCase )["Role"]["Arn"] def _lowerCAmelCase ( ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =_ask_options( 'How do you want to authorize?' 
, ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , _UpperCamelCase , ) _SCREAMING_SNAKE_CASE =None if credentials_configuration == 0: _SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Profile name: [default] ' , default='default' ) _SCREAMING_SNAKE_CASE =aws_profile else: print( 'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,' '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' ) _SCREAMING_SNAKE_CASE =_ask_field('AWS Access Key ID: ' ) _SCREAMING_SNAKE_CASE =aws_access_key_id _SCREAMING_SNAKE_CASE =_ask_field('AWS Secret Access Key: ' ) _SCREAMING_SNAKE_CASE =aws_secret_access_key _SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' ) _SCREAMING_SNAKE_CASE =aws_region _SCREAMING_SNAKE_CASE =_ask_options( 'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , _UpperCamelCase , ) if role_management == 0: _SCREAMING_SNAKE_CASE =_ask_field('Enter your IAM role name: ' ) else: _SCREAMING_SNAKE_CASE ='accelerate_sagemaker_execution_role' print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" ) _create_iam_role_for_sagemaker(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE =None if is_custom_docker_image: _SCREAMING_SNAKE_CASE =_ask_field('Enter your Docker image: ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE =None if is_sagemaker_inputs_enabled: _SCREAMING_SNAKE_CASE =_ask_field( 'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE =None if is_sagemaker_metrics_enabled: _SCREAMING_SNAKE_CASE =_ask_field( 'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , ) _SCREAMING_SNAKE_CASE =_ask_options( 'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , ) _SCREAMING_SNAKE_CASE ={} _SCREAMING_SNAKE_CASE =_ask_field( 'Do you wish to optimize your script with torch dynamo?[yes/NO]:' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) if use_dynamo: _SCREAMING_SNAKE_CASE ='dynamo_' _SCREAMING_SNAKE_CASE =_ask_options( 'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) if use_custom_options: _SCREAMING_SNAKE_CASE =_ask_options( 'Which mode do you want to use?' 
, _UpperCamelCase , lambda _UpperCamelCase : TORCH_DYNAMO_MODES[int(_UpperCamelCase )] , default='default' , ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE ='Which EC2 instance type you want to use for your training?' if distributed_type != SageMakerDistributedType.NO: _SCREAMING_SNAKE_CASE =_ask_options( _UpperCamelCase , _UpperCamelCase , lambda _UpperCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_UpperCamelCase )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" _SCREAMING_SNAKE_CASE =_ask_field(_UpperCamelCase , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , default='ml.p3.2xlarge' ) _SCREAMING_SNAKE_CASE =1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): _SCREAMING_SNAKE_CASE =_ask_field( 'How many machines do you want use? [1]: ' , _UpperCamelCase , default=1 , ) _SCREAMING_SNAKE_CASE =_ask_options( 'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( 'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' ) return SageMakerConfig( image_uri=_UpperCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_UpperCamelCase , use_cpu=_UpperCamelCase , dynamo_config=_UpperCamelCase , eca_instance_type=_UpperCamelCase , profile=_UpperCamelCase , region=_UpperCamelCase , iam_role_name=_UpperCamelCase , mixed_precision=_UpperCamelCase , num_machines=_UpperCamelCase , sagemaker_inputs_file=_UpperCamelCase , sagemaker_metrics_file=_UpperCamelCase , )
47
0
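For intuition about the frequency_stride and time_stride fields in the AST config above: the model slices the spectrogram into overlapping patch_size x patch_size patches taken with those strides. A sketch of the resulting patch count, assuming the standard convolution output-size formula; the authoritative expression lives in the AST modeling code, not in the config itself:

def ast_num_patches(num_mel_bins=128, max_length=1024, patch_size=16,
                    frequency_stride=10, time_stride=10):
    # Conv-style output size along each axis, then the grid of patches.
    freq_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return freq_out * time_out


print(ast_num_patches())  # 12 * 101 = 1212 patches for the defaults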
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class __lowerCamelCase ( _snake_case , unittest.TestCase ): lowerCamelCase_ : Tuple = ReformerTokenizer lowerCamelCase_ : int = ReformerTokenizerFast lowerCamelCase_ : Tuple = True lowerCamelCase_ : Optional[Any] = False lowerCamelCase_ : str = True def lowerCAmelCase_ ( self ) -> str: super().setUp() snake_case_ = ReformerTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case_ = """<s>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(UpperCamelCase__ ) , 1000 ) def lowerCAmelCase_ ( self ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = """I was born in 92000, and this is falsé.""" snake_case_ = tokenizer.tokenize(UpperCamelCase__ ) snake_case_ = rust_tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) snake_case_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(UpperCamelCase__ ) snake_case_ = rust_tokenizer.encode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase_ ( self , lowerCamelCase=15 ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) # Simple input snake_case_ = """This is a simple input""" snake_case_ = ["""This is a simple input 1""", """This is a simple input 2"""] snake_case_ = ("""This is a simple input""", """This is a pair""") snake_case_ = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" ) # Simple input self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" ) # Simple input self.assertRaises( UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , ) # Pair input self.assertRaises(UpperCamelCase__ , 
tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" ) # Pair input self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" ) # Pair input self.assertRaises( UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , ) def lowerCAmelCase_ ( self ) -> Optional[Any]: pass def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case_ = ReformerTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) snake_case_ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [285, 46, 10, 170, 382] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) snake_case_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def lowerCAmelCase_ ( self ) -> Optional[int]: return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" ) @slow def lowerCAmelCase_ ( self ) -> Dict: snake_case_ = """Hello World!""" snake_case_ = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @slow def lowerCAmelCase_ ( self ) -> Any: snake_case_ = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) snake_case_ = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) ) @require_torch @slow def lowerCAmelCase_ ( self ) -> str: import torch from transformers import ReformerConfig, ReformerModel # Build sequence snake_case_ = list(self.big_tokenizer.get_vocab().keys() )[:10] snake_case_ = """ """.join(UpperCamelCase__ ) snake_case_ = self.big_tokenizer.encode_plus(UpperCamelCase__ , return_tensors="""pt""" ) snake_case_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" ) snake_case_ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) snake_case_ = encoded_sequence["""input_ids"""].shape snake_case_ = ReformerModel(UpperCamelCase__ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCamelCase__ ) model(**UpperCamelCase__ ) @slow def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case_ = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 snake_case_ = [ """This is a very simple sentence.""", """The quick brown fox jumps over the lazy dog.""", ] self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=UpperCamelCase__ , sequences=UpperCamelCase__ , )
362
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
34
0
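The Graphormer __init__ above relies on transformers' _LazyModule to defer heavy submodule imports until an attribute is first accessed. A minimal sketch of the underlying idea; this is not the real implementation, just the shape of it:

import importlib
import types


class LazyModule(types.ModuleType):
    # Map each exported name to its submodule; import only on first access.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        if name not in self._class_to_module:
            raise AttributeError(name)
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)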
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __lowerCAmelCase = n - k # Calculate C(n,k) for i in range(A_ ): result *= n - i result //= i + 1 return result def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return binomial_coefficient(2 * node_count , A_ ) // (node_count + 1) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if n < 0: raise ValueError("factorial() not defined for negative values" ) __lowerCAmelCase = 1 for i in range(1 , n + 1 ): result *= i return result def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return catalan_number(A_ ) * factorial(A_ ) if __name__ == "__main__": A : int = int(input("Enter the number of nodes: ").strip() or 0) if node_count <= 0: raise ValueError("We need some nodes to work with.") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
57
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
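A worked check of the Catalan arithmetic in the row above, using only the standard library: with 3 nodes, C(3) = C(6, 3) / 4 = 20 / 4 = 5 distinct BST shapes, and 5 * 3! = 30 labeled binary trees.

from math import comb, factorial

n = 3
catalan = comb(2 * n, n) // (n + 1)
print(catalan)                 # 5, matches catalan_number(3)
print(catalan * factorial(n))  # 30, matches binary_tree_count(3)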
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
305
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    # A sorted-letter signature: anagrams share the same key.
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    # Every word in the list that shares my_word's signature.
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
305
1
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
142
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co./sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co./models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
142
1
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig lowerCAmelCase : Dict = logging.getLogger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "masked_bert" def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-1_2 , _a=0 , _a="topK" , _a="constant" , _a=0.0 , **_a , ): """simple docstring""" super().__init__(pad_token_id=_a , **_a ) lowerCamelCase = vocab_size lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = hidden_act lowerCamelCase = intermediate_size lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = max_position_embeddings lowerCamelCase = type_vocab_size lowerCamelCase = initializer_range lowerCamelCase = layer_norm_eps lowerCamelCase = pruning_method lowerCamelCase = mask_init lowerCamelCase = mask_scale
168
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) lowerCamelCase = { """input_ids""": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] , dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } lowerCamelCase = model(_a )["""last_hidden_state"""] lowerCamelCase = tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape , _a ) # compare the actual values for a slice. lowerCamelCase = tf.convert_to_tensor( [ [ [0.0_681_762, 0.10_894_451, 0.06_772_504], [-0.06_423_668, 0.02_366_615, 0.04_329_344], [-0.06_057_295, 0.09_974_135, -0.00_070_584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
168
1
'''simple docstring'''

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    # Map each PyTorch state-dict entry onto the TF1 variable naming scheme and
    # save the result as a TF checkpoint.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
75
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    # Probabilistic Miller-Rabin style primality test using prec random witnesses.
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d*(2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
101
0
import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput snake_case : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def __lowerCamelCase ( UpperCAmelCase_ : Union[List, PIL.Image.Image, torch.Tensor] ): """simple docstring""" warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , UpperCAmelCase_ , ) if isinstance(UpperCAmelCase_ , torch.Tensor ): return image elif isinstance(UpperCAmelCase_ , PIL.Image.Image ): a :str = [image] if isinstance(image[0] , PIL.Image.Image ): a , a :Any = image[0].size a , a :Optional[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 a :str = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] a :int = np.concatenate(UpperCAmelCase_ , axis=0 ) a :Union[str, Any] = np.array(UpperCAmelCase_ ).astype(np.floataa ) / 255.0 a :List[str] = image.transpose(0 , 3 , 1 , 2 ) a :Dict = 2.0 * image - 1.0 a :List[Any] = torch.from_numpy(UpperCAmelCase_ ) elif isinstance(image[0] , torch.Tensor ): a :Any = torch.cat(UpperCAmelCase_ , dim=0 ) return image def __lowerCamelCase ( UpperCAmelCase_ : Union[List, PIL.Image.Image, torch.Tensor] ): """simple docstring""" if isinstance(UpperCAmelCase_ , torch.Tensor ): return mask elif isinstance(UpperCAmelCase_ , PIL.Image.Image ): a :List[Any] = [mask] if isinstance(mask[0] , PIL.Image.Image ): a , a :Optional[int] = mask[0].size a , a :Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 a :Optional[int] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] a :Union[str, Any] = np.concatenate(UpperCAmelCase_ , axis=0 ) a :Tuple = mask.astype(np.floataa ) / 255.0 a :Optional[int] = 0 a :str = 1 a :Optional[int] = torch.from_numpy(UpperCAmelCase_ ) elif isinstance(mask[0] , torch.Tensor ): a :Dict = torch.cat(UpperCAmelCase_ , dim=0 ) return mask class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __init__( self , _lowerCamelCase , _lowerCamelCase ): super().__init__() self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase ) @torch.no_grad() def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 250 , _lowerCamelCase = 0.0 , _lowerCamelCase = 10 , _lowerCamelCase = 10 , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , ): a :Any = image a :Optional[int] = _preprocess_image(_lowerCamelCase ) a :Optional[int] = original_image.to(device=self.device , dtype=self.unet.dtype ) a :List[str] = _preprocess_mask(_lowerCamelCase ) a :Tuple = mask_image.to(device=self.device , dtype=self.unet.dtype ) a :str = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(_lowerCamelCase )}, but requested an effective batch''' F''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''' ) a :Dict = original_image.shape a :Dict = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.device ) a :List[Any] = eta a :Tuple = self.scheduler.timesteps[0] + 1 a :int = generator[0] if isinstance(_lowerCamelCase , _lowerCamelCase ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual a :Tuple = self.unet(_lowerCamelCase , _lowerCamelCase ).sample # compute previous image: x_t -> x_t-1 a :List[Any] = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample else: # compute the reverse: x_t-1 -> x_t a :str = self.scheduler.undo_step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) a :Any = t a :Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) a :int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a :Optional[int] = self.numpy_to_pil(_lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCamelCase )
281
def solution(n: int = 100) -> int:
    """simple docstring"""
    # Difference between the square of the sum and the sum of the squares
    # of the first n natural numbers.
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(F"""{solution() = }""")
281
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { "facebook/xlm-roberta-xl": "https://huggingface.co./facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co./facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co./models?filter=xlm-roberta-xl } class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Dict = '''xlm-roberta-xl''' def __init__( self , lowerCAmelCase__=2_5_0_8_8_0 , lowerCAmelCase__=2_5_6_0 , lowerCAmelCase__=3_6 , lowerCAmelCase__=3_2 , lowerCAmelCase__=1_0_2_4_0 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_4 , lowerCAmelCase__=1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-05 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ): super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" @property def snake_case_ ( self): if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
100
'''simple docstring'''

import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/encodec_24khz': 'https://huggingface.co./facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co./facebook/encodec_48khz/resolve/main/config.json',
}


class EncodecConfig(PretrainedConfig):
    model_type = 'encodec'

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type='weight_norm',
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode='reflect',
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}"
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
34
0
'''simple docstring'''

from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    # Mobius function: 1 / -1 for square-free n with an even / odd number of
    # prime factors, 0 otherwise.
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
361
'''simple docstring'''

import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
264
0
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class A ( unittest.TestCase ): '''simple docstring''' def __init__(self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=99 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : List[str]=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=4 , ) -> List[Any]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_choices def lowerCamelCase__ (self : List[str] ) -> Dict: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ (self : int ) -> Any: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase__ (self : Tuple ) -> str: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = True lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase__ = 
ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class A ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' A__ = True A__ = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ (self : Optional[int] ) -> List[str]: """simple docstring""" lowercase__ = FlaxBertModelTester(self ) @slow def lowerCamelCase__ (self : List[str] ) -> Union[str, Any]: """simple docstring""" lowercase__ = FlaxBertModel.from_pretrained("""bert-base-cased""" ) lowercase__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(_UpperCAmelCase )
305
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    class_ = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""", class_=class_).find("""span""").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
305
1
"""simple docstring""" import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class snake_case ( __snake_case, __snake_case, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : int = VQModel SCREAMING_SNAKE_CASE_ : Any = """sample""" @property def lowercase_ ( self : List[Any] , UpperCamelCase__ : Optional[int]=(3_2, 3_2))-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = 4 __lowerCAmelCase: Optional[int] = 3 __lowerCAmelCase: List[str] = floats_tensor((batch_size, num_channels) + sizes).to(UpperCamelCase__) return {"sample": image} @property def lowercase_ ( self : int)-> int: '''simple docstring''' return (3, 3_2, 3_2) @property def lowercase_ ( self : Tuple)-> List[Any]: '''simple docstring''' return (3, 3_2, 3_2) def lowercase_ ( self : Dict)-> int: '''simple docstring''' __lowerCAmelCase: List[Any] = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } __lowerCAmelCase: List[str] = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : str)-> List[Any]: '''simple docstring''' pass def lowercase_ ( self : Any)-> str: '''simple docstring''' pass def lowercase_ ( self : Union[str, Any])-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: int = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=UpperCamelCase__) self.assertIsNotNone(UpperCamelCase__) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(UpperCamelCase__) __lowerCAmelCase: List[str] = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def lowercase_ ( self : List[Any])-> Tuple: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(UpperCamelCase__).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __lowerCAmelCase: List[Any] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __lowerCAmelCase: List[Any] = image.to(UpperCamelCase__) with torch.no_grad(): __lowerCAmelCase: Dict = model(UpperCamelCase__).sample __lowerCAmelCase: int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __lowerCAmelCase: List[Any] = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143]) # fmt: on self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3))
108
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1_0**-1_0 ) -> float: __lowerCAmelCase: Union[str, Any] = a while True: __lowerCAmelCase: Optional[int] = Decimal(__SCREAMING_SNAKE_CASE ) - ( Decimal(eval(__SCREAMING_SNAKE_CASE ) ) / Decimal(eval(str(diff(__SCREAMING_SNAKE_CASE ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__SCREAMING_SNAKE_CASE ) ) < precision: # noqa: S307 return float(__SCREAMING_SNAKE_CASE ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''') # Find root of polynomial print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''') # Find Square Root of 5 print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''') # Exponential Roots print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
108
1
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging a_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class a ( _SCREAMING_SNAKE_CASE ): def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Any: super().__init__() if safety_checker is None: logger.warning( f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure' ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' ) self.register_modules( speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , ) def __UpperCAmelCase ( self , __magic_name__ = "auto" ) -> Optional[Any]: if slice_size == "auto": _a = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__magic_name__ ) def __UpperCAmelCase ( self ) -> Optional[int]: self.enable_attention_slicing(__magic_name__ ) @torch.no_grad() def __call__( self , __magic_name__ , __magic_name__=1_60_00 , __magic_name__ = 5_12 , __magic_name__ = 5_12 , __magic_name__ = 50 , __magic_name__ = 7.5 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "pil" , __magic_name__ = True , __magic_name__ = None , __magic_name__ = 1 , **__magic_name__ , ) -> Dict: _a = self.speech_processor.feature_extractor( __magic_name__ , return_tensors='pt' , sampling_rate=__magic_name__ ).input_features.to(self.device ) _a = self.speech_model.generate(__magic_name__ , max_length=48_00_00 ) _a = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[ 0 ] if isinstance(__magic_name__ , __magic_name__ ): _a = 1 elif isinstance(__magic_name__ , __magic_name__ ): _a = len(__magic_name__ ) else: raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0) ): raise ValueError( f'`callback_steps` has to be a positive integer but is {callback_steps} of type' f' {type(__magic_name__ )}.' 
) # get prompt text embeddings _a = self.tokenizer( __magic_name__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) _a = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _a = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f' {self.tokenizer.model_max_length} tokens: {removed_text}' ) _a = text_input_ids[:, : self.tokenizer.model_max_length] _a = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method _a , _a , _a = text_embeddings.shape _a = text_embeddings.repeat(1 , __magic_name__ , 1 ) _a = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _a = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _a = 42 if negative_prompt is None: _a = [''] * batch_size elif type(__magic_name__ ) is not type(__magic_name__ ): raise TypeError( f'`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !=' f' {type(__magic_name__ )}.' ) elif isinstance(__magic_name__ , __magic_name__ ): _a = [negative_prompt] elif batch_size != len(__magic_name__ ): raise ValueError( f'`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:' f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches' ' the batch size of `prompt`.' ) else: _a = negative_prompt _a = text_input_ids.shape[-1] _a = self.tokenizer( __magic_name__ , padding='max_length' , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors='pt' , ) _a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _a = uncond_embeddings.shape[1] _a = uncond_embeddings.repeat(1 , __magic_name__ , 1 ) _a = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _a = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_a = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) _a = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps _a = torch.randn(__magic_name__ , generator=__magic_name__ , device='cpu' , dtype=__magic_name__ ).to( self.device ) else: _a = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ ) else: if latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) _a = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__magic_name__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand _a = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _a = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _a = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _a = {} if accepts_eta: _a = eta for i, t in enumerate(self.progress_bar(__magic_name__ ) ): # expand the latents if we are doing classifier free guidance _a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _a = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ ) # predict the noise residual _a = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample # perform guidance if do_classifier_free_guidance: _a , _a = noise_pred.chunk(2 ) _a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 _a = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__magic_name__ , __magic_name__ , __magic_name__ ) _a = 1 / 0.1_8_2_1_5 * latents _a = self.vae.decode(__magic_name__ ).sample _a = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _a = self.numpy_to_pil(__magic_name__ ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
168
'''simple docstring'''

def move_tower(height, from_pole, to_pole, with_pole):
    '''simple docstring'''
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    '''simple docstring'''
    print('moving disk from', fp, 'to', tp)


def main():
    '''simple docstring'''
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
168
1
'''simple docstring'''

import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_46_94
        config.cell_selection_preference = 0.20_79_51
        config.huber_loss_delta = 0.12_11_94
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_35_25_13

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.45_19
        config.cell_selection_preference = 0.90_34_21
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_31_41

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f'Task {task} not supported.')

    print(f'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f'Save tokenizer files to {pytorch_dump_path}')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("""Used relative position embeddings:""", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
    )
    parser.add_argument(
        """--reset_position_index_per_cell""",
        default=False,
        action="""store_true""",
        help="""Whether to use relative position embeddings or not. Defaults to True.""",
    )
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--tapas_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained TAPAS model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
355
'''simple docstring'''

import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    # Wrap fn so that calling it emits a warning that the API is experimental.
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
46
0
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case : str = "▁" snake_case : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class _snake_case ( snake_case , unittest.TestCase ): UpperCamelCase__ = BigBirdTokenizer UpperCamelCase__ = BigBirdTokenizerFast UpperCamelCase__ = True UpperCamelCase__ = True def SCREAMING_SNAKE_CASE ( self ): super().setUp() __magic_name__ : Optional[Any] = self.tokenizer_class(_a , keep_accents=_a ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Union[str, Any] = "<s>" __magic_name__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "[MASK]" ) self.assertEqual(len(_a ) , 1_004 ) def SCREAMING_SNAKE_CASE ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def SCREAMING_SNAKE_CASE ( self ): if not self.test_rust_tokenizer: return __magic_name__ : Dict = self.get_tokenizer() __magic_name__ : str = self.get_rust_tokenizer() __magic_name__ : Any = "I was born in 92000, and this is falsé." __magic_name__ : Dict = tokenizer.tokenize(_a ) __magic_name__ : Any = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) __magic_name__ : List[Any] = tokenizer.encode(_a , add_special_tokens=_a ) __magic_name__ : List[str] = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) __magic_name__ : str = self.get_rust_tokenizer() __magic_name__ : Dict = tokenizer.encode(_a ) __magic_name__ : Optional[int] = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[int] = BigBirdTokenizer(_a , keep_accents=_a ) __magic_name__ : str = tokenizer.tokenize("This is a test" ) self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] , ) __magic_name__ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( _a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __magic_name__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_a ) self.assertListEqual( _a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) __magic_name__ : int = tokenizer.convert_ids_to_tokens(_a ) self.assertListEqual( _a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def SCREAMING_SNAKE_CASE ( self ): return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) @slow def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Any = "Hello World!" __magic_name__ : Dict = [65, 18_536, 2_260, 101, 66] self.assertListEqual(_a , self.big_tokenizer.encode(_a ) ) @slow def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Dict = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) # fmt: off __magic_name__ : List[str] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231 # fmt: on self.assertListEqual(_a , self.big_tokenizer.encode(_a ) ) @require_torch @slow def SCREAMING_SNAKE_CASE ( self ): import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence __magic_name__ : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] __magic_name__ : List[Any] = " ".join(_a ) __magic_name__ : Any = self.big_tokenizer.encode_plus(_a , return_tensors="pt" , return_token_type_ids=_a ) __magic_name__ : Union[str, Any] = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=_a ) __magic_name__ : List[str] = BigBirdConfig(attention_type="original_full" ) __magic_name__ : Optional[int] = BigBirdModel(_a ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_a ) model(**_a ) @slow def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) __magic_name__ : int = tokenizer.decode(tokenizer("Paris is the [MASK]." 
).input_ids ) self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" ) @slow def SCREAMING_SNAKE_CASE ( self ): # fmt: off __magic_name__ : Optional[Any] = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_a , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
281
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if isinstance(_snake_case , _snake_case ): __magic_name__ : Union[str, Any] = np.full((len(_snake_case ), sequence_length, 2) , _snake_case ) else: __magic_name__ : List[Any] = np.full((len(_snake_case ), sequence_length) , _snake_case ) for i, tensor in enumerate(_snake_case ): if padding_side == "right": if isinstance(_snake_case , _snake_case ): __magic_name__ : Optional[Any] = tensor[:sequence_length] else: __magic_name__ : Union[str, Any] = tensor[:sequence_length] else: if isinstance(_snake_case , _snake_case ): __magic_name__ : List[Any] = tensor[:sequence_length] else: __magic_name__ : Optional[Any] = tensor[:sequence_length] return out_tensor.tolist() def lowerCAmelCase_ ( _snake_case : Optional[int] ) -> Tuple: '''simple docstring''' __magic_name__ : Union[str, Any] = ord(_snake_case ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True __magic_name__ : Any = unicodedata.category(_snake_case ) if cat.startswith("P" ): return True return False @dataclass class _snake_case ( snake_case ): UpperCamelCase__ = 42 UpperCamelCase__ = True UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = -100 UpperCamelCase__ = "pt" def SCREAMING_SNAKE_CASE ( self , _a ): import torch __magic_name__ : List[str] = "label" if "label" in features[0].keys() else "labels" __magic_name__ : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __magic_name__ : Optional[int] = self.tokenizer.pad( _a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , ) if labels is None: return batch __magic_name__ : Dict = torch.tensor(batch["entity_ids"] ).shape[1] __magic_name__ : List[Any] = self.tokenizer.padding_side if padding_side == "right": __magic_name__ : str = [ list(_a ) + [self.label_pad_token_id] * (sequence_length - len(_a )) for label in labels ] else: __magic_name__ : int = [ [self.label_pad_token_id] * (sequence_length - len(_a )) + list(_a ) for label in labels ] __magic_name__ : Dict = [feature["ner_tags"] for feature in features] __magic_name__ : List[Any] = padding_tensor(_a , -1 , _a , _a ) __magic_name__ : Any = [feature["original_entity_spans"] for feature in features] __magic_name__ : Any = padding_tensor(_a , (-1, -1) , _a , _a ) __magic_name__ : List[Any] = {k: torch.tensor(_a , dtype=torch.intaa ) for k, v in batch.items()} return batch
281
1
from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__) def UpperCAmelCase ( a_ ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(a_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(a_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(a_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = ["pixel_values"] def __init__( self : Union[str, Any] ,A : bool = True ,A : Dict[str, int] = None ,A : PILImageResampling = PILImageResampling.BILINEAR ,A : bool = True ,A : Dict[str, int] = None ,A : bool = True ,A : Union[int, float] = 1 / 2_55 ,A : bool = True ,A : bool = True ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[float, List[float]]] = None ,**A : Optional[Any] ,): super().__init__(**A ) __A = size if size is not None else {"shortest_edge": 2_56} __A = get_size_dict(A ,default_to_square=A ) __A = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} __A = get_size_dict(A ,param_name="crop_size" ) __A = do_resize __A = size __A = do_center_crop __A = crop_size __A = resample __A = do_rescale __A = rescale_factor __A = offset __A = do_normalize __A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __A = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase_ ( self : List[str] ,A : np.ndarray ,A : Dict[str, int] ,A : PILImageResampling = PILImageResampling.BILINEAR ,A : Optional[Union[str, ChannelDimension]] = None ,**A : int ,): __A = get_size_dict(A ,default_to_square=A ) if "shortest_edge" in size: __A = get_resize_output_image_size(A ,size["shortest_edge"] ,default_to_square=A ) elif "height" in size and "width" in size: __A = (size["height"], size["width"]) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) return resize(A ,size=A ,resample=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : List[Any] ,A : np.ndarray ,A : Dict[str, int] ,A : Optional[Union[str, ChannelDimension]] = None ,**A : Dict ,): __A = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(A ,size=(size["height"], size["width"]) ,data_format=A ,**A ) def UpperCamelCase_ ( self : Optional[Any] ,A : np.ndarray ,A : Union[int, float] ,A : bool = True ,A : Optional[Union[str, ChannelDimension]] = None ,**A : List[Any] ,): __A = image.astype(np.floataa ) if offset: __A = image - (scale / 2) return rescale(A ,scale=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Any ,A : np.ndarray ,A : Union[float, List[float]] ,A : Union[float, List[float]] ,A : Optional[Union[str, ChannelDimension]] = None ,**A : Dict ,): return normalize(A ,mean=A ,std=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Any ,A : ImageInput ,A : bool = None ,A : Dict[str, int] = None ,A : PILImageResampling = None ,A : bool = None ,A : Dict[str, int] = None ,A : bool = None ,A : float = None ,A : bool = None ,A : bool = None ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[float, List[float]]] = None ,A : Optional[ChannelDimension] = ChannelDimension.FIRST ,): if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. __A = to_numpy_array(A ) if do_resize: __A = self.resize(image=A ,size=A ,resample=A ) if do_center_crop: __A = self.center_crop(A ,size=A ) if do_rescale: __A = self.rescale(image=A ,scale=A ,offset=A ) if do_normalize: __A = self.normalize(image=A ,mean=A ,std=A ) __A = to_channel_dimension_format(A ,A ) return image def UpperCamelCase_ ( self : str ,A : ImageInput ,A : bool = None ,A : Dict[str, int] = None ,A : PILImageResampling = None ,A : bool = None ,A : Dict[str, int] = None ,A : bool = None ,A : float = None ,A : bool = None ,A : bool = None ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[str, TensorType]] = None ,A : ChannelDimension = ChannelDimension.FIRST ,**A : Dict ,): __A = do_resize if do_resize is not None else self.do_resize __A = resample if resample is not None else self.resample __A = do_center_crop if do_center_crop is not None else self.do_center_crop __A = do_rescale if do_rescale is not None else self.do_rescale __A = rescale_factor if rescale_factor is not None else self.rescale_factor __A = offset if offset is not None else self.offset __A = do_normalize if do_normalize is not None else self.do_normalize __A = image_mean if image_mean is not None else self.image_mean __A = image_std if image_std is not None else self.image_std __A = size if size is not None else self.size __A = get_size_dict(A ,default_to_square=A ) __A = crop_size if crop_size is not None else self.crop_size __A = get_size_dict(A ,param_name="crop_size" ) if not valid_images(A ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) __A = make_batched(A ) __A = [ [ self._preprocess_image( image=A ,do_resize=A ,size=A ,resample=A ,do_center_crop=A ,crop_size=A ,do_rescale=A ,rescale_factor=A ,offset=A ,do_normalize=A ,image_mean=A ,image_std=A ,data_format=A ,) for img in video ] for video in videos ] __A = {"pixel_values": videos} return BatchFeature(data=A ,tensor_type=A )
368
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co./docs/diffusers/main/model_doc",
                "https://huggingface.co./docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
124
0
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
70
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class _UpperCAmelCase ( lowerCAmelCase__): def __init__( self : Optional[int] ): snake_case_ : str = [] def _snake_case ( self : List[Any] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str] , **lowercase_ : Tuple ): self.events.append('''on_init_end''' ) def _snake_case ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : List[str] , **lowercase_ : List[str] ): self.events.append('''on_train_begin''' ) def _snake_case ( self : Any , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] , **lowercase_ : Optional[int] ): self.events.append('''on_train_end''' ) def _snake_case ( self : str , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[Any] , **lowercase_ : List[Any] ): self.events.append('''on_epoch_begin''' ) def _snake_case ( self : Tuple , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ): self.events.append('''on_epoch_end''' ) def _snake_case ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : int , **lowercase_ : Optional[Any] ): self.events.append('''on_step_begin''' ) def _snake_case ( self : int , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , **lowercase_ : List[str] ): self.events.append('''on_step_end''' ) def _snake_case ( self : str , lowercase_ : int , lowercase_ : Dict , lowercase_ : List[str] , **lowercase_ : List[str] ): self.events.append('''on_evaluate''' ) def _snake_case ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : List[Any] , **lowercase_ : str ): self.events.append('''on_predict''' ) def _snake_case ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , **lowercase_ : Union[str, Any] ): self.events.append('''on_save''' ) def _snake_case ( self : str , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[str] , **lowercase_ : Any ): self.events.append('''on_log''' ) def _snake_case ( self : Dict , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ): self.events.append('''on_prediction_step''' ) @require_torch class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : List[str] ): snake_case_ : Tuple = tempfile.mkdtemp() def _snake_case ( self : Tuple ): shutil.rmtree(self.output_dir ) def _snake_case ( self : int , lowercase_ : Union[str, Any]=0 , lowercase_ : Dict=0 , lowercase_ : List[str]=64 , lowercase_ : Union[str, Any]=64 , lowercase_ : Union[str, Any]=None , lowercase_ : Any=False , **lowercase_ : List[Any] ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
snake_case_ : int = RegressionDataset(length=lowercase_ ) snake_case_ : Any = RegressionDataset(length=lowercase_ ) snake_case_ : int = RegressionModelConfig(a=lowercase_ , b=lowercase_ ) snake_case_ : Tuple = RegressionPreTrainedModel(lowercase_ ) snake_case_ : Any = TrainingArguments(self.output_dir , disable_tqdm=lowercase_ , report_to=[] , **lowercase_ ) return Trainer( lowercase_ , lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , callbacks=lowercase_ , ) def _snake_case ( self : Optional[int] , lowercase_ : Any , lowercase_ : List[Any] ): self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) # Order doesn't matter snake_case_ : Any = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ ) snake_case_ : List[str] = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ ) for cba, cba in zip(lowercase_ , lowercase_ ): if isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ): self.assertEqual(lowercase_ , lowercase_ ) elif isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ): self.assertEqual(lowercase_ , cba.__class__ ) elif not isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ): self.assertEqual(cba.__class__ , lowercase_ ) else: self.assertEqual(lowercase_ , lowercase_ ) def _snake_case ( self : Optional[Any] , lowercase_ : Tuple ): snake_case_ : Tuple = ['''on_init_end''', '''on_train_begin'''] snake_case_ : List[Any] = 0 snake_case_ : Union[str, Any] = len(trainer.get_eval_dataloader() ) snake_case_ : List[Any] = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate'''] for _ in range(trainer.state.num_train_epochs ): expected_events.append('''on_epoch_begin''' ) for _ in range(lowercase_ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('''on_log''' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('''on_save''' ) expected_events.append('''on_epoch_end''' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _snake_case ( self : List[str] ): snake_case_ : Union[str, Any] = self.get_trainer() snake_case_ : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) # Callbacks passed at init are added to the default callbacks snake_case_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback snake_case_ : Optional[int] = self.get_trainer(disable_tqdm=lowercase_ ) snake_case_ : List[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) def _snake_case ( self : int ): snake_case_ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] snake_case_ : List[Any] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(lowercase_ ) 
expected_callbacks.remove(lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) snake_case_ : Dict = self.get_trainer() snake_case_ : Optional[int] = trainer.pop_callback(lowercase_ ) self.assertEqual(cb.__class__ , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) trainer.add_callback(lowercase_ ) expected_callbacks.insert(0 , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) # We can also add, pop, or remove by instance snake_case_ : Optional[int] = self.get_trainer() snake_case_ : List[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(lowercase_ ) expected_callbacks.remove(lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) snake_case_ : List[Any] = self.get_trainer() snake_case_ : Optional[int] = trainer.callback_handler.callbacks[0] snake_case_ : Optional[Any] = trainer.pop_callback(lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) trainer.add_callback(lowercase_ ) expected_callbacks.insert(0 , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) def _snake_case ( self : List[Any] ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='''ignore''' , category=lowercase_ ) snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) # Independent log/save/eval snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() snake_case_ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) snake_case_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() snake_case_ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) snake_case_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' ) trainer.train() snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) snake_case_ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' ) trainer.train() snake_case_ : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) # A bit of everything snake_case_ : str = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , ) trainer.train() snake_case_ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) # warning should be emitted for duplicated callbacks with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock: snake_case_ : Dict = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(lowercase_ ) in warn_mock.call_args[0][0]
264
0
def perfect_cube(n: int) -> bool:
    # Round the cube root to the nearest integer to avoid floating-point error
    # (e.g. 27 ** (1 / 3) evaluates to 3.0000000000000004).
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
285
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Entries of the lower-triangular factor (strictly below the diagonal)
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Entries of the upper-triangular factor (diagonal and above)
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
285
1
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co./EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co./models?filter=gpt_j } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Any ="gptj" a : Any ={ "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=50_400 , snake_case__=2_048 , snake_case__=4_096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-5 , snake_case__=0.02 , snake_case__=True , snake_case__=50_256 , snake_case__=50_256 , snake_case__=False , **snake_case__ , ): """simple docstring""" lowerCAmelCase : Any = vocab_size lowerCAmelCase : Tuple = n_positions lowerCAmelCase : List[Any] = n_embd lowerCAmelCase : Any = n_layer lowerCAmelCase : List[Any] = n_head lowerCAmelCase : Optional[int] = n_inner lowerCAmelCase : List[str] = rotary_dim lowerCAmelCase : Dict = activation_function lowerCAmelCase : Dict = resid_pdrop lowerCAmelCase : List[Any] = embd_pdrop lowerCAmelCase : List[str] = attn_pdrop lowerCAmelCase : Optional[int] = layer_norm_epsilon lowerCAmelCase : Optional[int] = initializer_range lowerCAmelCase : int = use_cache lowerCAmelCase : Dict = bos_token_id lowerCAmelCase : List[str] = eos_token_id super().__init__( bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ ) class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ): """simple docstring""" super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ ) if not getattr(self._config , "pad_token_id" , snake_case__ ): # TODO: how to do that better? 
lowerCAmelCase : Any = 0 @property def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Dict = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction="inputs" ) lowerCAmelCase : int = {0: "batch", 1: "past_sequence + sequence"} else: lowerCAmelCase : Optional[int] = {0: "batch", 1: "sequence"} return common_inputs @property def lowercase__ ( self ): """simple docstring""" return self._config.n_layer @property def lowercase__ ( self ): """simple docstring""" return self._config.n_head def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): """simple docstring""" lowerCAmelCase : Tuple = super(snake_case__ , self ).generate_dummy_inputs( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) # We need to order the input in the way they appears in the forward() lowerCAmelCase : List[str] = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCAmelCase , lowerCAmelCase : Dict = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCAmelCase : Dict = seqlen + 2 lowerCAmelCase : Dict = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCAmelCase : Union[str, Any] = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] lowerCAmelCase : Optional[Any] = common_inputs["attention_mask"] if self.use_past: lowerCAmelCase : Optional[Any] = ordered_inputs["attention_mask"].dtype lowerCAmelCase : Union[str, Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self ): """simple docstring""" return 13
108
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = '''T5Config''' def a__ ( SCREAMING_SNAKE_CASE : jnp.array , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' lowerCAmelCase : List[str] = jnp.zeros_like(SCREAMING_SNAKE_CASE ) lowerCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) lowerCAmelCase : List[str] = shifted_input_ids.at[:, 0].set(SCREAMING_SNAKE_CASE ) lowerCAmelCase : str = jnp.where(shifted_input_ids == -1_0_0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return shifted_input_ids class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : List[Any] ="mt5" a : Tuple =MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Union[str, Any] ="mt5" a : Optional[Any] =MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : str ="mt5" a : Dict =MTaConfig
108
1
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' A: Any = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''num_heads''' ) ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=13 , SCREAMING_SNAKE_CASE_ : List[str]=64 , SCREAMING_SNAKE_CASE_ : List[str]=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[16, 48, 96] , SCREAMING_SNAKE_CASE_ : str=[1, 3, 6] , SCREAMING_SNAKE_CASE_ : Any=[1, 2, 10] , SCREAMING_SNAKE_CASE_ : Tuple=[7, 3, 3] , SCREAMING_SNAKE_CASE_ : Dict=[4, 2, 2] , SCREAMING_SNAKE_CASE_ : Optional[Any]=[2, 1, 1] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE_ : Optional[int]=[False, False, True] , SCREAMING_SNAKE_CASE_ : str=[0.0, 0.0, 0.0] , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1E-12 , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , ) -> str: '''simple docstring''' A: Optional[int] = parent A: Any = batch_size A: Union[str, Any] = image_size A: int = patch_sizes A: Optional[int] = patch_stride A: Any = patch_padding A: int = is_training A: Union[str, Any] = use_labels A: Union[str, Any] = num_labels A: Optional[int] = num_channels A: List[Any] = embed_dim A: Optional[int] = num_heads A: int = stride_kv A: Tuple = depth A: Dict = cls_token A: Tuple = attention_drop_rate A: List[str] = initializer_range A: List[Any] = layer_norm_eps def _snake_case ( self : str ) -> List[Any]: '''simple docstring''' A: Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A: List[Any] = None if self.use_labels: A: str = ids_tensor([self.batch_size] , self.num_labels ) A: Any = self.get_config() return config, pixel_values, labels def _snake_case ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: '''simple docstring''' A: List[str] = CvtModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) 
model.eval() A: int = model(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = (self.image_size, self.image_size) A , A: Tuple = image_size[0], image_size[1] for i in range(len(self.depth ) ): A: Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) A: int = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' A: List[str] = self.num_labels A: Dict = CvtForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Tuple = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Union[str, Any] ) -> str: '''simple docstring''' A: Dict = self.prepare_config_and_inputs() A , A , A: Union[str, Any] = config_and_inputs A: int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : int = (CvtModel, CvtForImageClassification) if is_torch_available() else () UpperCamelCase_ : Union[str, Any] = ( {"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification} if is_torch_available() else {} ) UpperCamelCase_ : Dict = False UpperCamelCase_ : List[Any] = False UpperCamelCase_ : List[Any] = False UpperCamelCase_ : Dict = False UpperCamelCase_ : Any = False def _snake_case ( self : Optional[int] ) -> List[Any]: '''simple docstring''' A: Dict = CvtModelTester(self ) A: str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def _snake_case ( self : Optional[Any] ) -> Tuple: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self : Tuple ) -> Tuple: '''simple docstring''' return @unittest.skip(reason='''Cvt does not output attentions''' ) def _snake_case ( self : Dict ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def _snake_case ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def _snake_case ( self : int ) -> Any: '''simple docstring''' pass def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A , A: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A: Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) A: int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A: str = [*signature.parameters.keys()] A: Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> Any: '''simple docstring''' A: 
Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] ) -> Dict: '''simple docstring''' def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ): A: List[str] = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): A: str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) A: Optional[Any] = outputs.hidden_states A: Dict = len(self.model_tester.depth ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) A , A: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A: List[Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A: Dict = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> List[Any]: '''simple docstring''' A: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _snake_case ( self : Optional[int] ) -> Any: '''simple docstring''' pass @slow def _snake_case ( self : Dict ) -> Dict: '''simple docstring''' for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: List[Any] = CvtModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE( ) -> List[Any]: A: Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Tuple ) -> str: '''simple docstring''' return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _snake_case ( self : str ) -> Any: '''simple docstring''' A: Optional[int] = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE_ ) A: List[Any] = self.default_image_processor A: List[str] = prepare_img() A: Tuple = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): A: Tuple = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits A: Tuple = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) A: int = torch.tensor([0.9285, 0.9015, -0.3150] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
334
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' @register_to_config def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 6_55_36 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : str = "fourier" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE_ : Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : Tuple[int] = (32, 32, 64) , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Tuple: '''simple docstring''' super().__init__() A: Optional[Any] = sample_size # time if time_embedding_type == "fourier": A: Tuple = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE_ , log=SCREAMING_SNAKE_CASE_ , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ ) A: List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": A: str = Timesteps( block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , downscale_freq_shift=SCREAMING_SNAKE_CASE_ ) A: Any = block_out_channels[0] if use_timestep_embedding: A: Optional[Any] = block_out_channels[0] * 4 A: List[Any] = TimestepEmbedding( in_channels=SCREAMING_SNAKE_CASE_ , time_embed_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , out_dim=block_out_channels[0] , ) A: Optional[Any] = nn.ModuleList([] ) A: str = None A: str = nn.ModuleList([] ) A: Tuple = None # down A: Any = in_channels for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: Optional[int] = output_channel A: List[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels A: List[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[int] = get_down_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(SCREAMING_SNAKE_CASE_ ) # mid A: Union[str, Any] = get_mid_block( SCREAMING_SNAKE_CASE_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE_ , add_downsample=SCREAMING_SNAKE_CASE_ , ) # up A: Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) ) A: List[str] = reversed_block_out_channels[0] if out_block_type is None: A: int = out_channels else: A: Union[str, Any] = block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: List[Any] = output_channel A: int = ( 
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels ) A: Optional[int] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[Any] = get_up_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(SCREAMING_SNAKE_CASE_ ) A: Any = output_channel # out A: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) A: Optional[int] = get_out_block( out_block_type=SCREAMING_SNAKE_CASE_ , num_groups_out=SCREAMING_SNAKE_CASE_ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , fc_dim=block_out_channels[-1] // 4 , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[UNetaDOutput, Tuple]: '''simple docstring''' A: Any = timestep if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ): A: Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0: A: List[str] = timesteps[None].to(sample.device ) A: int = self.time_proj(SCREAMING_SNAKE_CASE_ ) if self.config.use_timestep_embedding: A: List[Any] = self.time_mlp(SCREAMING_SNAKE_CASE_ ) else: A: str = timestep_embed[..., None] A: Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) A: Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down A: List[str] = () for downsample_block in self.down_blocks: A , A: Optional[int] = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: A: Dict = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): A: List[Any] = down_block_res_samples[-1:] A: List[str] = down_block_res_samples[:-1] A: Optional[int] = upsample_block(SCREAMING_SNAKE_CASE_ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) # 5. post-process if self.out_block: A: Any = self.out_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
334
1
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase : Any = logging.get_logger(__name__) lowerCamelCase : Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase : Union[str, Any] = { 'vocab_file': { 'gpt2': 'https://huggingface.co./gpt2/resolve/main/vocab.json', 'gpt2-medium': 'https://huggingface.co./gpt2-medium/resolve/main/vocab.json', 'gpt2-large': 'https://huggingface.co./gpt2-large/resolve/main/vocab.json', 'gpt2-xl': 'https://huggingface.co./gpt2-xl/resolve/main/vocab.json', 'distilgpt2': 'https://huggingface.co./distilgpt2/resolve/main/vocab.json', }, 'merges_file': { 'gpt2': 'https://huggingface.co./gpt2/resolve/main/merges.txt', 'gpt2-medium': 'https://huggingface.co./gpt2-medium/resolve/main/merges.txt', 'gpt2-large': 'https://huggingface.co./gpt2-large/resolve/main/merges.txt', 'gpt2-xl': 'https://huggingface.co./gpt2-xl/resolve/main/merges.txt', 'distilgpt2': 'https://huggingface.co./distilgpt2/resolve/main/merges.txt', }, 'tokenizer_file': { 'gpt2': 'https://huggingface.co./gpt2/resolve/main/tokenizer.json', 'gpt2-medium': 'https://huggingface.co./gpt2-medium/resolve/main/tokenizer.json', 'gpt2-large': 'https://huggingface.co./gpt2-large/resolve/main/tokenizer.json', 'gpt2-xl': 'https://huggingface.co./gpt2-xl/resolve/main/tokenizer.json', 'distilgpt2': 'https://huggingface.co./distilgpt2/resolve/main/tokenizer.json', }, } lowerCamelCase : Optional[int] = { 'gpt2': 1_0_2_4, 'gpt2-medium': 1_0_2_4, 'gpt2-large': 1_0_2_4, 'gpt2-xl': 1_0_2_4, 'distilgpt2': 1_0_2_4, } class __lowercase (_UpperCAmelCase ): """simple docstring""" _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ["""input_ids""", """attention_mask"""] _snake_case = GPTaTokenizer def __init__( self , A=None , A=None , A=None , A="<|endoftext|>" , A="<|endoftext|>" , A="<|endoftext|>" , A=False , **A , ) -> Dict: super().__init__( A , A , tokenizer_file=A , unk_token=A , bos_token=A , eos_token=A , add_prefix_space=A , **A , ) snake_case : Tuple = kwargs.pop("""add_bos_token""" , A ) snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , A ) != add_prefix_space: snake_case : Optional[int] = getattr(A , pre_tok_state.pop("""type""" ) ) snake_case : str = add_prefix_space snake_case : List[Any] = pre_tok_class(**A ) snake_case : List[Any] = add_prefix_space def UpperCAmelCase ( self , *A , **A ) -> BatchEncoding: snake_case : Tuple = kwargs.get("""is_split_into_words""" , A ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*A , **A ) def UpperCAmelCase ( self , *A , **A ) -> BatchEncoding: snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""" , A ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._encode_plus(*A , **A ) def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]: snake_case : List[Any] = self._tokenizer.model.save(A , name=A ) return tuple(A ) def UpperCAmelCase ( self , A ) -> List[int]: snake_case : str = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(A , add_special_tokens=A ) + [self.eos_token_id] ) if len(A ) > self.model_max_length: snake_case : Optional[Any] = input_ids[-self.model_max_length :] return input_ids
124
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co./AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co./AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co./AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co./AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co./AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } SCREAMING_SNAKE_CASE__ = { "AI-Sweden/gpt-sw3-126m": 2_048, "AI-Sweden/gpt-sw3-350m": 2_048, "AI-Sweden/gpt-sw3-1.6b": 2_048, "AI-Sweden/gpt-sw3-6.7b": 2_048, "AI-Sweden/gpt-sw3-20b": 2_048, } class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] def __init__( self , lowercase , lowercase=False , lowercase=False , lowercase=False , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase = None , **lowercase , ) -> None: lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase = kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) lowerCAmelCase = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing lowerCAmelCase = """<|endoftext|>""" if eos_token is None else eos_token lowerCAmelCase = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: lowerCAmelCase = unk_token if pad_token is None else pad_token lowerCAmelCase = eos_token if bos_token is None else bos_token else: lowerCAmelCase = """<pad>""" if pad_token is None else pad_token lowerCAmelCase = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , ) lowerCAmelCase = do_lower_case lowerCAmelCase = remove_space lowerCAmelCase = keep_accents lowerCAmelCase = vocab_file lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase ) # Used for whitespace normalization in input texts # fmt : off lowerCAmelCase = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing lowerCAmelCase = re.compile( f'[{"".join(map(lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' ) def __getstate__( self ) -> Optional[int]: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , lowercase ) -> str: lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _snake_case ( self ) -> int: return len(self.sp_model ) def _snake_case ( self , lowercase ) -> str: lowerCAmelCase = self.non_printing_characters_re.sub("""""" , lowercase ) # Normalize whitespaces lowerCAmelCase = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization lowerCAmelCase = unicodedata.normalize("""NFC""" , lowercase ) return text def _snake_case ( self , lowercase , **lowercase ) -> List[str]: lowerCAmelCase = self.preprocess_text(lowercase ) return self.sp_model.encode(lowercase , out_type=lowercase ) def _snake_case ( self , lowercase ) -> int: return self.sp_model.PieceToId(lowercase ) def _snake_case ( self , lowercase ) -> str: return self.sp_model.IdToPiece(lowercase ) @staticmethod def _snake_case ( lowercase ) -> str: return out_string def _snake_case ( self , lowercase ) -> str: lowerCAmelCase = [] lowerCAmelCase = """""" lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowercase ) + token lowerCAmelCase = True lowerCAmelCase = [] else: current_sub_tokens.append(lowercase ) lowerCAmelCase = False out_string += self.sp_model.decode(lowercase ) return out_string def _snake_case ( self ) -> Dict[str, int]: lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]: if not os.path.isdir(lowercase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase ) elif not os.path.isfile(self.vocab_file ): with open(lowercase , """wb""" ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(lowercase ) return (out_vocab_file,) def _snake_case ( self , lowercase , lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(lowercase , lowercase ): lowerCAmelCase = self.preprocess_text(lowercase ) lowerCAmelCase = self.sp_model.encode(lowercase ) else: lowerCAmelCase = [self.preprocess_text(lowercase ) for t in text] lowerCAmelCase = self.sp_model.encode(lowercase ) if return_tensors is True or return_tensors == "pt": lowerCAmelCase = torch.tensor(lowercase ) return token_ids def _snake_case ( self , lowercase ) -> str: return 
self.sp_model.decode(lowercase ) def _snake_case ( self , lowercase ) -> List[int]: lowerCAmelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()] lowerCAmelCase = ( f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowercase ) + f'{self.bos_token}Bot:' ) return self.encode(text=lowercase )
46
0
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))  # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))  # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))  # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    return [list(x) for x in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    return [x[::-1] for x in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
365
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, None))


if __name__ == "__main__":
    print(f"{solution() = }")
329
0
"""simple docstring""" from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split _snake_case : Optional[Any] = datasets.load_iris() _snake_case : List[Any] = np.array(data['data']) _snake_case : str = np.array(data['target']) _snake_case : int = data['target_names'] _snake_case : Union[str, Any] = train_test_split(X, y) def A__ ( UpperCamelCase , UpperCamelCase ): return np.linalg.norm(np.array(UpperCamelCase ) - np.array(UpperCamelCase ) ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=5 ): A = zip(UpperCamelCase , UpperCamelCase ) # List of distances of all points from the point to be classified A = [] for data_point in data: A = euclidean_distance(data_point[0] , UpperCamelCase ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. A = [i[1] for i in sorted(UpperCamelCase )[:k]] # Most commonly occurring class among them # is the class into which the point is classified A = Counter(UpperCamelCase ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
292
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
124
0
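# A minimal, self-contained sketch of the same k-nearest-neighbours idea as the
# iris classifier above, on made-up toy points (all names here are illustrative):
# sort training pairs by squared Euclidean distance to the query point and take
# the majority label among the k closest.
from collections import Counter


def knn_classify(train, point, k=3):
    # train: list of ((feature, ...), label) pairs
    nearest = sorted(train, key=lambda t: sum((a - b) ** 2 for a, b in zip(t[0], point)))
    return Counter(label for _, label in nearest[:k]).most_common(1)[0][0]


toy_train = [((0, 0), "a"), ((0, 1), "a"), ((5, 5), "b"), ((6, 5), "b")]
print(knn_classify(toy_train, (1, 0)))  # -> "a": two of the three nearest are "a"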
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel __snake_case : List[Any] = False __snake_case : int = True __snake_case : Dict = False if __name__ == "__main__": __snake_case : Any = argparse.ArgumentParser() parser.add_argument( """--repo_path""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") __snake_case : str = parser.parse_args() __snake_case : Union[str, Any] = { """image_size""": """sample_size""", """num_res_blocks""": """layers_per_block""", """block_channels""": """block_out_channels""", """down_blocks""": """down_block_types""", """up_blocks""": """up_block_types""", """downscale_freq_shift""": """freq_shift""", """resnet_num_groups""": """norm_num_groups""", """resnet_act_fn""": """act_fn""", """resnet_eps""": """norm_eps""", """num_head_channels""": """attention_head_dim""", } __snake_case : Optional[Any] = { """time_steps""": """time_proj""", """mid""": """mid_block""", """downsample_blocks""": """down_blocks""", """upsample_blocks""": """up_blocks""", } __snake_case : List[str] = """""" if has_file(args.repo_path, """config.json""") else """unet""" with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader: __snake_case : Any = reader.read() __snake_case : Optional[Any] = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, """config.json"""): __snake_case : Optional[int] = UNetaDModel(**config) else: __snake_case : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel __snake_case : Tuple = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) __snake_case : Optional[Any] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: __snake_case : str = config[key] del config[key] __snake_case : Dict = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]] __snake_case : Any = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]] if do_only_weights: __snake_case : int = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin""")) __snake_case : int = {} for param_key, param_value in state_dict.items(): if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""): continue __snake_case : Any = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(""".""")[0] == key: __snake_case : Optional[Any] = param_value __snake_case : Any = True if not has_changed: __snake_case : List[str] = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
122
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=30 , _UpperCamelCase=4_00 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=True , _UpperCamelCase=1 / 2_55 , _UpperCamelCase=True , ): """simple docstring""" # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowerCAmelCase__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33} lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = min_resolution lowerCAmelCase__ = max_resolution lowerCAmelCase__ = do_resize lowerCAmelCase__ = size lowerCAmelCase__ = do_normalize lowerCAmelCase__ = image_mean lowerCAmelCase__ = image_std lowerCAmelCase__ = do_rescale lowerCAmelCase__ = rescale_factor lowerCAmelCase__ = do_pad def UpperCamelCase__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase=False ): """simple docstring""" if not batched: lowerCAmelCase__ = image_inputs[0] if isinstance(_UpperCamelCase , Image.Image ): lowerCAmelCase__ , lowerCAmelCase__ = image.size else: lowerCAmelCase__ , lowerCAmelCase__ = image.shape[1], image.shape[2] if w < h: lowerCAmelCase__ = int(self.size['shortest_edge'] * h / w ) lowerCAmelCase__ = self.size['shortest_edge'] elif w > h: lowerCAmelCase__ = self.size['shortest_edge'] lowerCAmelCase__ = int(self.size['shortest_edge'] * w / h ) else: lowerCAmelCase__ = self.size['shortest_edge'] lowerCAmelCase__ = self.size['shortest_edge'] else: lowerCAmelCase__ = [] for image in image_inputs: lowerCAmelCase__ , lowerCAmelCase__ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase__ = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0] lowerCAmelCase__ = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase): _SCREAMING_SNAKE_CASE : Dict = DeformableDetrImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = DeformableDetrImageProcessingTester(self ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase , 'image_mean' ) ) self.assertTrue(hasattr(_UpperCamelCase , 'image_std' ) ) self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) ) 
self.assertTrue(hasattr(_UpperCamelCase , 'do_resize' ) ) self.assertTrue(hasattr(_UpperCamelCase , 'do_rescale' ) ) self.assertTrue(hasattr(_UpperCamelCase , 'do_pad' ) ) self.assertTrue(hasattr(_UpperCamelCase , 'size' ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} ) self.assertEqual(image_processor.do_pad , _UpperCamelCase ) lowerCAmelCase__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCamelCase ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _UpperCamelCase ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , Image.Image ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase ) lowerCAmelCase__ = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase__ ( self ): """simple docstring""" # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , np.ndarray ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase__ ( self ): """simple docstring""" # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , torch.Tensor ) # Test not batched 
input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCamelCase__ ( self ): """simple docstring""" # prepare image and target lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: lowerCAmelCase__ = json.loads(f.read() ) lowerCAmelCase__ = {'image_id': 3_97_69, 'annotations': target} # encode them lowerCAmelCase__ = DeformableDetrImageProcessor() lowerCAmelCase__ = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors='pt' ) # verify pixel values lowerCAmelCase__ = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , _UpperCamelCase ) lowerCAmelCase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCamelCase , atol=1E-4 ) ) # verify area lowerCAmelCase__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCamelCase ) ) # verify boxes lowerCAmelCase__ = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCamelCase ) lowerCAmelCase__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCamelCase , atol=1E-3 ) ) # verify image_id lowerCAmelCase__ = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCamelCase ) ) # verify is_crowd lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCamelCase ) ) # verify class_labels lowerCAmelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCamelCase ) ) # verify orig_size lowerCAmelCase__ = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCamelCase ) ) # verify size lowerCAmelCase__ = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCamelCase ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" # prepare image, target and masks_path lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: lowerCAmelCase__ = json.loads(f.read() ) lowerCAmelCase__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target} lowerCAmelCase__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them lowerCAmelCase__ = DeformableDetrImageProcessor(format='coco_panoptic' ) lowerCAmelCase__ = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , 
masks_path=_UpperCamelCase , return_tensors='pt' ) # verify pixel values lowerCAmelCase__ = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , _UpperCamelCase ) lowerCAmelCase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCamelCase , atol=1E-4 ) ) # verify area lowerCAmelCase__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCamelCase ) ) # verify boxes lowerCAmelCase__ = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCamelCase ) lowerCAmelCase__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCamelCase , atol=1E-3 ) ) # verify image_id lowerCAmelCase__ = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCamelCase ) ) # verify is_crowd lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCamelCase ) ) # verify class_labels lowerCAmelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCamelCase ) ) # verify masks lowerCAmelCase__ = 82_28_73 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCamelCase ) # verify orig_size lowerCAmelCase__ = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCamelCase ) ) # verify size lowerCAmelCase__ = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCamelCase ) )
122
1
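# The conversion script two rows up renames config keys and state-dict prefixes
# while copying weights. A minimal sketch of that renaming step, with a made-up
# prefix map and fake tensors (real checkpoints hold torch tensors, not ints):
prefix_map = {"time_steps": "time_proj", "mid": "mid_block"}
old_state = {"time_steps.weight": 1, "mid.attn.bias": 2, "conv_in.weight": 3}

new_state = {}
for key, value in old_state.items():
    head, _, rest = key.partition(".")
    new_head = prefix_map.get(head, head)
    new_state[f"{new_head}.{rest}" if rest else new_head] = value

print(new_state)
# {'time_proj.weight': 1, 'mid_block.attn.bias': 2, 'conv_in.weight': 3}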
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3):
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}")
285
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast __SCREAMING_SNAKE_CASE : int = BloomTokenizerFast __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Union[str, Any] = '''tokenizer_file''' __SCREAMING_SNAKE_CASE : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def a ( self ): super().setUp() snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , **snake_case ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] snake_case_ = tokenizer.batch_encode_plus(snake_case )['input_ids'] self.assertListEqual(snake_case , snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self , snake_case=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.encode_plus(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) snake_case_ = None # Hotfixing padding = None self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case ) snake_case_ = next(iter(snake_case ) )['premise'] # pick up one 
data snake_case_ = list(sample_data.values() ) snake_case_ = list(map(tokenizer.encode , snake_case ) ) snake_case_ = [tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case , snake_case ) def a ( self ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
285
1
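# What the QFT circuit two rows up computes, as a dense matrix (a numpy sketch,
# up to qubit-ordering conventions): F[x, y] = exp(2*pi*1j*x*y / N) / sqrt(N)
# with N = 2**n_qubits. Unitarity is the key property the circuit must preserve.
import numpy as np


def qft_matrix(n_qubits):
    n = 2**n_qubits
    x, y = np.meshgrid(np.arange(n), np.arange(n))
    return np.exp(2j * np.pi * x * y / n) / np.sqrt(n)


f = qft_matrix(2)
print(np.allclose(f @ f.conj().T, np.eye(4)))  # True: the QFT is unitary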
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
261
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
261
1
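# The two power helpers above are the legs of the power triangle: with apparent
# power S and power factor cos(phi), P = S*cos(phi) and Q = S*sin(phi), so
# P**2 + Q**2 == S**2. A small self-contained check with made-up numbers:
import math

apparent, pf = 100.0, 0.8
real = apparent * pf
reactive = apparent * math.sqrt(1 - pf**2)
print(real, reactive)  # 80.0 and ~60.0
print(math.isclose(real**2 + reactive**2, apparent**2))  # True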
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class a_ : """simple docstring""" def __init__( self : Optional[Any] ,snake_case : Any ,): SCREAMING_SNAKE_CASE =parent SCREAMING_SNAKE_CASE =13 SCREAMING_SNAKE_CASE =7 SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =99 SCREAMING_SNAKE_CASE =32 SCREAMING_SNAKE_CASE =2 SCREAMING_SNAKE_CASE =4 SCREAMING_SNAKE_CASE =37 SCREAMING_SNAKE_CASE ='gelu' SCREAMING_SNAKE_CASE =0.1 SCREAMING_SNAKE_CASE =0.1 SCREAMING_SNAKE_CASE =512 SCREAMING_SNAKE_CASE =16 SCREAMING_SNAKE_CASE =2 SCREAMING_SNAKE_CASE =0.02 SCREAMING_SNAKE_CASE =3 SCREAMING_SNAKE_CASE =4 SCREAMING_SNAKE_CASE =None def _lowerCAmelCase ( self : str ): SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) SCREAMING_SNAKE_CASE =None if self.use_input_mask: SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =None if self.use_labels: SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size ) SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices ) SCREAMING_SNAKE_CASE =EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCAmelCase ( self : Union[str, Any] ): ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowerCAmelCase ( self : Tuple ,snake_case : Dict ,snake_case : Any ,snake_case : str ,snake_case : Any ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ): SCREAMING_SNAKE_CASE =TFEsmModel(config=snake_case ) SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask} SCREAMING_SNAKE_CASE =model(snake_case ) SCREAMING_SNAKE_CASE =[input_ids, input_mask] SCREAMING_SNAKE_CASE =model(snake_case ) SCREAMING_SNAKE_CASE =model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape 
,(self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : Any ,snake_case : int ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,): SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =TFEsmModel(config=snake_case ) SCREAMING_SNAKE_CASE ={ 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } SCREAMING_SNAKE_CASE =model(snake_case ) SCREAMING_SNAKE_CASE =[input_ids, input_mask] SCREAMING_SNAKE_CASE =model(snake_case ,encoder_hidden_states=snake_case ) # Also check the case where encoder outputs are not passed SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : Optional[Any] ,snake_case : str ,snake_case : int ,snake_case : List[str] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Dict ): SCREAMING_SNAKE_CASE =TFEsmForMaskedLM(config=snake_case ) SCREAMING_SNAKE_CASE =model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCAmelCase ( self : int ,snake_case : Any ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Optional[Any] ,snake_case : str ,snake_case : int ): SCREAMING_SNAKE_CASE =self.num_labels SCREAMING_SNAKE_CASE =TFEsmForTokenClassification(config=snake_case ) SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask} SCREAMING_SNAKE_CASE =model(snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _lowerCAmelCase ( self : Tuple ): SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) =config_and_inputs SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) __UpperCAmelCase = ( { 'feature-extraction': TFEsmModel, 'fill-mask': TFEsmForMaskedLM, 'text-classification': TFEsmForSequenceClassification, 'token-classification': TFEsmForTokenClassification, 'zero-shot': TFEsmForSequenceClassification, } if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE =TFEsmModelTester(self ) SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,hidden_size=37 ) def _lowerCAmelCase ( self : Tuple ): self.config_tester.run_common_tests() def _lowerCAmelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*snake_case ) def _lowerCAmelCase ( self : List[str] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case 
) def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) @slow def _lowerCAmelCase ( self : str ): for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE =TFEsmModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip('Protein models do not support embedding resizing.' ) def _lowerCAmelCase ( self : int ): pass @unittest.skip('Protein models do not support embedding resizing.' ) def _lowerCAmelCase ( self : Dict ): pass def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE =model_class(snake_case ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer SCREAMING_SNAKE_CASE =model.get_bias() assert isinstance(snake_case ,snake_case ) for k, v in name.items(): assert isinstance(snake_case ,tf.Variable ) else: SCREAMING_SNAKE_CASE =model.get_output_embeddings() assert x is None SCREAMING_SNAKE_CASE =model.get_bias() assert name is None @require_tf class a_ ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Any ): SCREAMING_SNAKE_CASE =TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) SCREAMING_SNAKE_CASE =tf.constant([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE =model(snake_case )[0] SCREAMING_SNAKE_CASE =[1, 6, 33] self.assertEqual(list(output.numpy().shape ) ,snake_case ) # compare the actual values for a slice. SCREAMING_SNAKE_CASE =tf.constant( [ [ [8.921_518, -10.589_814, -6.4_671_307], [-6.3_967_156, -13.911_377, -1.1_211_915], [-7.781_247, -13.951_557, -3.740_592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) ) @slow def _lowerCAmelCase ( self : Dict ): SCREAMING_SNAKE_CASE =TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) SCREAMING_SNAKE_CASE =tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) SCREAMING_SNAKE_CASE =model(snake_case )[0] # compare the actual values for a slice. SCREAMING_SNAKE_CASE =tf.constant( [ [ [0.14_443_092, 0.54_125_327, 0.3_247_739], [0.30_340_484, 0.00_526_676, 0.31_077_722], [0.32_278_043, -0.24_987_096, 0.3_414_628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
334
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ "salesforce/blip2-opt-2.7b": "https://huggingface.co./salesforce/blip2-opt-2.7b/resolve/main/config.json", } class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'blip_2_vision_model' def __init__( self : List[Any] ,snake_case : List[Any]=1408 ,snake_case : Optional[Any]=6144 ,snake_case : Optional[int]=39 ,snake_case : Optional[int]=16 ,snake_case : Optional[Any]=224 ,snake_case : Tuple=14 ,snake_case : Optional[Any]="gelu" ,snake_case : Union[str, Any]=0.00_001 ,snake_case : Dict=0.0 ,snake_case : Union[str, Any]=1e-10 ,snake_case : int=True ,**snake_case : str ,): super().__init__(**snake_case ) SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =patch_size SCREAMING_SNAKE_CASE =image_size SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =attention_dropout SCREAMING_SNAKE_CASE =layer_norm_eps SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =qkv_bias @classmethod def _lowerCAmelCase ( cls : Dict ,snake_case : Union[str, os.PathLike] ,**snake_case : str ): cls._set_token_in_kwargs(snake_case ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": SCREAMING_SNAKE_CASE =config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(snake_case ,**snake_case ) class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'blip_2_qformer' def __init__( self : Any ,snake_case : Dict=30522 ,snake_case : int=768 ,snake_case : List[Any]=12 ,snake_case : List[str]=12 ,snake_case : Optional[Any]=3072 ,snake_case : str="gelu" ,snake_case : Optional[Any]=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : Optional[Any]=512 ,snake_case : List[Any]=0.02 ,snake_case : List[str]=1e-12 ,snake_case : Tuple=0 ,snake_case : Union[str, Any]="absolute" ,snake_case : List[Any]=2 ,snake_case : List[str]=1408 ,**snake_case : Optional[Any] ,): super().__init__(pad_token_id=snake_case ,**snake_case ) SCREAMING_SNAKE_CASE =vocab_size SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =hidden_dropout_prob SCREAMING_SNAKE_CASE =attention_probs_dropout_prob SCREAMING_SNAKE_CASE =max_position_embeddings SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =layer_norm_eps SCREAMING_SNAKE_CASE =position_embedding_type SCREAMING_SNAKE_CASE =cross_attention_frequency SCREAMING_SNAKE_CASE =encoder_hidden_size @classmethod def _lowerCAmelCase ( cls : List[Any] ,snake_case : Union[str, os.PathLike] ,**snake_case : Dict ): cls._set_token_in_kwargs(snake_case ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": SCREAMING_SNAKE_CASE =config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(snake_case ,**snake_case ) class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'blip-2' __UpperCAmelCase = True def __init__( self : int ,snake_case : Dict=None ,snake_case : Tuple=None ,snake_case : str=None ,snake_case : Union[str, Any]=32 ,**snake_case : int ): super().__init__(**snake_case ) if vision_config is None: SCREAMING_SNAKE_CASE ={} logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' ) if qformer_config is None: SCREAMING_SNAKE_CASE ={} logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' ) if text_config is None: SCREAMING_SNAKE_CASE ={} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) SCREAMING_SNAKE_CASE =BlipaVisionConfig(**snake_case ) SCREAMING_SNAKE_CASE =BlipaQFormerConfig(**snake_case ) SCREAMING_SNAKE_CASE =text_config['model_type'] if 'model_type' in text_config else 'opt' SCREAMING_SNAKE_CASE =CONFIG_MAPPING[text_model_type](**snake_case ) SCREAMING_SNAKE_CASE =self.text_config.tie_word_embeddings SCREAMING_SNAKE_CASE =self.text_config.is_encoder_decoder SCREAMING_SNAKE_CASE =num_query_tokens SCREAMING_SNAKE_CASE =self.vision_config.hidden_size SCREAMING_SNAKE_CASE =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES SCREAMING_SNAKE_CASE =1.0 SCREAMING_SNAKE_CASE =0.02 @classmethod def _lowerCAmelCase ( cls : Union[str, Any] ,snake_case : BlipaVisionConfig ,snake_case : BlipaQFormerConfig ,snake_case : PretrainedConfig ,**snake_case : Any ,): return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**snake_case ,) def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE =self.vision_config.to_dict() SCREAMING_SNAKE_CASE =self.qformer_config.to_dict() SCREAMING_SNAKE_CASE =self.text_config.to_dict() SCREAMING_SNAKE_CASE =self.__class__.model_type return output
334
1
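# The Blip2Config row above composes one parent config from vision/qformer/text
# sub-configs. A dependency-free sketch of that composition pattern with plain
# dataclasses (field names and defaults here are illustrative, not the HF API):
from dataclasses import asdict, dataclass, field


@dataclass
class VisionCfg:
    hidden_size: int = 1408
    num_hidden_layers: int = 39


@dataclass
class QFormerCfg:
    hidden_size: int = 768
    encoder_hidden_size: int = 1408  # must match the vision hidden size


@dataclass
class ComposedCfg:
    vision: VisionCfg = field(default_factory=VisionCfg)
    qformer: QFormerCfg = field(default_factory=QFormerCfg)
    num_query_tokens: int = 32

    def to_dict(self):
        return asdict(self)  # nested dataclasses serialize recursively


print(ComposedCfg().to_dict()["qformer"]["encoder_hidden_size"])  # 1408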
"""simple docstring""" from collections import deque from .hash_table import HashTable class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : str ,*A : str ,**A : List[str] ): super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def UpperCamelCase_ ( self : Optional[Any] ,A : str ,A : Optional[int] ): __A = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(_SCREAMING_SNAKE_CASE ) __A = self.values[key] def UpperCamelCase_ ( self : Dict ): return ( sum(self.charge_factor - len(_SCREAMING_SNAKE_CASE ) for slot in self.values ) / self.size_table * self.charge_factor ) def UpperCamelCase_ ( self : Optional[Any] ,A : Union[str, Any] ,A : Union[str, Any]=None ): if not ( len(self.values[key] ) == self.charge_factor and self.values.count(_SCREAMING_SNAKE_CASE ) == 0 ): return key return super()._collision_resolution(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
371
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))  # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))  # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))  # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    return [list(x) for x in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    return [x[::-1] for x in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
124
0
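# The hash-table row above chains collisions into deques but leans on an
# external HashTable base class. A self-contained sketch of the same
# separate-chaining idea (bucket count and names are illustrative):
from collections import deque


class ChainedHashTable:
    def __init__(self, size=8):
        self.buckets = [deque() for _ in range(size)]

    def put(self, key, value):
        bucket = self.buckets[hash(key) % len(self.buckets)]
        for i, (k, _) in enumerate(bucket):
            if k == key:
                bucket[i] = (key, value)  # overwrite an existing key in place
                return
        bucket.appendleft((key, value))

    def get(self, key):
        bucket = self.buckets[hash(key) % len(self.buckets)]
        for k, v in bucket:
            if k == key:
                return v
        raise KeyError(key)


t = ChainedHashTable()
t.put("a", 1)
t.put("a", 2)
print(t.get("a"))  # 2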
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch snake_case__ : int = logging.get_logger(__name__) class A_ ( _lowerCamelCase ): lowerCAmelCase__ = ['pixel_values'] def __init__(self :Any , _UpperCamelCase :str = True , _UpperCamelCase :Optional[Any] = None , _UpperCamelCase :Any = PILImageResampling.BILINEAR , _UpperCamelCase :List[str] = True , _UpperCamelCase :str = 1 / 255 , _UpperCamelCase :List[Any] = True , _UpperCamelCase :str = None , _UpperCamelCase :Union[str, Any] = True , **_UpperCamelCase :Union[str, Any] , )-> None: super().__init__(**_SCREAMING_SNAKE_CASE ) __A = size if size is not None else {'''shortest_edge''': 224} __A = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) __A = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256} __A = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) __A = do_resize __A = size __A = resample __A = do_rescale __A = rescale_factor __A = do_center_crop __A = crop_size __A = do_flip_channel_order def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :str , _UpperCamelCase :List[str] , _UpperCamelCase :Optional[int] = PIL.Image.BILINEAR , _UpperCamelCase :List[Any] = None , **_UpperCamelCase :Optional[Any] , )-> np.ndarray: __A = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" ) __A = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=_SCREAMING_SNAKE_CASE ) return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :Dict , _UpperCamelCase :List[Any] , _UpperCamelCase :int , _UpperCamelCase :Dict = None , **_UpperCamelCase :List[str] , )-> np.ndarray: __A = get_size_dict(_SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(_SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :Tuple , _UpperCamelCase :int , _UpperCamelCase :Optional[int] , _UpperCamelCase :List[str] = None , **_UpperCamelCase :int , )-> Any: return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :List[Any] , _UpperCamelCase :Union[str, Any] = None )-> np.ndarray: return flip_channel_order(_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :int , _UpperCamelCase :List[str] , _UpperCamelCase :Optional[int] = None , _UpperCamelCase :Tuple = None , _UpperCamelCase :List[Any] = None , _UpperCamelCase :List[str] = None , _UpperCamelCase :Any = None , _UpperCamelCase :Optional[Any] = None , _UpperCamelCase :int = None , _UpperCamelCase :Any = None , _UpperCamelCase :int = None , _UpperCamelCase :str = ChannelDimension.FIRST , **_UpperCamelCase :List[Any] , )-> PIL.Image.Image: __A = do_resize if do_resize is not None else self.do_resize __A = resample if resample is not None else self.resample __A = do_rescale if do_rescale is not None else self.do_rescale __A = rescale_factor if rescale_factor is not None else self.rescale_factor __A = do_center_crop if do_center_crop is not None else self.do_center_crop __A = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) __A = size if size is not None else self.size __A = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) __A = crop_size if crop_size is not None else self.crop_size __A = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) __A = make_list_of_images(_SCREAMING_SNAKE_CASE ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) # All transformations expect numpy arrays. 
__A = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images] if do_resize: __A = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: __A = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: __A = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: __A = [self.flip_channel_order(image=_SCREAMING_SNAKE_CASE ) for image in images] __A = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] __A = {'''pixel_values''': images} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :Tuple , _UpperCamelCase :Any = None )-> Optional[Any]: __A = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(_SCREAMING_SNAKE_CASE ): __A = target_sizes.numpy() __A = [] for idx in range(len(_SCREAMING_SNAKE_CASE ) ): __A = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_SCREAMING_SNAKE_CASE ) __A = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_SCREAMING_SNAKE_CASE ) else: __A = logits.argmax(dim=1 ) __A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
117
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
329
0
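# The image-processor row above pipelines resize -> center-crop -> rescale ->
# channel flip. A numpy-only sketch of the crop/rescale/flip steps on a fake
# image (resize is omitted; it needs an interpolation library such as PIL):
import numpy as np


def center_crop(img, height, width):
    h, w = img.shape[:2]
    top, left = (h - height) // 2, (w - width) // 2
    return img[top : top + height, left : left + width]


img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
img = img.astype(np.float32) / 255.0  # rescale, like rescale_factor=1/255
img = center_crop(img, 224, 224)      # like crop_size={"height": 224, "width": 224}
img = img[..., ::-1]                  # flip channel order (RGB <-> BGR)
print(img.shape)  # (224, 224, 3)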
"""simple docstring""" from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _UpperCamelCase ( _UpperCAmelCase ): '''simple docstring''' __UpperCAmelCase : Tuple ="""openai/whisper-base""" __UpperCAmelCase : Any =( """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """ """transcribed text.""" ) __UpperCAmelCase : List[Any] ="""transcriber""" __UpperCAmelCase : Optional[Any] =WhisperProcessor __UpperCAmelCase : Union[str, Any] =WhisperForConditionalGeneration __UpperCAmelCase : Tuple =["""audio"""] __UpperCAmelCase : List[str] =["""text"""] def snake_case ( self , __a ): return self.pre_processor(lowercase_ , return_tensors="pt" ).input_features def snake_case ( self , __a ): return self.model.generate(inputs=lowercase_ ) def snake_case ( self , __a ): return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
371
"""simple docstring""" import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin A : Optional[int] = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class _UpperCamelCase : '''simple docstring''' def __init__( self , __a , __a=16 , __a=13 , __a=7 , __a=14 , __a=10 , __a=19 , __a=5 , __a=4 , __a=True , __a=16 , __a=2 , __a=4 , __a=4 , __a="gelu" , __a=0.1 , __a=0.1 , __a=[1, 2, 3, 4, 5] , __a=25 , __a=5 , ): __lowerCAmelCase = d_model __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = prediction_length __lowerCAmelCase = context_length __lowerCAmelCase = cardinality __lowerCAmelCase = num_time_features __lowerCAmelCase = lags_sequence __lowerCAmelCase = embedding_dimension __lowerCAmelCase = is_training __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = context_length __lowerCAmelCase = prediction_length + label_length __lowerCAmelCase = label_length __lowerCAmelCase = moving_average __lowerCAmelCase = autocorrelation_factor def snake_case ( self ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def snake_case ( self , __a ): __lowerCAmelCase = config.context_length + max(config.lags_sequence ) __lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) __lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) __lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) __lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs __lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) __lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] ) __lowerCAmelCase = { "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def snake_case ( self ): __lowerCAmelCase = self.get_config() __lowerCAmelCase = self.prepare_autoformer_inputs_dict(__a ) return config, 
inputs_dict def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def snake_case ( self , __a , __a ): __lowerCAmelCase = AutoformerModel(config=__a ).to(__a ).eval() __lowerCAmelCase = model(**__a ) __lowerCAmelCase = outputs.encoder_last_hidden_state __lowerCAmelCase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase = model.get_encoder() encoder.save_pretrained(__a ) __lowerCAmelCase = AutoformerEncoder.from_pretrained(__a ).to(__a ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**__a ) __lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) __lowerCAmelCase = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) __lowerCAmelCase = encoder(inputs_embeds=__a )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) __lowerCAmelCase = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) __lowerCAmelCase = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) __lowerCAmelCase = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) __lowerCAmelCase = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase = model.get_decoder() decoder.save_pretrained(__a ) __lowerCAmelCase = AutoformerDecoder.from_pretrained(__a ).to(__a ) __lowerCAmelCase = decoder( trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[Any] =(AutoformerModel, AutoformerForPrediction) if is_torch_available() else () __UpperCAmelCase : List[Any] =(AutoformerForPrediction,) if is_torch_available() else () __UpperCAmelCase : Tuple ={"""feature-extraction""": AutoformerModel} if is_torch_available() else {} __UpperCAmelCase : Tuple =False __UpperCAmelCase : Any =False __UpperCAmelCase : Dict =False __UpperCAmelCase : Union[str, Any] =False __UpperCAmelCase : Union[str, Any] =False __UpperCAmelCase : Optional[Any] =False def snake_case ( self ): __lowerCAmelCase = AutoformerModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=__a , has_text_modality=__a ) def snake_case ( self ): self.config_tester.run_common_tests() def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(__a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__a ) __lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(__a , output_loading_info=__a ) self.assertEqual(info["missing_keys"] , [] ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_encoder_decoder_model_standalone(*__a ) @unittest.skip(reason="Model has no tokens embeddings" ) def snake_case ( self ): pass def snake_case ( self ): __lowerCAmelCase = inspect.signature(getattr(__a , "forward" ) ) # The main input is the name of the argument after `self` __lowerCAmelCase = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , __a ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(__a ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(__a )] , __a ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True __lowerCAmelCase = getattr(self.model_tester , "seq_length" , __a ) __lowerCAmelCase = getattr(self.model_tester , "decoder_seq_length" , __a ) __lowerCAmelCase = getattr(self.model_tester , "encoder_seq_length" , __a ) __lowerCAmelCase = getattr(self.model_tester , "d_model" , __a ) __lowerCAmelCase = getattr(self.model_tester , "num_attention_heads" , __a ) __lowerCAmelCase = d_model // num_attention_heads for model_class in self.all_model_classes: __lowerCAmelCase = True __lowerCAmelCase = False __lowerCAmelCase = True __lowerCAmelCase = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) ) __lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCAmelCase = True __lowerCAmelCase = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) ) __lowerCAmelCase = outputs.encoder_attentions self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) __lowerCAmelCase = len(__a ) __lowerCAmelCase = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(__a , __a ) # decoder attentions __lowerCAmelCase = outputs.decoder_attentions self.assertIsInstance(__a , (list, tuple) ) self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions 
__lowerCAmelCase = outputs.cross_attentions self.assertIsInstance(__a , (list, tuple) ) self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine __lowerCAmelCase = True __lowerCAmelCase = True __lowerCAmelCase = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) ) self.assertEqual(out_len + 2 , len(__a ) ) __lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def snake_case ( self ): super().test_retain_grad_hidden_states_attentions() def _lowerCamelCase ( _UpperCamelCase="train-batch.pt" ): '''simple docstring''' __lowerCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_UpperCamelCase , repo_type="dataset" ) __lowerCAmelCase = torch.load(_UpperCamelCase , map_location=_UpperCamelCase ) return batch @require_torch @slow class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def snake_case ( self ): __lowerCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a ) __lowerCAmelCase = prepare_batch() with torch.no_grad(): __lowerCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] __lowerCAmelCase = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , __a ) __lowerCAmelCase = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=__a ) self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) ) def snake_case ( self ): __lowerCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a ) __lowerCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): __lowerCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state __lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , __a ) __lowerCAmelCase = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=__a ) self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) ) def snake_case ( self ): __lowerCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a ) __lowerCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): __lowerCAmelCase = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , 
past_observed_mask=batch["past_observed_mask"] , ) __lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , __a ) __lowerCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=__a ) __lowerCAmelCase = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1e-1 ) )
259
0
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT")
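# Hedged usage sketch for the pipeline exercised above, reusing the same
# checkpoint and image as the slow test. The output dict carries the two keys
# the assertions check: a "predicted_depth" tensor and a "depth" PIL image.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(result["predicted_depth"].shape)  # per-pixel depth, torch.Tensor
result["depth"].save("depth.png")       # grayscale visualization via PIL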
122
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
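# Hedged sketch: a tiny encode -> quantize -> decode round trip through the
# VQ autoencoder defined above, via the public diffusers export. The 32x32
# random input and the batch size are arbitrary choices for illustration.
import torch
from diffusers import VQModel

model = VQModel()  # default config: 3 channels in/out, 256 VQ embeddings
model.eval()
sample = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(sample).latents         # pre-quantization latents
    reconstruction = model.decode(latents).sample  # quantized + decoded image
print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])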
122
1
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed __A : List[str] = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def UpperCamelCase_ ( A__ : Any ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : Any ): '''simple docstring''' if args.student_type == "roberta": lowerCAmelCase_ : Any = False elif args.student_type == "gpt2": lowerCAmelCase_ : List[Any] = False def UpperCamelCase_ ( A__ : Any , A__ : Dict ): '''simple docstring''' if args.student_type == "roberta": lowerCAmelCase_ : Optional[int] = False def UpperCamelCase_ ( ): '''simple docstring''' lowerCAmelCase_ : List[str] = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=A__ , required=A__ , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=A__ , required=A__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=A__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=A__ , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=A__ , required=A__ , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=A__ , type=A__ , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=A__ , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=A__ , required=A__ , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=A__ , help="""Temperature for the softmax temperature.""" ) 
parser.add_argument( """--alpha_ce""" , default=0.5 , type=A__ , help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=A__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=A__ , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=A__ , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=A__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.15 , type=A__ , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=A__ , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=A__ , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=A__ , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=A__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=A__ , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=A__ , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=A__ , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=A__ , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.05 , type=A__ , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=A__ , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5E-4 , type=A__ , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=A__ , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=A__ , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.02 , type=A__ , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=A__ , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=A__ , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=A__ , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=A__ , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=A__ , default=5_00 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=A__ , default=40_00 , help="""Checkpoint interval.""" ) lowerCAmelCase_ : List[str] = parser.parse_args() sanity_checks(A__ ) # ARGS # init_gpu_params(A__ ) set_seed(A__ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite' """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'Experiment will be dumped and logged in {args.dump_path}' ) # SAVE PARAMS # logger.info(f'Param: {args}' ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(A__ ) , A__ , indent=4 ) git_log(args.dump_path ) lowerCAmelCase_ : Tuple = MODEL_CLASSES[args.student_type] lowerCAmelCase_ : Dict = MODEL_CLASSES[args.teacher_type] # TOKENIZER # lowerCAmelCase_ : Union[str, Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name ) lowerCAmelCase_ : str = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): lowerCAmelCase_ : Optional[Any] = tokenizer.all_special_tokens.index(A__ ) lowerCAmelCase_ : List[Any] = tokenizer.all_special_ids[idx] logger.info(f'Special tokens {special_tok_ids}' ) lowerCAmelCase_ : Optional[Any] = special_tok_ids lowerCAmelCase_ : Union[str, Any] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'Loading data from {args.data_file}' ) with open(args.data_file , """rb""" ) as fp: lowerCAmelCase_ : Union[str, Any] = pickle.load(A__ ) if args.mlm: logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)' ) with open(args.token_counts , """rb""" ) as fp: lowerCAmelCase_ : Union[str, Any] = pickle.load(A__ ) lowerCAmelCase_ : Any = np.maximum(A__ , 1 ) ** 
-args.mlm_smoothing for idx in special_tok_ids.values(): lowerCAmelCase_ : Union[str, Any] = 0.0 # do not predict special tokens lowerCAmelCase_ : str = torch.from_numpy(A__ ) else: lowerCAmelCase_ : Optional[Any] = None lowerCAmelCase_ : Union[str, Any] = LmSeqsDataset(params=A__ , data=A__ ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f'Loading student config from {args.student_config}' ) lowerCAmelCase_ : str = student_config_class.from_pretrained(args.student_config ) lowerCAmelCase_ : Dict = True if args.student_pretrained_weights is not None: logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' ) lowerCAmelCase_ : List[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=A__ ) else: lowerCAmelCase_ : Dict = student_model_class(A__ ) if args.n_gpu > 0: student.to(f'cuda:{args.local_rank}' ) logger.info("""Student loaded.""" ) # TEACHER # lowerCAmelCase_ : List[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A__ ) if args.n_gpu > 0: teacher.to(f'cuda:{args.local_rank}' ) logger.info(f'Teacher loaded from {args.teacher_name}.' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(A__ , A__ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(A__ , A__ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() lowerCAmelCase_ : Optional[int] = Distiller( params=A__ , dataset=A__ , token_probs=A__ , student=A__ , teacher=A__ ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
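# Hedged usage sketch for the training entry point above (the script filename
# and all file paths are placeholders, not verified artifacts). The flag
# combination respects the script's own sanity_checks: a distilbert student
# distilled from a bert teacher with --mlm set, hence alpha_mlm > 0,
# alpha_clm == 0, and --token_counts supplied.
#
#   python train.py \
#       --force \
#       --dump_path serialization_dir/my_distillation_run \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert \
#       --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0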
354
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input __A : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" def UpperCamelCase_ ( ): '''simple docstring''' lowerCAmelCase_ : List[Any] = _ask_options( """In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: lowerCAmelCase_ : str = get_sagemaker_input() else: lowerCAmelCase_ : Optional[int] = get_cluster_input() return config def UpperCamelCase_ ( A__ : Optional[Any]=None ): '''simple docstring''' if subparsers is not None: lowerCAmelCase_ : List[str] = subparsers.add_parser("""config""" , description=A__ ) else: lowerCAmelCase_ : Optional[int] = argparse.ArgumentParser("""Accelerate config command""" , description=A__ ) parser.add_argument( """--config_file""" , default=A__ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , ) if subparsers is not None: parser.set_defaults(func=A__ ) return parser def UpperCamelCase_ ( A__ : Any ): '''simple docstring''' lowerCAmelCase_ : Dict = get_user_input() if args.config_file is not None: lowerCAmelCase_ : List[str] = args.config_file else: if not os.path.isdir(A__ ): os.makedirs(A__ ) lowerCAmelCase_ : List[Any] = default_yaml_config_file if config_file.endswith(""".json""" ): config.to_json_file(A__ ) else: config.to_yaml_file(A__ ) print(f'accelerate configuration saved at {config_file}' ) def UpperCamelCase_ ( ): '''simple docstring''' lowerCAmelCase_ : str = config_command_parser() lowerCAmelCase_ : Tuple = parser.parse_args() config_command(A__ ) if __name__ == "__main__": main()
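# Hedged usage sketch for the command wired up above. Run it bare to be
# walked through the interactive prompts, or point --config_file (the only
# argument defined in the parser above) at an explicit destination:
#
#   accelerate config
#   accelerate config --config_file path/to/default_config.yaml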
89
0
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = initializer_range __a = use_labels __a = scope def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = self.get_config() return config, input_ids, input_mask, token_labels def a__ ( self ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) def a__ ( self ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = self.prepare_config_and_inputs() __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase ) __a = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = True __a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval() # first forward pass __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , ) __a = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size ) __a = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __a = torch.cat([input_ids, next_tokens] , dim=-1 ) __a = torch.cat([input_mask, next_mask] , dim=-1 ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] # select random slice __a = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a = output_from_no_past[:, -3:, random_slice_idx].detach() __a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ): __a = BertGenerationDecoder(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self ): __a , __a , __a , __a = self.prepare_config_and_inputs() __a = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else () _snake_case : Union[str, Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def a__ ( self ): __a = BertGenerationEncoderTester(self ) __a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 ) def a__ ( self ): self.config_tester.run_common_tests() def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def a__ ( self ): __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = "bert" self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase ) def a__ ( self ): # This regression test was failing with PyTorch < 1.3 ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( 
__a ) , ( __a ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __a = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase ) @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(lowerCamelCase ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
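# Hedged sketch reproducing the slow integration check above outside the test
# harness: run the public checkpoint on the same hard-coded token ids and
# inspect the encoder output shape the assertion expects.
import torch
from transformers import BertGenerationEncoder

model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
    hidden_states = model(input_ids)[0]
print(hidden_states.shape)  # torch.Size([1, 8, 1024]), per the test above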
261
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase( a , a , a , a="attention" ): __a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def _lowerCamelCase( a , a , a , a=False ): if split_mlp_wi: __a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"] __a = (wi_a, wi_a) else: __a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def _lowerCamelCase( a , a , a , a ): return params[F"{prefix}/layers_{i}/{layer_name}/scale"] def _lowerCamelCase( a , *, a , a ): __a = traverse_util.flatten_dict(variables["target"] ) __a = {"/".join(a ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __a = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:" , a ) __a = collections.OrderedDict() # Shared embeddings. __a = old["token_embedder/embedding"] # Encoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (MLP). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "encoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old[ "encoder/relpos_bias/rel_embedding" ].T __a = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (Cross Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 2 (MLP). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "decoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old["decoder/decoder_norm/scale"] __a = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __a = old["decoder/logits_dense/kernel"].T return new def _lowerCamelCase( a , a ): __a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) __a = state_dict["shared.weight"] return state_dict def _lowerCamelCase( a , a , a , a ): __a = checkpoints.load_tax_checkpoint(a ) __a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a ) __a = make_state_dict(a , a ) model.load_state_dict(a , strict=a ) def _lowerCamelCase( a , a , a , a = False ): __a = TaConfig.from_json_file(a ) print(F"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __a = TaEncoderModel(a ) else: __a = TaForConditionalGeneration(a ) # Load weights from tf checkpoint load_tax_weights_in_ta(a , a , a , a ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(a ) # Verify that we can load the checkpoint. model.from_pretrained(a ) print("Done" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
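# Hedged usage sketch for the converter above; the script filename and every
# path are placeholders. The flags mirror the argparse definitions in this
# file (--is_encoder_only builds the encoder-only model instead of the
# conditional-generation one).
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_file /path/to/t5_config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --is_encoder_only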
261
1
import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class _lowerCAmelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : Union[str, Any] ="""MCTCTFeatureExtractor""" a_ : Any ="""AutoTokenizer""" def __init__( self : int , UpperCamelCase : Dict , UpperCamelCase : Tuple ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) _snake_case : Any = self.feature_extractor _snake_case : int = False def __call__( self : List[str] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[str] ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*UpperCamelCase , **UpperCamelCase ) if "raw_speech" in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' ) _snake_case : Optional[Any] = kwargs.pop('raw_speech' ) else: _snake_case : Optional[int] = kwargs.pop('audio' , UpperCamelCase ) _snake_case : List[Any] = kwargs.pop('sampling_rate' , UpperCamelCase ) _snake_case : Tuple = kwargs.pop('text' , UpperCamelCase ) if len(UpperCamelCase ) > 0: _snake_case : Optional[int] = args[0] _snake_case : Tuple = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: _snake_case : Optional[int] = self.feature_extractor(UpperCamelCase , *UpperCamelCase , sampling_rate=UpperCamelCase , **UpperCamelCase ) if text is not None: _snake_case : Union[str, Any] = self.tokenizer(UpperCamelCase , **UpperCamelCase ) if text is None: return inputs elif audio is None: return encodings else: _snake_case : List[Any] = encodings['input_ids'] return inputs def UpperCamelCase_ ( self : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def UpperCamelCase_ ( self : int , *UpperCamelCase : List[str] , **UpperCamelCase : Optional[int] ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*UpperCamelCase , **UpperCamelCase ) _snake_case : List[str] = kwargs.pop('input_features' , UpperCamelCase ) _snake_case : Union[str, Any] = kwargs.pop('labels' , UpperCamelCase ) if len(UpperCamelCase ) > 0: _snake_case : List[str] = args[0] _snake_case : Any = args[1:] if input_features is not None: _snake_case : Optional[Any] = self.feature_extractor.pad(UpperCamelCase , *UpperCamelCase , **UpperCamelCase ) if labels is not None: _snake_case : Any = self.tokenizer.pad(UpperCamelCase , **UpperCamelCase ) if labels is None: return input_features elif input_features is None: return labels else: _snake_case : int = labels['input_ids'] return input_features def UpperCamelCase_ ( self : Optional[Any] , *UpperCamelCase : int , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @contextmanager def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your audio inputs, or in a separate call.' ) _snake_case : Tuple = True _snake_case : List[Any] = self.tokenizer yield _snake_case : Optional[Any] = self.feature_extractor _snake_case : List[str] = False
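# Hedged sketch of the dispatch logic above: audio routes to the feature
# extractor, text to the tokenizer, and passing both folds the token ids into
# the audio features as labels. The checkpoint name is an assumption for
# illustration; any M-CTC-T checkpoint that ships a processor would do.
import numpy as np
from transformers import MCTCTProcessor

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16_000, text="hello world")
print(sorted(inputs.keys()))  # audio features plus the tokenized labels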
260
# This is the module that test_patching.py uses to test patch_submodule()

import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests


lowerCAmelCase_ = open  # noqa: we just need to have a builtin inside this module to test it properly
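# Hedged test-side sketch (this would live in test_patching.py, not here):
# patch_submodule is assumed to come from datasets.utils.patching, per the
# comment at the top of this module, and the module filename is assumed to be
# _test_patching.py. It temporarily swaps an attribute path inside the target
# module and restores it on context exit.
from datasets.utils.patching import patch_submodule

import _test_patching  # the module defined above (name assumed)

mock = "__dummy_join__"
with patch_submodule(_test_patching, "os.path.join", mock):
    assert _test_patching.os.path.join is mock  # patched inside the block
assert _test_patching.os.path.join is _test_patching.join  # restored afterwards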
260
1
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _A = 'src/diffusers' _A = '.' # This is to make sure the diffusers module imported is the one in the repo. _A = importlib.util.spec_from_file_location( 'diffusers', os.path.join(DIFFUSERS_PATH, '__init__.py'), submodule_search_locations=[DIFFUSERS_PATH], ) _A = spec.loader.load_module() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return line.startswith(SCREAMING_SNAKE_CASE__ ) or len(SCREAMING_SNAKE_CASE__ ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , SCREAMING_SNAKE_CASE__ ) is not None def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ): __UpperCamelCase =object_name.split('.' ) __UpperCamelCase =0 # First let's find the module where our object lives. __UpperCamelCase =parts[i] while i < len(SCREAMING_SNAKE_CASE__ ) and not os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , F'{module}.py' ) ): i += 1 if i < len(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , parts[i] ) if i >= len(SCREAMING_SNAKE_CASE__ ): raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' ) with open(os.path.join(SCREAMING_SNAKE_CASE__ , F'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: __UpperCamelCase =f.readlines() # Now let's find the class / func in the code! __UpperCamelCase ='' __UpperCamelCase =0 for name in parts[i + 1 :]: while ( line_index < len(SCREAMING_SNAKE_CASE__ ) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(SCREAMING_SNAKE_CASE__ ): raise ValueError(F' {object_name} does not match any function or class in {module}.' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __UpperCamelCase =line_index while line_index < len(SCREAMING_SNAKE_CASE__ ) and _should_continue(lines[line_index] , SCREAMING_SNAKE_CASE__ ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __UpperCamelCase =lines[start_index:line_index] return "".join(SCREAMING_SNAKE_CASE__ ) _A = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)') _A = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)') _A = re.compile(R'<FILL\s+[^>]*>') def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =code.split('\n' ) __UpperCamelCase =0 while idx < len(SCREAMING_SNAKE_CASE__ ) and len(lines[idx] ) == 0: idx += 1 if idx < len(SCREAMING_SNAKE_CASE__ ): return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0] return "" def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ): __UpperCamelCase =len(get_indent(SCREAMING_SNAKE_CASE__ ) ) > 0 if has_indent: __UpperCamelCase =F'class Bla:\n{code}' __UpperCamelCase =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase , __UpperCamelCase =style_docstrings_in_code(SCREAMING_SNAKE_CASE__ ) return result[len('class Bla:\n' ) :] if has_indent else result def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ): with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' , newline='\n' ) as f: __UpperCamelCase =f.readlines() __UpperCamelCase =[] __UpperCamelCase =0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =_re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =search.groups() __UpperCamelCase =find_code_in_diffusers(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =get_indent(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =line_index + 1 if indent == theoretical_indent else line_index + 2 __UpperCamelCase =theoretical_indent __UpperCamelCase =start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __UpperCamelCase =True while line_index < len(SCREAMING_SNAKE_CASE__ ) and should_continue: line_index += 1 if line_index >= len(SCREAMING_SNAKE_CASE__ ): break __UpperCamelCase =lines[line_index] __UpperCamelCase =_should_continue(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and re.search(F'^{indent}# End copy' , SCREAMING_SNAKE_CASE__ ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __UpperCamelCase =lines[start_index:line_index] __UpperCamelCase =''.join(SCREAMING_SNAKE_CASE__ ) # Remove any nested `Copied from` comments to avoid circular copies __UpperCamelCase =[line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(SCREAMING_SNAKE_CASE__ ) is None] __UpperCamelCase ='\n'.join(SCREAMING_SNAKE_CASE__ ) # Before comparing, use the `replace_pattern` on the original code. 
if len(SCREAMING_SNAKE_CASE__ ) > 0: __UpperCamelCase =replace_pattern.replace('with' , '' ).split(',' ) __UpperCamelCase =[_re_replace_pattern.search(SCREAMING_SNAKE_CASE__ ) for p in patterns] for pattern in patterns: if pattern is None: continue __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =pattern.groups() __UpperCamelCase =re.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if option.strip() == "all-casing": __UpperCamelCase =re.sub(obja.lower() , obja.lower() , SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =re.sub(obja.upper() , obja.upper() , SCREAMING_SNAKE_CASE__ ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __UpperCamelCase =blackify(lines[start_index - 1] + theoretical_code ) __UpperCamelCase =theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __UpperCamelCase =lines[:start_index] + [theoretical_code] + lines[line_index:] __UpperCamelCase =start_index + 1 if overwrite and len(SCREAMING_SNAKE_CASE__ ) > 0: # Warn the user a file has been modified. print(F'Detected changes, rewriting {filename}.' ) with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(SCREAMING_SNAKE_CASE__ ) return diffs def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : bool = False ): __UpperCamelCase =glob.glob(os.path.join(SCREAMING_SNAKE_CASE__ , '**/*.py' ) , recursive=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[] for filename in all_files: __UpperCamelCase =is_copy_consistent(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs] if not overwrite and len(SCREAMING_SNAKE_CASE__ ) > 0: __UpperCamelCase ='\n'.join(SCREAMING_SNAKE_CASE__ ) raise Exception( 'Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') _A = parser.parse_args() check_copies(args.fix_and_overwrite)
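# Hedged usage sketch, following the comment at the top of this file: run it
# from the repository root. Without the flag the script only reports
# "Copied from" blocks that drifted from their source; with it, the files are
# rewritten in place.
#
#   python utils/check_copies.py
#   python utils/check_copies.py --fix_and_overwrite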
62
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
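# Hedged mini-demo of the two entry points above on a sorted list: both
# narrow the search window by thirds and fall back to a linear scan once the
# window is smaller than `precision`.
sorted_nums = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23]
print(ite_ternary_search(sorted_nums, 13))                           # 6
print(rec_ternary_search(0, len(sorted_nums) - 1, sorted_nums, 13))  # 6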
124
0
"""Mapping from dependency name to its pinned pip requirement."""

deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
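# Hedged sketch of how a pin table like `deps` is typically consumed: feed
# entries to a version checker so stale installs fail fast. The
# require_version helper used here is transformers', chosen purely for
# illustration.
from transformers.utils.versions import require_version

require_version(deps["torch"])         # raises if the installed torch is < 1.4
require_version(deps["transformers"])  # raises if transformers is < 4.25.1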
170
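The mapping above is the kind of name-to-pin table a setup script indexes into when assembling install requirements; a minimal hypothetical helper (the two-entry `deps` excerpt and the `deps_list` name are illustrative, not taken from the row):

# Resolve pinned requirement strings from a name -> specifier table.
deps = {"torch": "torch>=1.4", "numpy": "numpy"}  # excerpt, for illustration

def deps_list(*pkgs: str) -> list[str]:
    return [deps[p] for p in pkgs]

assert deps_list("torch", "numpy") == ["torch>=1.4", "numpy"]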
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.17.0.dev0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''') lowerCAmelCase_ : Optional[Any] = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : snake_case : Optional[str] = field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) snake_case : Optional[str] = field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) snake_case : int = field( default=1_0_2_4 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) snake_case : bool = field( default=__a , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) snake_case : bool = field( default=__a , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) snake_case : Optional[int] = field( default=__a , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) snake_case : Optional[int] = field( default=__a , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) snake_case : Optional[int] = field( default=__a , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) snake_case : Optional[str] = field( default=__a , metadata={"""help""": """A csv or a json file containing the training data."""} ) snake_case : Optional[str] = field( default=__a , metadata={"""help""": """A csv or a json file containing the validation data."""} ) snake_case : Optional[str] = field(default=__a , metadata={"""help""": """A csv or a json file containing the test data."""} ) def snake_case_ (self ): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: _UpperCAmelCase : List[str] = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase : Union[str, Any] = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __lowerCAmelCase : snake_case : str = field( default=__a , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) snake_case : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) snake_case : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) snake_case : Optional[str] = field( default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) snake_case : bool = field( default=__a , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) snake_case : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) snake_case : bool = field( default=__a , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def __A ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase : Dict = training_args.get_process_log_level() logger.setLevel(lowerCAmelCase_ ) datasets.utils.logging.set_verbosity(lowerCAmelCase_ ) transformers.utils.logging.set_verbosity(lowerCAmelCase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _UpperCAmelCase : List[str] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Union[str, Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _UpperCAmelCase : Dict = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _UpperCAmelCase : int = data_args.train_file.split(""".""" )[-1] _UpperCAmelCase : str = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _UpperCAmelCase : Optional[Any] = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files _UpperCAmelCase : List[str] = load_dataset("""csv""" , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _UpperCAmelCase : List[str] = load_dataset("""json""" , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co./docs/datasets/loading_datasets.html. # Labels _UpperCAmelCase : Optional[int] = raw_datasets["""train"""].features["""label"""].names _UpperCAmelCase : Optional[Any] = len(lowerCAmelCase_ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _UpperCAmelCase : Optional[Any] = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCAmelCase_ , ) _UpperCAmelCase : str = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase : int = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase : List[str] = False # Some models have set the order of the labels to use, so let's make sure we do use it. _UpperCAmelCase : Dict = {"""Refused""": 0, """Entailed""": 1} _UpperCAmelCase : List[Any] = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _UpperCAmelCase : str = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(lowerCAmelCase_ ): # Tokenize the texts def _convert_table_text_to_pandas(lowerCAmelCase_ ): _UpperCAmelCase : int = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] _UpperCAmelCase : Union[str, Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _UpperCAmelCase : Tuple = examples["""statement"""] _UpperCAmelCase : str = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) _UpperCAmelCase : Optional[int] = tokenizer(lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ) _UpperCAmelCase : int = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): _UpperCAmelCase : str = raw_datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) _UpperCAmelCase : Dict = raw_datasets["""train"""] if data_args.max_train_samples is not None: _UpperCAmelCase : List[Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) _UpperCAmelCase : Optional[int] = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: _UpperCAmelCase : Dict = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not 
None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) _UpperCAmelCase : Tuple = raw_datasets["""test"""] if data_args.max_predict_samples is not None: _UpperCAmelCase : Tuple = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(lowerCAmelCase_ ) ) , 3 ): logger.info(f"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowerCAmelCase_ ): _UpperCAmelCase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase_ ) else p.predictions _UpperCAmelCase : int = np.argmax(lowerCAmelCase_ , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _UpperCAmelCase : Dict = default_data_collator elif training_args.fpaa: _UpperCAmelCase : List[str] = DataCollatorWithPadding(lowerCAmelCase_ , pad_to_multiple_of=8 ) else: _UpperCAmelCase : Optional[Any] = None # Initialize our Trainer _UpperCAmelCase : Optional[Any] = Trainer( model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , ) # Training if training_args.do_train: _UpperCAmelCase : Any = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : Optional[Any] = last_checkpoint _UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=lowerCAmelCase_ ) _UpperCAmelCase : Union[str, Any] = train_result.metrics _UpperCAmelCase : Any = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ ) ) _UpperCAmelCase : Optional[Any] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , lowerCAmelCase_ ) trainer.save_metrics("""train""" , lowerCAmelCase_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _UpperCAmelCase : Dict = trainer.evaluate(eval_dataset=lowerCAmelCase_ ) _UpperCAmelCase : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase_ ) _UpperCAmelCase : Optional[int] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) trainer.log_metrics("""eval""" , lowerCAmelCase_ ) trainer.save_metrics("""eval""" , lowerCAmelCase_ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_UpperCAmelCase : str = predict_dataset.remove_columns("""label""" ) _UpperCAmelCase : List[Any] = trainer.predict(lowerCAmelCase_ , metric_key_prefix="""predict""" ).predictions _UpperCAmelCase : Dict = np.argmax(lowerCAmelCase_ , axis=1 ) _UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(lowerCAmelCase_ , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(lowerCAmelCase_ ): _UpperCAmelCase : List[str] = label_list[item] writer.write(f"{index}\t{item}\n" ) _UpperCAmelCase : Union[str, Any] = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**lowerCAmelCase_ ) else: trainer.create_model_card(**lowerCAmelCase_ ) def __A ( lowerCAmelCase_ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
170
1
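The script above flattens each TabFact table from a '#'-delimited string into a pandas DataFrame before tokenization; a self-contained sketch of that conversion step (the sample table is made up):

# table_text rows are newline-separated, cells are '#'-separated,
# and the first row is the header.
import pandas as pd

table_text = "city#population\nParis#2.1M\nBerlin#3.6M\n"
rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])

assert list(table.columns) == ["city", "population"]
assert len(table) == 2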
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """simple docstring"""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """simple docstring"""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
14
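The scheduler above picks, among arrived and unfinished processes, the one maximizing the response ratio; a one-line check of that formula (the concrete values are illustrative):

# HRRN response ratio = (waiting_time + burst_time) / burst_time
def response_ratio(current_time: int, arrival: int, burst: int) -> float:
    return (burst + (current_time - arrival)) / burst

assert response_ratio(10, 2, 4) == 3.0  # waited 8 units, burst 4 -> (4 + 8) / 4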
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
259
0
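A variant of the greedy above that returns the selected indices instead of printing and sorts by finish time explicitly; a self-contained sketch (the sample data repeats the row's example):

# Greedy activity selection: walk activities in order of finish time, keeping
# every activity whose start is at or after the last selected finish.
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected, last_finish = [], float("-inf")
    for j in sorted(range(len(finish)), key=lambda j: finish[j]):
        if start[j] >= last_finish:
            selected.append(j)
            last_finish = finish[j]
    return selected

assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]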
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        """simple docstring"""
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        """simple docstring"""
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        """simple docstring"""
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        """simple docstring"""
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
171
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() lowercase : Any = logging.get_logger() @dataclass class SCREAMING_SNAKE_CASE__ : """simple docstring""" lowercase : nn.Module lowercase : List[nn.Module] = field(default_factory=lowerCamelCase__ ) lowercase : list = field(default_factory=lowerCamelCase__ ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: '''simple docstring''' __UpperCamelCase : Optional[int] = len(list(m.modules() ) ) == 1 or isinstance(__UpperCamelCase , nn.Convad ) or isinstance(__UpperCamelCase , nn.BatchNormad ) if has_not_submodules: self.traced.append(__UpperCamelCase ) def __call__( self , __UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(__UpperCamelCase ) [x.remove() for x in self.handles] return self @property def __lowerCamelCase ( self ) -> Tuple: '''simple docstring''' return list(filter(lambda __UpperCamelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class SCREAMING_SNAKE_CASE__ : """simple docstring""" lowercase : nn.Module lowercase : nn.Module lowercase : int = 0 lowercase : List = field(default_factory=lowerCamelCase__ ) lowercase : List = field(default_factory=lowerCamelCase__ ) def __call__( self , __UpperCamelCase ) -> List[str]: '''simple docstring''' __UpperCamelCase : Optional[Any] = Tracker(self.dest )(__UpperCamelCase ).parametrized __UpperCamelCase : Union[str, Any] = Tracker(self.src )(__UpperCamelCase ).parametrized __UpperCamelCase : Union[str, Any] = list(filter(lambda __UpperCamelCase : type(__UpperCamelCase ) not in self.src_skip , __UpperCamelCase ) ) __UpperCamelCase : Any = list(filter(lambda __UpperCamelCase : type(__UpperCamelCase ) not in self.dest_skip , __UpperCamelCase ) ) if len(__UpperCamelCase ) != len(__UpperCamelCase ): raise Exception( f'''Numbers of operations are different. Source module has {len(__UpperCamelCase )} operations while''' f''' destination module has {len(__UpperCamelCase )}.''' ) for dest_m, src_m in zip(__UpperCamelCase , __UpperCamelCase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'''Transfered from={src_m} to={dest_m}''' ) def UpperCAmelCase_ (_lowerCAmelCase : str , _lowerCAmelCase : ResNetConfig , _lowerCAmelCase : Path , _lowerCAmelCase : bool = True ): print(F'''Converting {name}...''' ) with torch.no_grad(): __UpperCamelCase : Optional[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ).eval() __UpperCamelCase : Union[str, Any] = ResNetForImageClassification(_lowerCAmelCase ).eval() __UpperCamelCase : Any = ModuleTransfer(src=_lowerCAmelCase , dest=_lowerCAmelCase ) __UpperCamelCase : Optional[int] = torch.randn((1, 3, 2_24, 2_24) ) module_transfer(_lowerCAmelCase ) assert torch.allclose(from_model(_lowerCAmelCase ) , our_model(_lowerCAmelCase ).logits ), "The model logits don't match the original one." 
__UpperCamelCase : Tuple = F'''resnet{"-".join(name.split("resnet" ) )}''' print(_lowerCAmelCase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=_lowerCAmelCase , ) # we can use the convnext one __UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=_lowerCAmelCase , ) print(F'''Pushed {checkpoint_name}''' ) def UpperCAmelCase_ (_lowerCAmelCase : Path , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = True ): __UpperCamelCase : str = "imagenet-1k-id2label.json" __UpperCamelCase : Dict = 10_00 __UpperCamelCase : Any = (1, num_labels) __UpperCamelCase : Union[str, Any] = "huggingface/label-files" __UpperCamelCase : List[Any] = num_labels __UpperCamelCase : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) __UpperCamelCase : Optional[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} __UpperCamelCase : List[str] = idalabel __UpperCamelCase : str = {v: k for k, v in idalabel.items()} __UpperCamelCase : Dict = partial(_lowerCAmelCase , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase ) __UpperCamelCase : List[str] = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(_lowerCAmelCase , names_to_config[model_name] , _lowerCAmelCase , _lowerCAmelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return config, expected_shape if __name__ == "__main__": lowercase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported resnet* architecture," " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) lowercase : Union[str, Any] = parser.parse_args() lowercase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
171
1
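The converter above aligns the timm and Transformers networks by tracing leaf modules with forward hooks and copying state dicts pairwise; a minimal self-contained sketch of the tracing idea (the toy model is illustrative):

# Record leaf modules in execution order via forward hooks.
import torch
import torch.nn as nn

traced: list = []
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
handles = [
    m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
    for m in model.modules()
    if len(list(m.children())) == 0  # leaves only
]
model(torch.randn(1, 3, 8, 8))
for h in handles:
    h.remove()

assert [type(m).__name__ for m in traced] == ["Conv2d", "BatchNorm2d", "ReLU"]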
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __lowerCamelCase ( _UpperCamelCase): """simple docstring""" @require_torch def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' _UpperCAmelCase = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' _UpperCAmelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache _UpperCAmelCase = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(_UpperCAmelCase ) BertModel.from_pretrained(_UpperCAmelCase ) BertTokenizer.from_pretrained(_UpperCAmelCase ) pipeline(task='fill-mask' , model=_UpperCAmelCase ) # baseline - just load from_pretrained with normal network _UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed _UpperCAmelCase = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _UpperCAmelCase = '1' _UpperCAmelCase = subprocess.run(_UpperCAmelCase , env=_UpperCAmelCase , check=_UpperCAmelCase , capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' _UpperCAmelCase = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' _UpperCAmelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache _UpperCAmelCase = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(_UpperCAmelCase ) BertModel.from_pretrained(_UpperCAmelCase ) BertTokenizer.from_pretrained(_UpperCAmelCase ) pipeline(task='fill-mask' , model=_UpperCAmelCase ) # baseline - just load from_pretrained with normal network _UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed _UpperCAmelCase = self.get_env() _UpperCAmelCase = subprocess.run(_UpperCAmelCase , env=_UpperCAmelCase , check=_UpperCAmelCase , capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n ' _UpperCAmelCase = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n ' _UpperCAmelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n ' # baseline - just load from_pretrained with normal network _UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed 
_UpperCAmelCase = self.get_env() _UpperCAmelCase = subprocess.run(_UpperCAmelCase , env=_UpperCAmelCase , check=_UpperCAmelCase , capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # next emulate no network _UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _UpperCAmelCase = '1' _UpperCAmelCase = subprocess.run(_UpperCAmelCase , env=_UpperCAmelCase , check=_UpperCAmelCase , capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = '\nfrom transformers import pipeline\n ' _UpperCAmelCase = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n ' _UpperCAmelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n ' _UpperCAmelCase = self.get_env() _UpperCAmelCase = '1' _UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, mock, run] )] _UpperCAmelCase = subprocess.run(_UpperCAmelCase , env=_UpperCAmelCase , check=_UpperCAmelCase , capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( 'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , ) @require_torch def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = '\nfrom transformers import AutoModel\n ' _UpperCAmelCase = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n ' # baseline - just load from_pretrained with normal network _UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed _UpperCAmelCase = self.get_env() _UpperCAmelCase = subprocess.run(_UpperCAmelCase , env=_UpperCAmelCase , check=_UpperCAmelCase , capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _UpperCAmelCase = '1' _UpperCAmelCase = subprocess.run(_UpperCAmelCase , env=_UpperCAmelCase , check=_UpperCAmelCase , capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() )
39
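The pattern these tests rely on is launching a child interpreter with TRANSFORMERS_OFFLINE=1 so that from_pretrained resolves from the local cache instead of the network; a stripped-down sketch of the subprocess harness (the child script is a stand-in, not a model load):

# Run a child Python process with an offline-mode environment variable set.
import os
import subprocess
import sys

env = {**os.environ, "TRANSFORMERS_OFFLINE": "1"}
result = subprocess.run(
    [sys.executable, "-c", "import os; print(os.environ['TRANSFORMERS_OFFLINE'])"],
    env=env, check=False, capture_output=True,
)
assert result.returncode == 0 and result.stdout.strip() == b"1"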
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple: _a : Any = [] for old_item in old_list: _a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' ) _a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' ) _a : str = new_item.replace('out_layers.0' , 'norm2' ) _a : List[str] = new_item.replace('out_layers.3' , 'conv2' ) _a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' ) _a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' ) _a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any: _a : List[str] = [] for old_item in old_list: _a : List[Any] = old_item _a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' ) _a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' ) _a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) _a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) _a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _a : Optional[Any] = old_checkpoint[path] _a : Optional[Any] = old_tensor.shape[0] // 3 _a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _a : int = old_tensor.shape[0] // config['num_head_channels'] // 3 _a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 ) _a : Dict = query.reshape(lowerCAmelCase_ ) _a : str = key.reshape(lowerCAmelCase_ ) _a : Optional[int] = value.reshape(lowerCAmelCase_ ) for path in paths: _a : Dict = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) _a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) _a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: _a : int = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _a : List[str] = old_checkpoint[path['old']][:, :, 0] else: _a : Dict = old_checkpoint[path['old']] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _a : Optional[int] = {} _a : Dict = checkpoint['time_embed.0.weight'] _a : Tuple = checkpoint['time_embed.0.bias'] _a : Union[str, Any] = checkpoint['time_embed.2.weight'] _a : List[str] = checkpoint['time_embed.2.bias'] _a : List[str] = checkpoint['input_blocks.0.0.weight'] _a : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] _a : Optional[int] = checkpoint['out.0.weight'] _a : int = checkpoint['out.0.bias'] _a : List[str] = checkpoint['out.2.weight'] _a : Optional[int] = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only _a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) _a : Dict = { layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the middle blocks only _a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) _a : Union[str, Any] = { layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the output blocks only _a : Optional[int] = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) _a : str = { layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } for i in range(1 , lowerCAmelCase_ ): _a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1) _a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1) _a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key] if f"""input_blocks.{i}.0.op.weight""" in checkpoint: _a : List[Any] = checkpoint[ f"""input_blocks.{i}.0.op.weight""" ] _a : Union[str, Any] = checkpoint[ f"""input_blocks.{i}.0.op.bias""" ] continue _a : Any = renew_resnet_paths(lowerCAmelCase_ ) _a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""} _a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ ) if len(lowerCAmelCase_ ): _a : List[str] = renew_attention_paths(lowerCAmelCase_ ) _a : List[Any] = { 'old': f"""input_blocks.{i}.1""", 'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : Optional[Any] = { f"""input_blocks.{i}.1.qkv.bias""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""input_blocks.{i}.1.qkv.weight""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , ) _a : str = middle_blocks[0] _a : Tuple = middle_blocks[1] _a : Any = middle_blocks[2] _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : Any = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : int = renew_attention_paths(lowerCAmelCase_ ) _a : int = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ): _a : List[str] = i // (config['num_res_blocks'] + 1) _a : Any = i % (config['num_res_blocks'] + 1) _a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]] _a : Optional[Any] = {} for layer in output_block_layers: _a , _a : str = layer.split('.' 
)[0], shave_segments(lowerCAmelCase_ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(lowerCAmelCase_ ) else: _a : str = [layer_name] if len(lowerCAmelCase_ ) > 1: _a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key] _a : Dict = renew_resnet_paths(lowerCAmelCase_ ) _a : str = renew_resnet_paths(lowerCAmelCase_ ) _a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""} assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) _a : Tuple = checkpoint[ f"""output_blocks.{i}.{index}.conv.weight""" ] _a : List[str] = checkpoint[ f"""output_blocks.{i}.{index}.conv.bias""" ] # Clear attentions as they have been attributed above. if len(lowerCAmelCase_ ) == 2: _a : Union[str, Any] = [] if len(lowerCAmelCase_ ): _a : Tuple = renew_attention_paths(lowerCAmelCase_ ) _a : str = { 'old': f"""output_blocks.{i}.1""", 'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : List[Any] = { f"""output_blocks.{i}.1.qkv.bias""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""output_blocks.{i}.1.qkv.weight""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , ) else: _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] ) _a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] ) _a : Union[str, Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: __lowerCAmelCase = json.loads(f.read()) __lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __lowerCAmelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) 
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
89
0
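At its core the converter above builds old-to-new key mappings and copies tensors across under the renamed keys; a toy version of that step (the two substitutions reuse real renames from the script, the value is a placeholder):

# Rename checkpoint keys by applying substring substitutions in order.
old_checkpoint = {"middle_block.0.in_layers.0.weight": "tensor-placeholder"}
renames = [("in_layers.0", "norm1"), ("middle_block.0", "mid_block.resnets.0")]

new_checkpoint = {}
for old_key, value in old_checkpoint.items():
    new_key = old_key
    for old, new in renames:
        new_key = new_key.replace(old, new)
    new_checkpoint[new_key] = value

assert new_checkpoint == {"mid_block.resnets.0.norm1.weight": "tensor-placeholder"}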
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def a__ ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]: if attention_mask is None: _A = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _A = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _A = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__lowercase ) if decoder_head_mask is None: _A = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowercase ) if cross_attn_head_mask is None: _A = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowercase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class snake_case : def __init__( self : Any , a__ : Any , a__ : Optional[int]=13 , a__ : Optional[Any]=7 , a__ : Dict=True , a__ : int=False , a__ : Dict=99 , a__ : List[str]=16 , a__ : Dict=2 , a__ : str=4 , a__ : str=4 , a__ : str="relu" , a__ : Optional[int]=0.1 , a__ : List[Any]=0.1 , a__ : Union[str, Any]=0.0 , a__ : List[str]=0.0 , a__ : Any=20 , a__ : int=2 , a__ : List[str]=1 , a__ : Tuple=0 , ) -> str: '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = encoder_layerdrop _A = decoder_layerdrop _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def a_ ( self : Dict ) -> Tuple: '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.eos_token_id # Eos Token _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _A = input_ids.clamp(self.pad_token_id + 1 ) _A = decoder_input_ids.clamp(self.pad_token_id + 1 ) _A = self.get_config() _A = prepare_mam_aaa_inputs_dict(a__ , a__ , a__ ) return config, inputs_dict def a_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' return MaMaaaConfig( vocab_size=self.vocab_size , 
d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def a_ ( self : str ) -> int: '''simple docstring''' _A , _A = self.prepare_config_and_inputs() return config, inputs_dict def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Any ) -> Dict: '''simple docstring''' _A = MaMaaaModel(config=a__ ).get_decoder().to(a__ ).eval() _A = inputs_dict["input_ids"] _A = inputs_dict["attention_mask"] _A = inputs_dict["head_mask"] # first forward pass _A = model(a__ , attention_mask=a__ , head_mask=a__ , use_cache=a__ ) _A , _A = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and _A = torch.cat([input_ids, next_tokens] , dim=-1 ) _A = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) _A = model(a__ , attention_mask=a__ )["last_hidden_state"] _A = model(a__ , attention_mask=a__ , past_key_values=a__ )[ "last_hidden_state" ] # select random slice _A = ids_tensor((1,) , output_from_past.shape[-1] ).item() _A = output_from_no_past[:, -3:, random_slice_idx].detach() _A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1E-2 ) ) def a_ ( self : Tuple , a__ : Any , a__ : Tuple ) -> List[str]: '''simple docstring''' _A = MaMaaaModel(config=a__ ).to(a__ ).eval() _A = model(**a__ ) _A = outputs.encoder_last_hidden_state _A = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _A = model.get_encoder() encoder.save_pretrained(a__ ) _A = MaMaaaEncoder.from_pretrained(a__ ).to(a__ ) _A = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: _A = model.get_decoder() decoder.save_pretrained(a__ ) _A = MaMaaaDecoder.from_pretrained(a__ ).to(a__ ) _A = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=a__ , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase): __UpperCamelCase = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) __UpperCamelCase = (MaMaaaForConditionalGeneration,) if is_torch_available() else () __UpperCamelCase = ( { 'conversational': MaMaaaForConditionalGeneration, 'feature-extraction': MaMaaaModel, 'summarization': MaMaaaForConditionalGeneration, 'text2text-generation': MaMaaaForConditionalGeneration, 'translation': MaMaaaForConditionalGeneration, } if 
is_torch_available() else {} ) __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Dict , a__ : Dict , a__ : Union[str, Any] , a__ : List[str] ) -> Dict: '''simple docstring''' if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def a_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' _A = MaMaaaModelTester(self ) _A = ConfigTester(self , config_class=a__ ) def a_ ( self : Optional[int] ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def a_ ( self : str ) -> int: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _A = model_class(a__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(a__ ) _A , _A = model_class.from_pretrained(a__ , output_loading_info=a__ ) self.assertEqual(info["missing_keys"] , [] ) def a_ ( self : List[str] ) -> int: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*a__ ) def a_ ( self : int ) -> Dict: '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*a__ ) def a_ ( self : List[Any] ) -> str: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): _A = model_class(a__ ) model.to(a__ ) model.eval() _A = copy.deepcopy(self._prepare_for_class(a__ , a__ ) ) if not self.is_encoder_decoder: _A = inputs["input_ids"] del inputs["input_ids"] else: _A = inputs["input_ids"] _A = inputs.get("decoder_input_ids" , a__ ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , a__ ) _A = model.get_input_embeddings() if not self.is_encoder_decoder: _A = wte(a__ ) else: _A = wte(a__ ) _A = wte(a__ ) with torch.no_grad(): model(**a__ )[0] def a_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs() _A = input_dict["input_ids"] _A = input_ids.ne(1 ).to(a__ ) _A = MaMaaaForConditionalGeneration(a__ ).eval().to(a__ ) if torch_device == "cuda": model.half() model.generate(a__ , attention_mask=a__ ) model.generate(num_beams=4 , do_sample=a__ , early_stopping=a__ , num_return_sequences=3 ) def a__ ( __lowercase ) -> Optional[Any]: return torch.tensor(__lowercase , dtype=torch.long , device=__lowercase ) a_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class snake_case ( unittest.TestCase): @cached_property def a_ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def a_ ( self : Optional[Any] ) -> str: '''simple docstring''' _A = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(a__ ) _A = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) _A = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) _A = prepare_mam_aaa_inputs_dict(model.config , a__ , a__ ) with torch.no_grad(): _A = model(**a__ )[0] _A = torch.Size((1, 11, 10_24) ) self.assertEqual(output.shape , a__ ) # change to expected output here _A = torch.tensor( [[-0.7_7_8_0, 
-0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=a__ ) self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=a__ ) ) def a_ ( self : int ) -> Union[str, Any]: '''simple docstring''' _A = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a__ ) # change to intended input _A = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) _A = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) _A = prepare_mam_aaa_inputs_dict(model.config , a__ , a__ ) with torch.no_grad(): _A = model(**a__ )[0] _A = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , a__ ) # change to expected output here _A = torch.tensor( [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=a__ ) self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=a__ ) ) def a_ ( self : str ) -> Any: '''simple docstring''' _A = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a__ ) _A = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) _A = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams _A = tokenizer(a__ , padding=a__ , return_tensors="pt" ) _A = model.generate( input_ids=dct["input_ids"].to(a__ ) , attention_mask=dct["attention_mask"].to(a__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) _A = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] _A = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=a__ , skip_special_tokens=a__ ) assert generated == expected_en
355
"""simple docstring""" def a__ ( __lowercase ) -> int: assert ( isinstance(__lowercase , __lowercase ) and number_of_steps > 0 ), f"""number_of_steps needs to be positive integer, your input {number_of_steps}""" if number_of_steps == 1: return 1 _A , _A = 1, 1 for _ in range(number_of_steps - 1 ): _A , _A = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
163
0
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(_SCREAMING_SNAKE_CASE ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
260
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
260
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Build the lazy-import structure incrementally; each backend adds its own key.
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
224
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune hidden/underscore-prefixed directories and "scripts" in place.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
224
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
170
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
170
1
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): lowercase : Tuple = logging.get_logger() # the current default level is logging.WARNING lowercase : List[str] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(UpperCamelCase__ ) def __lowerCamelCase ( self ): lowercase : Tuple = logging.get_verbosity() lowercase : List[Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) lowercase : Tuple = '''Testing 1, 2, 3''' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(UpperCamelCase__ ) as cl: logger.warning(UpperCamelCase__ ) self.assertEqual(cl.out , msg + '''\n''' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(UpperCamelCase__ ) as cl: logger.warning(UpperCamelCase__ ) self.assertEqual(cl.out , '''''' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(UpperCamelCase__ ) as cl: logger.warning(UpperCamelCase__ ) self.assertEqual(cl.out , msg + '''\n''' ) # restore to the original level logging.set_verbosity(UpperCamelCase__ ) @mockenv(TRANSFORMERS_VERBOSITY='''error''' ) def __lowerCamelCase ( self ): transformers.utils.logging._reset_library_root_logger() # this action activates the env var lowercase : Optional[int] = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) lowercase : Optional[Any] = os.getenv('''TRANSFORMERS_VERBOSITY''' , UpperCamelCase__ ) lowercase : Dict = logging.log_levels[env_level_str] lowercase : int = logging.get_verbosity() self.assertEqual( UpperCamelCase__ , UpperCamelCase__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level lowercase : Any = '''''' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' ) def __lowerCamelCase ( self ): transformers.utils.logging._reset_library_root_logger() lowercase : Tuple = logging.logging.getLogger() with CaptureLogger(UpperCamelCase__ ) as cl: # this action activates the env var logging.get_logger('''transformers.models.bart.tokenization_bart''' ) self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out ) # no need to restore as nothing was changed def __lowerCamelCase ( self ): transformers.utils.logging._reset_library_root_logger() lowercase : Dict = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) lowercase : List[Any] = '''Testing 1, 2, 3''' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ): # nothing should be logged as env var disables this method with 
CaptureLogger(UpperCamelCase__ ) as cl: logger.warning_advice(UpperCamelCase__ ) self.assertEqual(cl.out , '''''' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(UpperCamelCase__ ) as cl: logger.warning_advice(UpperCamelCase__ ) self.assertEqual(cl.out , msg + '''\n''' ) def __lowercase ( ) ->Tuple: """simple docstring""" disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
369
def solution(length: int = 50) -> int:
    # different_colour_ways_number[row_length][tile_length - 2] counts the
    # fillings of a row of `row_length` that use at least one tile of
    # `tile_length` (2, 3 or 4).
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
173
0
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=0 ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = 1.0 if scale is None else scale UpperCAmelCase__ : Union[str, Any] = 0.0 if loc is None else loc super().__init__(_lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowerCamelCase )] ) @property def _a (self ): """simple docstring""" return self.base_dist.mean * self.scale + self.loc @property def _a (self ): """simple docstring""" return self.base_dist.variance * self.scale**2 @property def _a (self ): """simple docstring""" return self.variance.sqrt() class lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ): """simple docstring""" super().__init__(**_lowerCamelCase ) UpperCAmelCase__ : Optional[int] = args_dim UpperCAmelCase__ : str = nn.ModuleList([nn.Linear(_lowerCamelCase , _lowerCamelCase ) for dim in args_dim.values()] ) UpperCAmelCase__ : Optional[int] = domain_map def _a (self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = [proj(_lowerCamelCase ) for proj in self.proj] return self.domain_map(*_lowerCamelCase ) class lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__(self , _lowerCamelCase ): """simple docstring""" super().__init__() UpperCAmelCase__ : Union[str, Any] = function def _a (self , _lowerCamelCase , *_lowerCamelCase ): """simple docstring""" return self.function(_lowerCamelCase , *_lowerCamelCase ) class lowerCamelCase : '''simple docstring''' SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = 42 def __init__(self , _lowerCamelCase = 1 ): """simple docstring""" UpperCAmelCase__ : List[str] = dim UpperCAmelCase__ : Optional[int] = {k: dim * self.args_dim[k] for k in self.args_dim} def _a (self , _lowerCamelCase ): """simple docstring""" if self.dim == 1: return self.distribution_class(*_lowerCamelCase ) else: return Independent(self.distribution_class(*_lowerCamelCase ) , 1 ) def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = self._base_distribution(_lowerCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(_lowerCamelCase , loc=_lowerCamelCase , scale=_lowerCamelCase , event_dim=self.event_dim ) @property def _a (self ): """simple docstring""" return () if self.dim == 1 else (self.dim,) @property def _a (self ): """simple docstring""" return len(self.event_shape ) @property def _a (self ): """simple docstring""" return 0.0 def _a (self , _lowerCamelCase ): """simple docstring""" return ParameterProjection( in_features=_lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _a (self , *_lowerCamelCase ): """simple docstring""" raise NotImplementedError() @staticmethod def _a (_lowerCamelCase ): """simple docstring""" return (x + torch.sqrt(torch.square(_lowerCamelCase ) + 4.0 )) / 2.0 class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = {"df": 1, "loc": 1, "scale": 1} SCREAMING_SNAKE_CASE = StudentT 
@classmethod def _a (cls , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = cls.squareplus(_lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) UpperCAmelCase__ : Dict = 2.0 + cls.squareplus(_lowerCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = {"loc": 1, "scale": 1} SCREAMING_SNAKE_CASE = Normal @classmethod def _a (cls , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = cls.squareplus(_lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = {"total_count": 1, "logits": 1} SCREAMING_SNAKE_CASE = NegativeBinomial @classmethod def _a (cls , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Dict = cls.squareplus(_lowerCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _a (self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : Any = distr_args if self.dim == 1: return self.distribution_class(total_count=_lowerCamelCase , logits=_lowerCamelCase ) else: return Independent(self.distribution_class(total_count=_lowerCamelCase , logits=_lowerCamelCase ) , 1 ) def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
171
"""simple docstring""" import os from datetime import datetime as dt from github import Github _A = [ """good first issue""", """feature request""", """wip""", ] def a__ ( ) -> str: UpperCAmelCase__ : Union[str, Any] = Github(os.environ["""GITHUB_TOKEN"""] ) UpperCAmelCase__ : Dict = g.get_repo("""huggingface/accelerate""" ) UpperCAmelCase__ : str = repo.get_issues(state="""open""" ) for issue in open_issues: UpperCAmelCase__ : Optional[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase : i.created_at , reverse=lowerCAmelCase ) UpperCAmelCase__ : List[Any] = comments[0] if len(lowerCAmelCase ) > 0 else None UpperCAmelCase__ : Optional[Any] = dt.utcnow() UpperCAmelCase__ : List[str] = (current_time - issue.updated_at).days UpperCAmelCase__ : Optional[int] = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state="""closed""" ) elif ( days_since_updated > 23 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) if __name__ == "__main__": main()
171
1
def hexagonal_numbers(length: int) -> list[int]:
    # The n-th hexagonal number is n * (2n - 1).
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
261
def pancake_sort(arr: list) -> list:
    """Sort `arr` by repeatedly flipping the largest unsorted prefix."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
261
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( lowercase_ = 4_00_00_00 ) -> Dict: A__ = [0, 1] A__ = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 A__ = 0 for j in range(len(UpperCamelCase__ ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f'{solution() = }')
247
'''simple docstring''' import torch from torch import nn class _snake_case ( nn.Module ): def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1 , _lowerCamelCase=False): super().__init__() UpperCAmelCase__ : List[Any] = n_token UpperCAmelCase__ : Tuple = d_embed UpperCAmelCase__ : str = d_proj UpperCAmelCase__ : str = cutoffs + [n_token] UpperCAmelCase__ : List[Any] = [0] + self.cutoffs UpperCAmelCase__ : Optional[Any] = div_val UpperCAmelCase__ : Optional[int] = self.cutoffs[0] UpperCAmelCase__ : Optional[int] = len(self.cutoffs) - 1 UpperCAmelCase__ : Union[str, Any] = self.shortlist_size + self.n_clusters if self.n_clusters > 0: UpperCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed)) UpperCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters)) UpperCAmelCase__ : int = nn.ModuleList() UpperCAmelCase__ : List[Any] = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs)): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCamelCase , _lowerCamelCase))) else: self.out_projs.append(_lowerCamelCase) self.out_layers.append(nn.Linear(_lowerCamelCase , _lowerCamelCase)) else: for i in range(len(self.cutoffs)): UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1] UpperCAmelCase__ : Union[str, Any] = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCamelCase , _lowerCamelCase))) self.out_layers.append(nn.Linear(_lowerCamelCase , r_idx - l_idx)) UpperCAmelCase__ : Optional[int] = keep_order def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase): if proj is None: UpperCAmelCase__ : Dict = nn.functional.linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: UpperCAmelCase__ : Optional[int] = nn.functional.linear(_lowerCamelCase , proj.t().contiguous()) UpperCAmelCase__ : List[str] = nn.functional.linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=False): if labels is not None: # Shift so that tokens < n predict n UpperCAmelCase__ : Optional[int] = hidden[..., :-1, :].contiguous() UpperCAmelCase__ : int = labels[..., 1:].contiguous() UpperCAmelCase__ : List[str] = hidden.view(-1 , hidden.size(-1)) UpperCAmelCase__ : Optional[int] = labels.view(-1) if hidden.size(0) != labels.size(0): raise RuntimeError("""Input and labels should have the same size in the batch dimension.""") else: UpperCAmelCase__ : Optional[int] = hidden.view(-1 , hidden.size(-1)) if self.n_clusters == 0: UpperCAmelCase__ : Tuple = self._compute_logit(_lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0]) if labels is not None: UpperCAmelCase__ : Dict = labels != -100 UpperCAmelCase__ : Tuple = torch.zeros_like(_lowerCamelCase , dtype=hidden.dtype , device=hidden.device) UpperCAmelCase__ : List[Any] = ( -nn.functional.log_softmax(_lowerCamelCase , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1) ) else: UpperCAmelCase__ : List[str] = nn.functional.log_softmax(_lowerCamelCase , dim=-1) else: # construct weights and biases UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: 
UpperCAmelCase__ , UpperCAmelCase__ : int = self.cutoff_ends[i], self.cutoff_ends[i + 1] UpperCAmelCase__ : Dict = self.out_layers[0].weight[l_idx:r_idx] UpperCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx] else: UpperCAmelCase__ : Union[str, Any] = self.out_layers[i].weight UpperCAmelCase__ : Any = self.out_layers[i].bias if i == 0: UpperCAmelCase__ : Optional[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0) UpperCAmelCase__ : List[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0) weights.append(_lowerCamelCase) biases.append(_lowerCamelCase) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = weights[0], biases[0], self.out_projs[0] UpperCAmelCase__ : Optional[int] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) UpperCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(_lowerCamelCase , dim=1) if labels is None: UpperCAmelCase__ : str = hidden.new_empty((head_logit.size(0), self.n_token)) else: UpperCAmelCase__ : Optional[Any] = torch.zeros_like(_lowerCamelCase , dtype=hidden.dtype , device=hidden.device) UpperCAmelCase__ : Optional[int] = 0 UpperCAmelCase__ : List[str] = [0] + self.cutoffs for i in range(len(_lowerCamelCase) - 1): UpperCAmelCase__ , UpperCAmelCase__ : Dict = cutoff_values[i], cutoff_values[i + 1] if labels is not None: UpperCAmelCase__ : List[str] = (labels >= l_idx) & (labels < r_idx) UpperCAmelCase__ : str = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue UpperCAmelCase__ : List[Any] = labels.index_select(0 , _lowerCamelCase) - l_idx UpperCAmelCase__ : List[str] = head_logprob.index_select(0 , _lowerCamelCase) UpperCAmelCase__ : Optional[Any] = hidden.index_select(0 , _lowerCamelCase) else: UpperCAmelCase__ : Any = hidden if i == 0: if labels is not None: UpperCAmelCase__ : List[Any] = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1) else: UpperCAmelCase__ : Tuple = head_logprob[:, : self.cutoffs[0]] else: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = weights[i], biases[i], self.out_projs[i] UpperCAmelCase__ : int = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) UpperCAmelCase__ : str = nn.functional.log_softmax(_lowerCamelCase , dim=1) UpperCAmelCase__ : int = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: UpperCAmelCase__ : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None]).squeeze(1) else: UpperCAmelCase__ : List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i UpperCAmelCase__ : Tuple = logprob_i if labels is not None: if (hasattr(self , """keep_order""") and self.keep_order) or keep_order: out.index_copy_(0 , _lowerCamelCase , -logprob_i) else: out[offset : offset + logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return out def snake_case__ ( self , _lowerCamelCase): if self.n_clusters == 0: UpperCAmelCase__ : Union[str, Any] = self._compute_logit(_lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0]) return nn.functional.log_softmax(_lowerCamelCase , dim=-1) else: # construct weights and biases UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: UpperCAmelCase__ , UpperCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1] UpperCAmelCase__ : Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx] UpperCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx] else: 
UpperCAmelCase__ : int = self.out_layers[i].weight UpperCAmelCase__ : List[str] = self.out_layers[i].bias if i == 0: UpperCAmelCase__ : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0) UpperCAmelCase__ : Optional[int] = torch.cat([bias_i, self.cluster_bias] , dim=0) weights.append(_lowerCamelCase) biases.append(_lowerCamelCase) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = weights[0], biases[0], self.out_projs[0] UpperCAmelCase__ : List[Any] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) UpperCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0), self.n_token)) UpperCAmelCase__ : int = nn.functional.log_softmax(_lowerCamelCase , dim=1) UpperCAmelCase__ : str = [0] + self.cutoffs for i in range(len(_lowerCamelCase) - 1): UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1] if i == 0: UpperCAmelCase__ : List[Any] = head_logprob[:, : self.cutoffs[0]] else: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = weights[i], biases[i], self.out_projs[i] UpperCAmelCase__ : Union[str, Any] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) UpperCAmelCase__ : List[str] = nn.functional.log_softmax(_lowerCamelCase , dim=1) UpperCAmelCase__ : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i UpperCAmelCase__ : Dict = logprob_i return out
163
0
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
362
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise the PyTorch model from the config
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
200
0
"""simple docstring""" from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def UpperCamelCase_ ( lowerCAmelCase__ : bool = True , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : List[Any] ) -> str: """simple docstring""" if not is_tqdm_available(): raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' ) lowerCAmelCase_ : Any = False if main_process_only: lowerCAmelCase_ : Tuple = PartialState().local_process_index == 0 return _tqdm(*lowerCAmelCase__ , **lowerCAmelCase__ , disable=lowerCAmelCase__ )
224
"""simple docstring""" def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase_ : Tuple = [0] * len(lowerCAmelCase__ ) lowerCAmelCase_ : List[str] = [] lowerCAmelCase_ : Tuple = [1] * len(lowerCAmelCase__ ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(lowerCAmelCase__ ) ): if indegree[i] == 0: queue.append(lowerCAmelCase__ ) while queue: lowerCAmelCase_ : Union[str, Any] = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: lowerCAmelCase_ : Any = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(lowerCAmelCase__ ) print(max(lowerCAmelCase__ ) ) # Adjacency list of Graph lowercase__ : Any = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
224
1
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase_ = 256 class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Union[str, Any] = ['melgan'] def __init__( self: Optional[Any] , UpperCamelCase_: SpectrogramNotesEncoder , UpperCamelCase_: SpectrogramContEncoder , UpperCamelCase_: TaFilmDecoder , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: OnnxRuntimeModel if is_onnx_available() else Any , ): super().__init__() # From MELGAN __lowerCamelCase = math.log(1E-5 ) # Matches MelGAN training. __lowerCamelCase = 4.0 # Largest value for most examples __lowerCamelCase = 1_28 self.register_modules( notes_encoder=UpperCamelCase_ , continuous_encoder=UpperCamelCase_ , decoder=UpperCamelCase_ , scheduler=UpperCamelCase_ , melgan=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: int=(-1.0, 1.0) , UpperCamelCase_: Union[str, Any]=False ): __lowerCamelCase, __lowerCamelCase = output_range if clip: __lowerCamelCase = torch.clip(UpperCamelCase_ , self.min_value , self.max_value ) # Scale to [0, 1]. __lowerCamelCase = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any]=(-1.0, 1.0) , UpperCamelCase_: Dict=False ): __lowerCamelCase, __lowerCamelCase = input_range __lowerCamelCase = torch.clip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if clip else outputs # Scale to [0, 1]. __lowerCamelCase = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: Dict ): __lowerCamelCase = input_tokens > 0 __lowerCamelCase, __lowerCamelCase = self.notes_encoder( encoder_input_tokens=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase = self.continuous_encoder( encoder_inputs=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] ): __lowerCamelCase = noise_time if not torch.is_tensor(UpperCamelCase_ ): __lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(UpperCamelCase_ ) and len(timesteps.shape ) == 0: __lowerCamelCase = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __lowerCamelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) __lowerCamelCase = self.decoder( encodings_and_masks=UpperCamelCase_ , decoder_input_tokens=UpperCamelCase_ , decoder_noise_time=UpperCamelCase_ ) return logits @torch.no_grad() def __call__( self: List[Any] , UpperCamelCase_: List[List[int]] , UpperCamelCase_: Optional[torch.Generator] = None , UpperCamelCase_: int = 1_00 , UpperCamelCase_: bool = True , UpperCamelCase_: str = "numpy" , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(UpperCamelCase_ )}.' ) __lowerCamelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) __lowerCamelCase = np.zeros([1, 0, self.n_dims] , np.floataa ) __lowerCamelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device ) for i, encoder_input_tokens in enumerate(UpperCamelCase_ ): if i == 0: __lowerCamelCase = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. __lowerCamelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
__lowerCamelCase = ones __lowerCamelCase = self.scale_features( UpperCamelCase_ , output_range=[-1.0, 1.0] , clip=UpperCamelCase_ ) __lowerCamelCase = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCamelCase_ , continuous_mask=UpperCamelCase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop __lowerCamelCase = randn_tensor( shape=encoder_continuous_inputs.shape , generator=UpperCamelCase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __lowerCamelCase = self.decode( encodings_and_masks=UpperCamelCase_ , input_tokens=UpperCamelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 __lowerCamelCase = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample __lowerCamelCase = self.scale_to_features(UpperCamelCase_ , input_range=[-1.0, 1.0] ) __lowerCamelCase = mel[:1] __lowerCamelCase = mel.cpu().float().numpy() __lowerCamelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCamelCase_ , UpperCamelCase_ ) logger.info("""Generated segment""" , UpperCamelCase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" ) elif output_type == "numpy" and self.melgan is None: raise ValueError( """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" ) if output_type == "numpy": __lowerCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: __lowerCamelCase = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=UpperCamelCase_ )
29
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ): '''simple docstring''' try: __lowerCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __lowerCamelCase = default else: # KEY is set, convert it to True or False. try: __lowerCamelCase = strtobool(A__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'If set, {key} must be yes or no.' ) return _value UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False) def lowerCamelCase__ ( A__ : Any ): '''simple docstring''' return unittest.skip("""Test was skipped""" )(A__ ) def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ ) def lowerCamelCase__ ( A__ : Union[str, Any] ): '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ ) def lowerCamelCase__ ( A__ : List[str] ): '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ ) def lowerCamelCase__ ( A__ : Union[str, Any] ): '''simple docstring''' return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ ) def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ ) def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ ) def lowerCamelCase__ ( A__ : Any ): '''simple docstring''' return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ ) def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ ) def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ ) def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ ) def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ ) def lowerCamelCase__ ( A__ : Tuple ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ ) def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ ) def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ ) def lowerCamelCase__ ( A__ : List[str] ): '''simple docstring''' 
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ ) def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ): '''simple docstring''' if test_case is None: return partial(A__ , version=A__ ) return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ ) def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ ) def lowerCamelCase__ ( A__ : Optional[Any] ): '''simple docstring''' return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ ) def lowerCamelCase__ ( A__ : str ): '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ ) UpperCAmelCase_ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowerCamelCase__ ( A__ : Any ): '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ ) class lowerCamelCase__( unittest.TestCase): UpperCAmelCase__ : List[Any] = True @classmethod def lowerCAmelCase__ ( cls: int ): __lowerCamelCase = tempfile.mkdtemp() @classmethod def lowerCAmelCase__ ( cls: Any ): if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def lowerCAmelCase__ ( self: Any ): if self.clear_on_setup: for path in Path(self.tmpdir ).glob("""**/*""" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(UpperCamelCase_ ) class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: int ): super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ): __lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowerCamelCase__ ( A__ : Optional[Any] ): '''simple docstring''' __lowerCamelCase = AcceleratorState() __lowerCamelCase = tensor[None].clone().to(state.device ) __lowerCamelCase = gather(A__ ).cpu() __lowerCamelCase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , A__ ): return False return True class lowerCamelCase__: def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ): __lowerCamelCase = returncode __lowerCamelCase = stdout __lowerCamelCase = stderr async def lowerCamelCase__ ( A__ : int , A__ : Any ): '''simple docstring''' while True: __lowerCamelCase = await stream.readline() if line: callback(A__ ) else: break async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ): '''simple docstring''' if echo: print("""\nRunning: """ , """ """.join(A__ ) ) __lowerCamelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. 
The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __lowerCamelCase = [] __lowerCamelCase = [] def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ): __lowerCamelCase = line.decode("""utf-8""" ).rstrip() sink.append(A__ ) if not quiet: print(A__ , A__ , file=A__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ), ] , timeout=A__ , ) return _RunOutput(await p.wait() , A__ , A__ ) def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ): '''simple docstring''' __lowerCamelCase = asyncio.get_event_loop() __lowerCamelCase = loop.run_until_complete( _stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) ) __lowerCamelCase = """ """.join(A__ ) if result.returncode > 0: __lowerCamelCase = """\n""".join(result.stderr ) raise RuntimeError( f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n' f'The combined stderr from workers follows:\n{stderr}' ) return result class lowerCamelCase__( __lowerCamelCase): pass def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ): '''simple docstring''' try: __lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(A__ , """decode""" ): __lowerCamelCase = output.decode("""utf-8""" ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
29
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() A__ = logging.get_logger(__name__) def _UpperCAmelCase ( snake_case , snake_case=False ): """simple docstring""" _lowerCAmelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _UpperCAmelCase ( snake_case , snake_case , snake_case=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _lowerCAmelCase = """""" else: _lowerCAmelCase = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _lowerCAmelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' ) _lowerCAmelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase = in_proj_weight[ : config.hidden_size, : ] _lowerCAmelCase = in_proj_bias[: config.hidden_size] _lowerCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _lowerCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _lowerCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _lowerCAmelCase = in_proj_bias[-config.hidden_size :] def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = ["""head.weight""", """head.bias"""] for k in 
ignore_keys: state_dict.pop(snake_case , snake_case ) def _UpperCAmelCase ( snake_case , snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = dct.pop(snake_case ) _lowerCAmelCase = val def _UpperCAmelCase ( ): """simple docstring""" _lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return im @torch.no_grad() def _UpperCAmelCase ( snake_case , snake_case , snake_case=True ): """simple docstring""" _lowerCAmelCase = ViTConfig() # patch_size if model_name[-1] == "8": _lowerCAmelCase = 8 # set labels if required if not base_model: _lowerCAmelCase = 10_00 _lowerCAmelCase = """huggingface/label-files""" _lowerCAmelCase = """imagenet-1k-id2label.json""" _lowerCAmelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase = {int(snake_case ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: _lowerCAmelCase = 3_84 _lowerCAmelCase = 15_36 _lowerCAmelCase = 12 _lowerCAmelCase = 6 # load original model from torch hub _lowerCAmelCase = torch.hub.load("""facebookresearch/dino:main""" , snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys _lowerCAmelCase = original_model.state_dict() if base_model: remove_classification_head_(snake_case ) _lowerCAmelCase = create_rename_keys(snake_case , base_model=snake_case ) for src, dest in rename_keys: rename_key(snake_case , snake_case , snake_case ) read_in_q_k_v(snake_case , snake_case , snake_case ) # load HuggingFace model if base_model: _lowerCAmelCase = ViTModel(snake_case , add_pooling_layer=snake_case ).eval() else: _lowerCAmelCase = ViTForImageClassification(snake_case ).eval() model.load_state_dict(snake_case ) # Check outputs on an image, prepared by ViTImageProcessor _lowerCAmelCase = ViTImageProcessor() _lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="""pt""" ) _lowerCAmelCase = encoding["""pixel_values"""] _lowerCAmelCase = model(snake_case ) if base_model: _lowerCAmelCase = original_model(snake_case ) assert torch.allclose(snake_case , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: _lowerCAmelCase = original_model(snake_case ) assert logits.shape == outputs.logits.shape assert torch.allclose(snake_case , outputs.logits , atol=1E-3 ) Path(snake_case ).mkdir(exist_ok=snake_case ) print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case ) if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""dino_vitb16""", type=str, help="""Name of the model trained with DINO you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--base_model""", action="""store_true""", help="""Whether to only convert the base model (no projection head weights).""", ) parser.set_defaults(base_model=True) A__ = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
82
"""simple docstring""" def __magic_name__ ( lowercase = 200_0000 ): SCREAMING_SNAKE_CASE_: str =[0 for i in range(n + 1 )] SCREAMING_SNAKE_CASE_: Any =1 SCREAMING_SNAKE_CASE_: Tuple =1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , lowercase ): SCREAMING_SNAKE_CASE_: List[str] =1 SCREAMING_SNAKE_CASE_: Optional[int] =0 for i in range(lowercase ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(f"""{solution() = }""")
173
0
"""simple docstring""" import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right lowercase__ = 25_6047 lowercase__ = 25_6145 @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( _a, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = NllbTokenizer lowerCamelCase__ = NllbTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = {} def A_ ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : Union[str, Any] = NllbTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def A_ ( self ): _lowerCamelCase : str = NllbTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase ) _lowerCamelCase : Tuple = tokenizer.tokenize('This is a test' ) self.assertListEqual(__lowerCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCamelCase : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _lowerCamelCase : str = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def A_ ( self ): _lowerCamelCase : List[Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) _lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) _lowerCamelCase : Any = tempfile.mkdtemp() _lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCamelCase ) _lowerCamelCase : Any = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCamelCase : List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) 
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase ) # Checks everything loads correctly in the same way _lowerCamelCase : Dict = tokenizer_r.from_pretrained(__lowerCamelCase ) _lowerCamelCase : List[Any] = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) shutil.rmtree(__lowerCamelCase ) # Save tokenizer rust, legacy_format=True _lowerCamelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCamelCase : Optional[int] = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase ) _lowerCamelCase : Dict = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase ) # Checks everything loads correctly in the same way _lowerCamelCase : List[str] = tokenizer_r.from_pretrained(__lowerCamelCase ) _lowerCamelCase : Dict = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) shutil.rmtree(__lowerCamelCase ) # Save tokenizer rust, legacy_format=False _lowerCamelCase : Tuple = tempfile.mkdtemp() _lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase ) _lowerCamelCase : List[str] = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCamelCase : Optional[int] = tokenizer_r.from_pretrained(__lowerCamelCase ) _lowerCamelCase : int = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) shutil.rmtree(__lowerCamelCase ) @require_torch def A_ ( self ): if not self.test_seqaseq: return _lowerCamelCase : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
_lowerCamelCase : Optional[Any] = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] _lowerCamelCase : Union[str, Any] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: _lowerCamelCase : Any = tokenizer.prepare_seqaseq_batch( src_texts=__lowerCamelCase , tgt_texts=__lowerCamelCase , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified _lowerCamelCase : List[str] = tokenizer.prepare_seqaseq_batch( __lowerCamelCase , tgt_texts=__lowerCamelCase , max_length=3 , return_tensors='pt' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) _lowerCamelCase : Tuple = tokenizer.prepare_seqaseq_batch( src_texts=__lowerCamelCase , max_length=3 , max_target_length=10 , return_tensors='pt' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('decoder_input_ids' , __lowerCamelCase ) @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' 
) def A_ ( self ): pass def A_ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _lowerCamelCase : List[str] = [AddedToken('<special>' , lstrip=__lowerCamelCase )] _lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase ) _lowerCamelCase : Optional[int] = tokenizer_r.encode('Hey this is a <special> token' ) _lowerCamelCase : List[Any] = tokenizer_r.encode('<special>' , add_special_tokens=__lowerCamelCase )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: _lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) _lowerCamelCase : Dict = self.tokenizer_class.from_pretrained( __lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase ) _lowerCamelCase : Union[str, Any] = tokenizer_p.encode('Hey this is a <special> token' ) _lowerCamelCase : List[str] = tokenizer_cr.encode('Hey this is a <special> token' ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = """facebook/nllb-200-distilled-600M""" lowerCamelCase__ = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] lowerCamelCase__ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] lowerCamelCase__ = [ 25_60_47, 1_62_97, 13_44_08, 81_65, 24_80_66, 1_47_34, 9_50, 11_35, 10_57_21, 35_73, 83, 2_73_52, 1_08, 4_94_86, 2, ] @classmethod def A_ ( cls ): _lowerCamelCase : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' ) _lowerCamelCase : List[Any] = 1 return cls def A_ ( self ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 256001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 256002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 256057 ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowerCamelCase ) def A_ ( self ): self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids ) # fmt: off _lowerCamelCase : Optional[int] = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: on _lowerCamelCase : List[Any] = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) _lowerCamelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase ) 
self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase ) def A_ ( self ): _lowerCamelCase : Union[str, Any] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , __lowerCamelCase ) _lowerCamelCase : int = 10 _lowerCamelCase : List[Any] = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , __lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) def A_ ( self ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [256203, 3] ) def A_ ( self ): _lowerCamelCase : List[str] = tempfile.mkdtemp() _lowerCamelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__lowerCamelCase ) _lowerCamelCase : Dict = NllbTokenizer.from_pretrained(__lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase ) @require_torch def A_ ( self ): _lowerCamelCase : Optional[int] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCamelCase : Tuple = shift_tokens_right( batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) _lowerCamelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __lowerCamelCase ) self.assertEqual(__lowerCamelCase , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def A_ ( self ): _lowerCamelCase : Dict = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors='pt' ) _lowerCamelCase : int = self.tokenizer( text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=10 , return_tensors='pt' ) _lowerCamelCase : str = targets["""input_ids"""] _lowerCamelCase : Dict = shift_tokens_right( __lowerCamelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def A_ ( self ): _lowerCamelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(__lowerCamelCase ) , { # A, test, EOS, en_XX 'input_ids': [[256047, 70, 7356, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 256057, } , ) @require_torch def A_ ( self ): _lowerCamelCase : List[Any] = True _lowerCamelCase : Tuple = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] ) _lowerCamelCase : Optional[int] = False _lowerCamelCase : Dict = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [256047, 16297, 
134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
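For context, a hedged sketch of the translation workflow the tests above exercise; the checkpoint id matches the one used in the integration tests, and forcing the target language code as BOS is the standard NLLB generation pattern.

# Illustrative English-to-Romanian translation with NLLB.
from transformers import AutoModelForSeq2SeqLM, NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ron_Latn"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])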
367
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowercase__ = { """E""": 12.70, """T""": 9.06, """A""": 8.17, """O""": 7.51, """I""": 6.97, """N""": 6.75, """S""": 6.33, """H""": 6.09, """R""": 5.99, """D""": 4.25, """L""": 4.03, """C""": 2.78, """U""": 2.76, """M""": 2.41, """W""": 2.36, """F""": 2.23, """G""": 2.02, """Y""": 1.97, """P""": 1.93, """B""": 1.29, """V""": 0.98, """K""": 0.77, """J""": 0.15, """X""": 0.15, """Q""": 0.10, """Z""": 0.07, } lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( lowercase__ ): return x[0] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = get_letter_count(lowercase__ ) _lowerCamelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(lowercase__ ) _lowerCamelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ ) _lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] ) _lowerCamelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=lowercase__ , reverse=lowercase__ ) _lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : str = get_frequency_order(lowercase__ ) _lowerCamelCase : Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor A_ : Any = logging.get_logger(__name__) class lowerCamelCase (A__ ): def __init__( self : Union[str, Any] , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Optional[int] ) -> None: warnings.warn( """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use BeitImageProcessor instead.""" , __UpperCamelCase , ) super().__init__(*__UpperCamelCase , **__UpperCamelCase )
165
"""simple docstring""" from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : float )->float: return 0.0 def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = 20 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _UpperCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
260
0
"""simple docstring""" import heapq import sys import numpy as np __A = tuple[int, int] class lowerCamelCase__ : '''simple docstring''' def __init__( self ) -> str: _lowerCAmelCase =[] _lowerCAmelCase =set() def _lowerCAmelCase ( self ) -> Tuple: if not self.empty(): return self.elements[0][0] else: return float("""inf""" ) def _lowerCAmelCase ( self ) -> Optional[Any]: return len(self.elements ) == 0 def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__UpperCAmelCase ) else: # update # print("update", item) _lowerCAmelCase =[] ((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: if item in self.set: self.set.remove(__UpperCAmelCase ) _lowerCAmelCase =[] ((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def _lowerCAmelCase ( self ) -> List[Any]: return self.elements[0][1] def _lowerCAmelCase ( self ) -> Union[str, Any]: ((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements ) self.set.remove(__UpperCAmelCase ) return (priority, item) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: # euclidean distance _lowerCAmelCase =np.array(__UpperCamelCase ) _lowerCAmelCase =np.array(__UpperCamelCase ) return np.linalg.norm(a - b ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: # integer division by time variable return consistent_heuristic(__UpperCamelCase , __UpperCamelCase ) // t def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: _lowerCAmelCase =g_function[start] + Wa * heuristics[i](__UpperCamelCase , __UpperCamelCase ) return ans def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str: _lowerCAmelCase =np.chararray((n, n) ) for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): _lowerCAmelCase ="""*""" for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): if (j, (n - 1) - i) in blocks: _lowerCAmelCase ="""#""" _lowerCAmelCase ="""-""" _lowerCAmelCase =back_pointer[goal] while x != start: ((_lowerCAmelCase) , (_lowerCAmelCase)) =x # print(x) _lowerCAmelCase ="""-""" _lowerCAmelCase =back_pointer[x] _lowerCAmelCase ="""-""" for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=""" """ ) print("""<-- End position""" , end=""" """ ) else: print(grid[i][j] , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) print("""PATH TAKEN BY THE ALGORITHM IS:-""" ) _lowerCAmelCase =back_pointer[goal] while x != start: print(__UpperCamelCase , end=""" """ ) _lowerCAmelCase =back_pointer[x] print(__UpperCamelCase ) sys.exit() def _lowerCamelCase(__UpperCamelCase ) -> Dict: if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: 
return False return True def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> str: for itera in range(__UpperCamelCase ): open_list[itera].remove_element(__UpperCamelCase ) # print("s", s) # print("j", j) ((_lowerCAmelCase) , (_lowerCAmelCase)) =s _lowerCAmelCase =(x - 1, y) _lowerCAmelCase =(x + 1, y) _lowerCAmelCase =(x, y + 1) _lowerCAmelCase =(x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(__UpperCamelCase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(__UpperCamelCase ) _lowerCAmelCase =-1 _lowerCAmelCase =float("""inf""" ) if valid(__UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1: _lowerCAmelCase =g_function[s] + 1 _lowerCAmelCase =s if neighbours not in close_list_anchor: open_list[0].put(__UpperCamelCase , key(__UpperCamelCase , 0 , __UpperCamelCase , __UpperCamelCase ) ) if neighbours not in close_list_inad: for var in range(1 , __UpperCamelCase ): if key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) <= Wa * key( __UpperCamelCase , 0 , __UpperCamelCase , __UpperCamelCase ): open_list[j].put( __UpperCamelCase , key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) def _lowerCamelCase() -> str: _lowerCAmelCase =[] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list __A = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __A = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __A = make_common_ground() __A = blocks_blk # hyper parameters __A = 1 __A = 1 __A = 20 __A = 3 # one consistent and two other inconsistent # start and end destination __A = (0, 0) __A = (n - 1, n - 1) __A = 1 def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: _lowerCAmelCase ={start: 0, goal: float("""inf""" )} _lowerCAmelCase ={start: -1, goal: -1} _lowerCAmelCase =[] _lowerCAmelCase =set() for i in range(__UpperCamelCase ): open_list.append(PriorityQueue() ) open_list[i].put(__UpperCamelCase , key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) _lowerCAmelCase =[] _lowerCAmelCase =[] while open_list[0].minkey() < float("""inf""" ): for i in range(1 , __UpperCamelCase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("""inf""" ): do_something(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: _lowerCAmelCase , _lowerCAmelCase =open_list[i].top_show() visited.add(__UpperCamelCase ) expand_state( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) close_list_inad.append(__UpperCamelCase ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("""inf""" ): do_something(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: 
_lowerCAmelCase =open_list[0].top_show() visited.add(__UpperCamelCase ) expand_state( __UpperCamelCase , 0 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) close_list_anchor.append(__UpperCamelCase ) print("""No path found to goal""" ) print() for i in range(n - 1 , -1 , -1 ): for j in range(__UpperCamelCase ): if (j, i) in blocks: print("""#""" , end=""" """ ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("""*""" , end=""" """ ) else: print("""-""" , end=""" """ ) else: print("""*""" , end=""" """ ) if (j, i) == (n - 1, n - 1): print("""<-- End position""" , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
341
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
341
1
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = 42 class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase ): _lowerCamelCase : list[list[Edge]] = [[] for _ in range(lowercase )] _lowerCamelCase : List[Any] = size def __getitem__( self , lowercase ): return iter(self._graph[vertex] ) @property def A_ ( self ): return self._size def A_ ( self , lowercase , lowercase , lowercase ): if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(lowercase , lowercase ) ) def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Dict = deque([start_vertex] ) _lowerCamelCase : list[int | None] = [None] * self.size _lowerCamelCase : str = 0 while queue: _lowerCamelCase : Tuple = queue.popleft() _lowerCamelCase : Any = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: _lowerCamelCase : Dict = current_distance + edge.weight _lowerCamelCase : Optional[Any] = distances[edge.destination_vertex] if ( isinstance(lowercase , lowercase ) and new_distance >= dest_vertex_distance ): continue _lowerCamelCase : Union[str, Any] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
96
'''Unit tests for the greedy knapsack implementation.'''

import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # Only the expected error message is asserted here.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
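Since the tests above exercise kp.calc_profit without showing it, here is a hedged reconstruction of the greedy ratio heuristic such a function typically implements; this is an illustrative sketch, not the module's actual source.

# Assumed behavior of knapsack.greedy_knapsack.calc_profit (illustrative).
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    if len(profit) != len(weight):
        raise IndexError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # Take items in descending profit/weight ratio while capacity remains.
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, capacity = 0, max_weight
    for i in order:
        if weight[i] <= capacity:
            capacity -= weight[i]
            total += profit[i]
    return total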
200
0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _A = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') @dataclass class UpperCAmelCase__ : """simple docstring""" UpperCAmelCase__ : Optional[str] = field( default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "The column name of the images in the files."} ) UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "A folder containing the training data."} ) UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "A folder containing the validation data."} ) UpperCAmelCase__ : Optional[float] = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) UpperCAmelCase__ : Optional[int] = field( default=A_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) UpperCAmelCase__ : Optional[int] = field( default=A_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _a ( self ) -> Optional[int]: __UpperCamelCase ={} if self.train_dir is not None: __UpperCamelCase =self.train_dir if self.validation_dir is not None: __UpperCamelCase =self.validation_dir __UpperCamelCase =data_files if data_files else None @dataclass class UpperCAmelCase__ : """simple docstring""" UpperCAmelCase__ : str = field( default=A_ , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) UpperCAmelCase__ : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) UpperCAmelCase__ : str = field(default=A_ , metadata={"help": "Name or path of preprocessor config."} ) UpperCAmelCase__ : bool = field( default=A_ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) UpperCAmelCase__ : float = field( default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) UpperCAmelCase__ : bool = field( default=A_ , metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : float = field( default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __UpperCamelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __UpperCamelCase =training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. 
__UpperCamelCase =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __UpperCamelCase =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. __UpperCamelCase =load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. __UpperCamelCase =None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE__ ) and data_args.train_val_split > 0.0: __UpperCamelCase =ds['train'].train_test_split(data_args.train_val_split ) __UpperCamelCase =split['train'] __UpperCamelCase =split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __UpperCamelCase ={ 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: __UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: __UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: __UpperCamelCase =ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: __UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: __UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: __UpperCamelCase =ViTImageProcessor() # create model if model_args.model_name_or_path: __UpperCamelCase =ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) __UpperCamelCase =ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ ) if training_args.do_train: __UpperCamelCase =ds['train'].column_names else: __UpperCamelCase =ds['validation'].column_names if data_args.image_column_name is not None: __UpperCamelCase =data_args.image_column_name elif "image" in column_names: __UpperCamelCase ='image' elif "img" in column_names: __UpperCamelCase ='img' else: __UpperCamelCase =column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: __UpperCamelCase =image_processor.size['shortest_edge'] else: __UpperCamelCase =(image_processor.size['height'], image_processor.size['width']) __UpperCamelCase =Compose( [ Lambda(lambda SCREAMING_SNAKE_CASE__ : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(SCREAMING_SNAKE_CASE__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(SCREAMING_SNAKE_CASE__ : Optional[Any] ): __UpperCamelCase =[transforms(SCREAMING_SNAKE_CASE__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: __UpperCamelCase =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(SCREAMING_SNAKE_CASE__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: __UpperCamelCase =( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(SCREAMING_SNAKE_CASE__ ) # Compute absolute learning rate __UpperCamelCase =( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: __UpperCamelCase =training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer __UpperCamelCase =Trainer( model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=ds['train'] if training_args.do_train else None , 
eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , ) # Training if training_args.do_train: __UpperCamelCase =None if training_args.resume_from_checkpoint is not None: __UpperCamelCase =training_args.resume_from_checkpoint elif last_checkpoint is not None: __UpperCamelCase =last_checkpoint __UpperCamelCase =trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __UpperCamelCase =trainer.evaluate() trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE__ ) trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE__ ) # Write model card and (optionally) push to hub __UpperCamelCase ={ 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
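For reference, a hedged sketch of a typical launch of the pretraining script above; every flag shown is defined by the argument dataclasses earlier in the file, while the paths and values are illustrative.

# Hypothetical launch command (values illustrative):
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss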
117
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
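The dummy-object pattern above lets the top-level package import succeed even when the optional speech backend is absent; a hedged sketch of the resulting failure mode:

# Instantiating a dummy object raises an informative ImportError when the
# "speech" dependency is missing, instead of failing at import time.
try:
    extractor = ASTFeatureExtractor()
except ImportError as err:
    print(err)  # tells the user to install the missing `speech` backend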
117
1
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : UNetaDModel _snake_case : ScoreSdeVeScheduler def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> int: super().__init__() self.register_modules(unet=_UpperCamelCase , scheduler=_UpperCamelCase ) @torch.no_grad() def __call__( self , _UpperCamelCase = 1 , _UpperCamelCase = 2_0_0_0 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , **_UpperCamelCase , ) -> Union[ImagePipelineOutput, Tuple]: UpperCAmelCase_ : List[Any] = self.unet.config.sample_size UpperCAmelCase_ : Optional[Any] = (batch_size, 3, img_size, img_size) UpperCAmelCase_ : Dict = self.unet UpperCAmelCase_ : Optional[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase ) * self.scheduler.init_noise_sigma UpperCAmelCase_ : List[Any] = sample.to(self.device ) self.scheduler.set_timesteps(_UpperCamelCase ) self.scheduler.set_sigmas(_UpperCamelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): UpperCAmelCase_ : str = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): UpperCAmelCase_ : str = self.unet(_UpperCamelCase , _UpperCamelCase ).sample UpperCAmelCase_ : Tuple = self.scheduler.step_correct(_UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase ).prev_sample # prediction step UpperCAmelCase_ : str = model(_UpperCamelCase , _UpperCamelCase ).sample UpperCAmelCase_ : List[Any] = self.scheduler.step_pred(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : Any = output.prev_sample, output.prev_sample_mean UpperCAmelCase_ : Any = sample_mean.clamp(0 , 1 ) UpperCAmelCase_ : Optional[Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase_ : Tuple = self.numpy_to_pil(_UpperCamelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=_UpperCamelCase )
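A usage sketch for this annealed-Langevin (SDE-VE) sampler; the checkpoint id is an assumption, and any ScoreSdeVe-compatible UNet/scheduler pair would do:

import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
# 2000 steps is the scheduler default; a handful suffices for a smoke test.
image = pipe(batch_size=1, num_inference_steps=10).images[0]
image.save("sde_ve_sample.png")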
29
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    '''simple docstring'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    '''simple docstring'''
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
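A quick check of the backtracking solver: a 5x5 open tour exists (the board records each cell's visit order 1..25), while a 2x2 board raises:

board = open_knight_tour(5)
assert sorted(cell for row in board for cell in row) == list(range(1, 26))
try:
    open_knight_tour(2)
except ValueError as err:
    print(err)  # Open Knight Tour cannot be performed on a board of size 2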
29
1
from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self , a , a=12 , a=7 , a=True , a=True , a=True , a=99 , a=32 , a=32 , a=2 , a=4 , a=37 , a=0.1 , a=0.1 , a=512 , a=0.02 , a=0 , a=None , ): lowercase__ : Any = parent lowercase__ : List[Any] = batch_size lowercase__ : List[str] = seq_length lowercase__ : List[Any] = is_training lowercase__ : str = use_input_mask lowercase__ : str = use_labels lowercase__ : List[str] = vocab_size lowercase__ : Any = hidden_size lowercase__ : Tuple = projection_dim lowercase__ : List[Any] = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : int = dropout lowercase__ : Any = attention_dropout lowercase__ : List[Any] = max_position_embeddings lowercase__ : Tuple = initializer_range lowercase__ : int = scope lowercase__ : List[str] = bos_token_id def snake_case_ ( self): lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : int = None if self.use_input_mask: lowercase__ : str = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: lowercase__ : Any = input_mask.numpy() lowercase__ , lowercase__ : Dict = input_mask.shape lowercase__ : str = np.random.randint(1 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(__SCREAMING_SNAKE_CASE): lowercase__ : Tuple = 1 lowercase__ : Dict = 0 lowercase__ : Optional[Any] = self.get_config() return config, input_ids, tf.convert_to_tensor(__SCREAMING_SNAKE_CASE) def snake_case_ ( self): return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def snake_case_ ( self , a , a , a): lowercase__ : List[Any] = TFBlipTextModel(config=__SCREAMING_SNAKE_CASE) lowercase__ : List[str] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE) lowercase__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def snake_case_ ( self): lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs lowercase__ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE__ (lowerCAmelCase_ , unittest.TestCase ): __lowerCamelCase : Optional[int] = (TFBlipTextModel,) if is_tf_available() else () __lowerCamelCase : int = False __lowerCamelCase : 
List[Any] = False __lowerCamelCase : List[Any] = False def snake_case_ ( self): lowercase__ : Union[str, Any] = BlipTextModelTester(self) lowercase__ : Optional[Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37) def snake_case_ ( self): self.config_tester.run_common_tests() def snake_case_ ( self): lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE) def snake_case_ ( self): pass def snake_case_ ( self): pass @unittest.skip(reason='Blip does not use inputs_embeds') def snake_case_ ( self): pass @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING') def snake_case_ ( self): pass @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING') def snake_case_ ( self): pass @slow def snake_case_ ( self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = TFBlipTextModel.from_pretrained(__SCREAMING_SNAKE_CASE) self.assertIsNotNone(__SCREAMING_SNAKE_CASE) def snake_case_ ( self , a=True): super().test_pt_tf_model_equivalence(allow_missing_keys=__SCREAMING_SNAKE_CASE)
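The tester above builds ragged attention masks by drawing one random valid length per row; the same trick in isolation:

import numpy as np

batch_size, seq_length = 4, 12
input_mask = np.ones((batch_size, seq_length), dtype=np.int64)
start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(start_indices):
    input_mask[batch_idx, :start_index] = 1  # attended tokens
    input_mask[batch_idx, start_index:] = 0  # padding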
352
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging snake_case_ = logging.get_logger(__name__) if is_vision_available(): import PIL class SCREAMING_SNAKE_CASE__ (__snake_case ): __lowerCamelCase : Optional[Any] = ["""pixel_values"""] def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 255 , a = True , a = None , a = None , a = True , **a , ): super().__init__(**a) lowercase__ : List[str] = size if size is not None else {'shortest_edge': 224} lowercase__ : str = get_size_dict(a , default_to_square=a) lowercase__ : Optional[int] = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowercase__ : Union[str, Any] = get_size_dict(a , default_to_square=a , param_name='crop_size') lowercase__ : List[str] = do_resize lowercase__ : List[Any] = size lowercase__ : Tuple = resample lowercase__ : int = do_center_crop lowercase__ : Union[str, Any] = crop_size lowercase__ : int = do_rescale lowercase__ : List[str] = rescale_factor lowercase__ : Tuple = do_normalize lowercase__ : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase__ : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD lowercase__ : List[Any] = do_convert_rgb def snake_case_ ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ): lowercase__ : str = get_size_dict(a , default_to_square=a) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""") lowercase__ : str = get_resize_output_image_size(a , size=size['shortest_edge'] , default_to_square=a) return resize(a , size=a , resample=a , data_format=a , **a) def snake_case_ ( self , a , a , a = None , **a , ): lowercase__ : List[str] = get_size_dict(a) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""") return center_crop(a , size=(size['height'], size['width']) , data_format=a , **a) def snake_case_ ( self , a , a , a = None , **a , ): return rescale(a , scale=a , data_format=a , **a) def snake_case_ ( self , a , a , a , a = None , **a , ): return normalize(a , mean=a , std=a , data_format=a , **a) def snake_case_ ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ): lowercase__ : int = do_resize if do_resize is not None else self.do_resize lowercase__ : Tuple = size if size is not None else self.size lowercase__ : Union[str, Any] = get_size_dict(a , param_name='size' , default_to_square=a) lowercase__ : Optional[Any] = resample if resample is not None else self.resample lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : List[Any] = crop_size if crop_size is not None else self.crop_size lowercase__ : Union[str, Any] = get_size_dict(a , param_name='crop_size' , default_to_square=a) lowercase__ : Dict = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize lowercase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean lowercase__ : List[str] = image_std if image_std is not None else self.image_std lowercase__ : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase__ : str = make_list_of_images(a) if not valid_images(a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase__ : str = [convert_to_rgb(a) for image in images] # All transformations expect numpy arrays. lowercase__ : Dict = [to_numpy_array(a) for image in images] if do_resize: lowercase__ : Tuple = [self.resize(image=a , size=a , resample=a) for image in images] if do_center_crop: lowercase__ : List[str] = [self.center_crop(image=a , size=a) for image in images] if do_rescale: lowercase__ : str = [self.rescale(image=a , scale=a) for image in images] if do_normalize: lowercase__ : Tuple = [self.normalize(image=a , mean=a , std=a) for image in images] lowercase__ : Optional[int] = [to_channel_dimension_format(a , a) for image in images] lowercase__ : Dict = {'pixel_values': images} return BatchFeature(data=a , tensor_type=a)
216
0
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : "DiagonalGaussianDistribution" class UpperCAmelCase__ ( A_ , A_ ): """simple docstring""" UpperCAmelCase__ : List[Any] = True @register_to_config def __init__( self , A_ = 3 , A_ = 3 , A_ = ("DownEncoderBlock2D",) , A_ = ("UpDecoderBlock2D",) , A_ = (64,) , A_ = 1 , A_ = "silu" , A_ = 4 , A_ = 32 , A_ = 32 , A_ = 0.1_8215 , ) -> Any: super().__init__() # pass init params to Encoder __UpperCamelCase =Encoder( in_channels=A_ , out_channels=A_ , down_block_types=A_ , block_out_channels=A_ , layers_per_block=A_ , act_fn=A_ , norm_num_groups=A_ , double_z=A_ , ) # pass init params to Decoder __UpperCamelCase =Decoder( in_channels=A_ , out_channels=A_ , up_block_types=A_ , block_out_channels=A_ , layers_per_block=A_ , norm_num_groups=A_ , act_fn=A_ , ) __UpperCamelCase =nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __UpperCamelCase =nn.Convad(A_ , A_ , 1 ) __UpperCamelCase =False __UpperCamelCase =False # only relevant if vae tiling is enabled __UpperCamelCase =self.config.sample_size __UpperCamelCase =( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __UpperCamelCase =int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __UpperCamelCase =0.25 def _a ( self , A_ , A_=False ) -> Any: if isinstance(A_ , (Encoder, Decoder) ): __UpperCamelCase =value def _a ( self , A_ = True ) -> List[Any]: __UpperCamelCase =use_tiling def _a ( self ) -> Union[str, Any]: self.enable_tiling(A_ ) def _a ( self ) -> List[Any]: __UpperCamelCase =True def _a ( self ) -> Any: __UpperCamelCase =False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _a ( self ) -> Dict[str, AttentionProcessor]: __UpperCamelCase ={} def fn_recursive_add_processors(A_ , A_ , A_ ): if hasattr(A_ , 'set_processor' ): __UpperCamelCase =module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}' , A_ , A_ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(A_ , A_ , A_ ) return processors def _a ( self , A_ ) -> Tuple: __UpperCamelCase =len(self.attn_processors.keys() ) if isinstance(A_ , A_ ) and len(A_ ) != count: raise ValueError( f'A dict of processors was passed, but the number of processors {len(A_ )} does not match the' f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' 
) def fn_recursive_attn_processor(A_ , A_ , A_ ): if hasattr(A_ , 'set_processor' ): if not isinstance(A_ , A_ ): module.set_processor(A_ ) else: module.set_processor(processor.pop(f'{name}.processor' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}' , A_ , A_ ) for name, module in self.named_children(): fn_recursive_attn_processor(A_ , A_ , A_ ) def _a ( self ) -> List[Any]: self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def _a ( self , A_ , A_ = True ) -> AutoencoderKLOutput: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(A_ , return_dict=A_ ) if self.use_slicing and x.shape[0] > 1: __UpperCamelCase =[self.encoder(A_ ) for x_slice in x.split(1 )] __UpperCamelCase =torch.cat(A_ ) else: __UpperCamelCase =self.encoder(A_ ) __UpperCamelCase =self.quant_conv(A_ ) __UpperCamelCase =DiagonalGaussianDistribution(A_ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=A_ ) def _a ( self , A_ , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(A_ , return_dict=A_ ) __UpperCamelCase =self.post_quant_conv(A_ ) __UpperCamelCase =self.decoder(A_ ) if not return_dict: return (dec,) return DecoderOutput(sample=A_ ) @apply_forward_hook def _a ( self , A_ , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: __UpperCamelCase =[self._decode(A_ ).sample for z_slice in z.split(1 )] __UpperCamelCase =torch.cat(A_ ) else: __UpperCamelCase =self._decode(A_ ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=A_ ) def _a ( self , A_ , A_ , A_ ) -> Optional[Any]: __UpperCamelCase =min(a.shape[2] , b.shape[2] , A_ ) for y in range(A_ ): __UpperCamelCase =a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def _a ( self , A_ , A_ , A_ ) -> Dict: __UpperCamelCase =min(a.shape[3] , b.shape[3] , A_ ) for x in range(A_ ): __UpperCamelCase =a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def _a ( self , A_ , A_ = True ) -> AutoencoderKLOutput: __UpperCamelCase =int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __UpperCamelCase =int(self.tile_latent_min_size * self.tile_overlap_factor ) __UpperCamelCase =self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
__UpperCamelCase =[] for i in range(0 , x.shape[2] , A_ ): __UpperCamelCase =[] for j in range(0 , x.shape[3] , A_ ): __UpperCamelCase =x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __UpperCamelCase =self.encoder(A_ ) __UpperCamelCase =self.quant_conv(A_ ) row.append(A_ ) rows.append(A_ ) __UpperCamelCase =[] for i, row in enumerate(A_ ): __UpperCamelCase =[] for j, tile in enumerate(A_ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __UpperCamelCase =self.blend_v(rows[i - 1][j] , A_ , A_ ) if j > 0: __UpperCamelCase =self.blend_h(row[j - 1] , A_ , A_ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(A_ , dim=3 ) ) __UpperCamelCase =torch.cat(A_ , dim=2 ) __UpperCamelCase =DiagonalGaussianDistribution(A_ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=A_ ) def _a ( self , A_ , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: __UpperCamelCase =int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __UpperCamelCase =int(self.tile_sample_min_size * self.tile_overlap_factor ) __UpperCamelCase =self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __UpperCamelCase =[] for i in range(0 , z.shape[2] , A_ ): __UpperCamelCase =[] for j in range(0 , z.shape[3] , A_ ): __UpperCamelCase =z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __UpperCamelCase =self.post_quant_conv(A_ ) __UpperCamelCase =self.decoder(A_ ) row.append(A_ ) rows.append(A_ ) __UpperCamelCase =[] for i, row in enumerate(A_ ): __UpperCamelCase =[] for j, tile in enumerate(A_ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __UpperCamelCase =self.blend_v(rows[i - 1][j] , A_ , A_ ) if j > 0: __UpperCamelCase =self.blend_h(row[j - 1] , A_ , A_ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(A_ , dim=3 ) ) __UpperCamelCase =torch.cat(A_ , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=A_ ) def _a ( self , A_ , A_ = False , A_ = True , A_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]: __UpperCamelCase =sample __UpperCamelCase =self.encode(A_ ).latent_dist if sample_posterior: __UpperCamelCase =posterior.sample(generator=A_ ) else: __UpperCamelCase =posterior.mode() __UpperCamelCase =self.decode(A_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=A_ )
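The seam blending used for tiled encoding/decoding is a linear cross-fade over `blend_extent` rows or columns; a small numeric illustration of the horizontal variant:

import torch

a = torch.zeros(1, 1, 2, 4)  # left tile
b = torch.ones(1, 1, 2, 4)   # right tile
blend_extent = 4
for x in range(blend_extent):
    w = x / blend_extent
    b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - w) + b[:, :, :, x] * w
print(b[0, 0, 0])  # tensor([0.0000, 0.2500, 0.5000, 0.7500]): a smooth ramp across the seam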
62
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coin(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("""The nodes number should be same as the number of coins""")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
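Example for the distribution routine above: a root holding 3 coins with two empty leaves needs one move toward each leaf, so the answer is 2:

root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coin(root) == 2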
12
0
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class A ( _a ): def __init__( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=10_24 , lowerCAmelCase_ : Optional[Any]=10_24 , lowerCAmelCase_ : Tuple=3.6 ) -> List[Any]: """simple docstring""" _a = tokenizer _a = tokenizer.bos_token_id _a = dataset _a = seq_length _a = seq_length * chars_per_token * num_of_sequences def __iter__( self : Any ) -> int: """simple docstring""" _a = iter(self.dataset ) _a = True while more_examples: _a , _a = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(lowerCAmelCase_ )['''content'''] ) buffer_len += len(buffer[-1] ) except StopIteration: _a = False break _a = tokenizer(lowerCAmelCase_ , truncation=lowerCAmelCase_ )['''input_ids'''] _a = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(lowerCAmelCase_ ) , self.seq_length ): _a = all_token_ids[i : i + self.seq_length] if len(lowerCAmelCase_ ) == self.seq_length: yield torch.tensor(lowerCAmelCase_ ) def snake_case_ (UpperCamelCase : int ): '''simple docstring''' _a = {'''streaming''': True} _a = load_dataset(args.dataset_name , split='''train''' , **UpperCamelCase ) _a = ConstantLengthDataset(UpperCamelCase , UpperCamelCase , seq_length=args.seq_length ) _a = DataLoader(UpperCamelCase , batch_size=args.batch_size ) return eval_dataloader def snake_case_ (UpperCamelCase : int ): '''simple docstring''' model.eval() _a = [] for step, batch in enumerate(UpperCamelCase ): with torch.no_grad(): _a = model(UpperCamelCase , labels=UpperCamelCase ) _a = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(UpperCamelCase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break _a = torch.mean(torch.cat(UpperCamelCase ) ) try: _a = torch.exp(UpperCamelCase ) except OverflowError: _a = float('''inf''' ) return loss.item(), perplexity.item() # Setup Accelerator _snake_case : List[str] = Accelerator() # Parse configuration _snake_case : List[str] = HfArgumentParser(EvaluationArguments) _snake_case : Optional[int] = parser.parse_args() set_seed(args.seed) # Logging _snake_case : Any = logging.getLogger(__name__) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) # Load model and tokenizer _snake_case : Dict = AutoModelForCausalLM.from_pretrained(args.model_ckpt) _snake_case : Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader _snake_case : List[str] = create_dataloader(args) # Prepare everything with our `accelerator`. _snake_case , _snake_case : Optional[int] = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('Evaluating and saving model after training') _snake_case , _snake_case : int = evaluate(args) logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
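Perplexity here is simply exp(mean loss) with an infinity fallback on overflow; the arithmetic in isolation:

import torch

losses = torch.tensor([2.0, 2.2, 1.8])
loss = torch.mean(losses)
try:
    perplexity = torch.exp(loss)
except OverflowError:
    perplexity = float("inf")
print(loss.item(), perplexity.item())  # 2.0 and ~7.39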
179
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _snake_case : int = get_tests_dir('fixtures') _snake_case : Tuple = get_tests_dir('fixtures/dummy_feature_extractor_config.json') _snake_case : Optional[int] = get_tests_dir('fixtures/dummy-config.json') class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" _a = 0 def __lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _a = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" _a = AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: _a = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _a = AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ).to_dict() config_dict.pop('''feature_extractor_type''' ) _a = WavaVecaFeatureExtractor(**lowerCAmelCase_ ) # save in new folder model_config.save_pretrained(lowerCAmelCase_ ) config.save_pretrained(lowerCAmelCase_ ) _a = AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ) # make sure private variable is not incorrectly saved _a = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" _a = AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" with self.assertRaisesRegex( lowerCAmelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): _a = AutoFeatureExtractor.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" with self.assertRaisesRegex( lowerCAmelCase_ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _a = AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ , revision='''aaaaaa''' ) def __lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" with self.assertRaisesRegex( lowerCAmelCase_ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): _a = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" with self.assertRaises(lowerCAmelCase_ ): _a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCAmelCase_ ): _a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase_ ) _a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCAmelCase_ ) _a = AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) def __lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , lowerCAmelCase_ ) AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCAmelCase_ ): AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API _a = CustomFeatureExtractor.from_pretrained(lowerCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCAmelCase_ ) _a = AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self : Optional[int] ) -> Any: """simple docstring""" class A ( _a ): lowercase_ = True try: AutoConfig.register('''custom''' , lowerCAmelCase_ ) AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ ) # If remote code is not set, the default is to use local _a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(not hasattr(lowerCAmelCase_ , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
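The register/reload round trip these tests exercise is the public extension API; a minimal sketch, where the bare `CustomConfig`/`CustomFeatureExtractor` stand in for the richer fixture classes:

import tempfile

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin

class CustomConfig(PretrainedConfig):
    model_type = "custom"

class CustomFeatureExtractor(FeatureExtractionMixin):
    pass

AutoConfig.register("custom", CustomConfig)                          # model_type -> config class
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)  # config class -> extractor

feature_extractor = CustomFeatureExtractor()
with tempfile.TemporaryDirectory() as tmp_dir:
    feature_extractor.save_pretrained(tmp_dir)
    reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)  # resolves to CustomFeatureExtractor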
179
1
'''simple docstring'''

ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634E-19,
    "britishthermalunit_it": 1_055.05_585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {", ".join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
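Example conversions against the table above: one kilowatt-hour is 3.6 MJ, and joule/wattsecond are aliases:

assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
assert energy_conversion("joule", "wattsecond", 5.0) == 5.0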
341
'''simple docstring''' class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ) -> int: _snake_case = data _snake_case = previous _snake_case = next_node def __str__(self ) -> str: return f"""{self.data}""" def lowercase (self ) -> int: return self.data def lowercase (self ) -> Dict: return self.next def lowercase (self ) -> Union[str, Any]: return self.previous class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase ) -> List[str]: _snake_case = head def __iter__(self ) -> Optional[Any]: return self def lowercase (self ) -> str: if not self.current: raise StopIteration else: _snake_case = self.current.get_data() _snake_case = self.current.get_next() return value class _lowerCAmelCase : '''simple docstring''' def __init__(self ) -> Optional[int]: _snake_case = None # First node in list _snake_case = None # Last node in list def __str__(self ) -> Optional[int]: _snake_case = self.head _snake_case = [] while current is not None: nodes.append(current.get_data() ) _snake_case = current.get_next() return " ".join(str(UpperCAmelCase ) for node in nodes ) def __contains__(self , UpperCAmelCase ) -> int: _snake_case = self.head while current: if current.get_data() == value: return True _snake_case = current.get_next() return False def __iter__(self ) -> Union[str, Any]: return LinkedListIterator(self.head ) def lowercase (self ) -> str: if self.head: return self.head.get_data() return None def lowercase (self ) -> List[Any]: if self.tail: return self.tail.get_data() return None def lowercase (self , UpperCAmelCase ) -> None: if self.head is None: _snake_case = node _snake_case = node else: self.insert_before_node(self.head , UpperCAmelCase ) def lowercase (self , UpperCAmelCase ) -> None: if self.head is None: self.set_head(UpperCAmelCase ) else: self.insert_after_node(self.tail , UpperCAmelCase ) def lowercase (self , UpperCAmelCase ) -> None: _snake_case = Node(UpperCAmelCase ) if self.head is None: self.set_head(UpperCAmelCase ) else: self.set_tail(UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None: _snake_case = node _snake_case = node.previous if node.get_previous() is None: _snake_case = node_to_insert else: _snake_case = node_to_insert _snake_case = node_to_insert def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None: _snake_case = node _snake_case = node.next if node.get_next() is None: _snake_case = node_to_insert else: _snake_case = node_to_insert _snake_case = node_to_insert def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None: _snake_case = 1 _snake_case = Node(UpperCAmelCase ) _snake_case = self.head while node: if current_position == position: self.insert_before_node(UpperCAmelCase , UpperCAmelCase ) return current_position += 1 _snake_case = node.next self.insert_after_node(self.tail , UpperCAmelCase ) def lowercase (self , UpperCAmelCase ) -> Node: _snake_case = self.head while node: if node.get_data() == item: return node _snake_case = node.get_next() raise Exception("""Node not found""" ) def lowercase (self , UpperCAmelCase ) -> Optional[int]: if (node := self.get_node(UpperCAmelCase )) is not None: if node == self.head: _snake_case = self.head.get_next() if node == self.tail: _snake_case = self.tail.get_previous() self.remove_node_pointers(UpperCAmelCase ) @staticmethod def lowercase (UpperCAmelCase ) -> None: if node.get_next(): _snake_case = node.previous if node.get_previous(): _snake_case = node.next _snake_case = None 
_snake_case = None def lowercase (self ) -> Dict: return self.head is None def __SCREAMING_SNAKE_CASE ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
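A usage sketch for the doubly linked list above, assuming the container class is named LinkedList. The method names `insert` and `delete_value` are assumptions; only `set_head`, `set_tail`, `insert_before_node`, `insert_after_node`, `get_node`, and `remove_node_pointers` are grounded by the call sites in the class bodies:

linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert(value)  # wraps the value in a Node and appends it via set_tail
print(linked_list)             # "1 2 3": __str__ joins node data with spaces
print(2 in linked_list)        # True: __contains__ walks forward from the head
linked_list.delete_value(2)
print(linked_list)             # "1 3"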
341
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) def UpperCamelCase_( snake_case : List[str] , snake_case : List[str]=False ): '''simple docstring''' snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def UpperCamelCase_( snake_case : Tuple , snake_case : str , snake_case : Any=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: snake_case_ = "" else: snake_case_ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) snake_case_ = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def UpperCamelCase_( snake_case : Union[str, Any] ): '''simple docstring''' snake_case_ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(snake_case , snake_case ) def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : Dict , snake_case : Optional[Any] ): '''simple docstring''' snake_case_ = dct.pop(snake_case ) snake_case_ = val def UpperCamelCase_( ): '''simple docstring''' snake_case_ = "http://images.cocodataset.org/val2017/000000039769.jpg" snake_case_ = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return im @torch.no_grad() def UpperCamelCase_( snake_case : Dict , snake_case : Optional[int] , snake_case : Union[str, Any]=True ): '''simple docstring''' snake_case_ = ViTConfig() # patch_size if model_name[-1] == "8": snake_case_ = 8 # set labels if required if not base_model: snake_case_ = 1_0_0_0 snake_case_ = "huggingface/label-files" snake_case_ = "imagenet-1k-id2label.json" snake_case_ = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) ) snake_case_ = {int(snake_case ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: snake_case_ = 3_8_4 snake_case_ = 1_5_3_6 snake_case_ = 1_2 snake_case_ = 6 # load original model from torch hub snake_case_ = torch.hub.load("facebookresearch/dino:main" , snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = original_model.state_dict() if base_model: remove_classification_head_(snake_case ) snake_case_ = create_rename_keys(snake_case , base_model=snake_case ) for src, dest in rename_keys: rename_key(snake_case , snake_case , snake_case ) read_in_q_k_v(snake_case , snake_case , snake_case ) # load HuggingFace model if base_model: snake_case_ = ViTModel(snake_case , add_pooling_layer=snake_case ).eval() else: snake_case_ = ViTForImageClassification(snake_case ).eval() model.load_state_dict(snake_case ) # Check outputs on an image, prepared by ViTImageProcessor snake_case_ = ViTImageProcessor() snake_case_ = image_processor(images=prepare_img() , return_tensors="pt" ) snake_case_ = encoding["pixel_values"] snake_case_ = model(snake_case ) if base_model: snake_case_ = original_model(snake_case ) assert torch.allclose(snake_case , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: snake_case_ = original_model(snake_case ) assert logits.shape == outputs.logits.shape assert torch.allclose(snake_case , outputs.logits , atol=1e-3 ) Path(snake_case ).mkdir(exist_ok=snake_case ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : List[Any] = 
argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="dino_vitb16", type=str, help="Name of the model trained with DINO you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--base_model", action="store_true", help="Whether to only convert the base model (no projection head weights).", ) parser.set_defaults(base_model=True) _SCREAMING_SNAKE_CASE : Any = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
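The q/k/v read-in above slices timm's fused attention projection into three blocks; the slicing in isolation with a toy hidden size:

import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # timm stacks q, k, v row-wise
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : 2 * hidden_size, :]
v = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)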
360
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ): lowerCAmelCase_ : List[str] = IFPipeline lowerCAmelCase_ : int = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} lowerCAmelCase_ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase_ : List[Any] = PipelineTesterMixin.required_optional_params - {"latents"} def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' return self._get_dummy_components() def lowerCAmelCase__ ( self , a__ , a__=0 ) -> str: '''simple docstring''' if str(a__ ).startswith("mps" ): snake_case_ = torch.manual_seed(a__ ) else: snake_case_ = torch.Generator(device=a__ ).manual_seed(a__ ) snake_case_ = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1e-1 ) def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' self._test_save_load_local() def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' snake_case_ = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) snake_case_ = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=a__ , tokenizer=a__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) snake_case_ , snake_case_ = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() snake_case_ = None snake_case_ = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(a__ , a__ , a__ , a__ ) pipe_a.remove_all_hooks() 
pipe_a.remove_all_hooks() # img2img snake_case_ = IFImgaImgPipeline(**pipe_a.components ) snake_case_ = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(a__ , a__ , a__ , a__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting snake_case_ = IFInpaintingPipeline(**pipe_a.components ) snake_case_ = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(a__ , a__ , a__ , a__ ) def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> Dict: '''simple docstring''' _start_torch_memory_measurement() snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 ) snake_case_ = pipe_a( prompt_embeds=a__ , negative_prompt_embeds=a__ , num_inference_steps=2 , generator=a__ , output_type="np" , ) snake_case_ = output.images[0] assert image.shape == (64, 64, 3) snake_case_ = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 snake_case_ = load_numpy( "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(a__ , a__ ) # pipeline 2 _start_torch_memory_measurement() snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 ) snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ ) snake_case_ = pipe_a( prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , ) snake_case_ = output.images[0] assert image.shape == (256, 256, 3) snake_case_ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 snake_case_ = load_numpy( "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(a__ , a__ ) def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> Dict: '''simple docstring''' _start_torch_memory_measurement() snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ ) snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 ) snake_case_ = pipe_a( prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , num_inference_steps=2 , generator=a__ , output_type="np" , ) snake_case_ = output.images[0] assert image.shape == (64, 64, 3) snake_case_ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 snake_case_ = load_numpy( "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(a__ , a__ ) # pipeline 2 _start_torch_memory_measurement() snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 ) snake_case_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a__ ) snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ ) snake_case_ = pipe_a( prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , ) snake_case_ = output.images[0] assert image.shape == (256, 256, 3) snake_case_ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 snake_case_ = load_numpy( 
"https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(a__ , a__ ) def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> str: '''simple docstring''' _start_torch_memory_measurement() snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ ) snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a__ ) snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 ) snake_case_ = pipe_a( prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , num_inference_steps=2 , generator=a__ , output_type="np" , ) snake_case_ = output.images[0] assert image.shape == (64, 64, 3) snake_case_ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 snake_case_ = load_numpy( "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(a__ , a__ ) # pipeline 2 _start_torch_memory_measurement() snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 ) snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ ) snake_case_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a__ ) snake_case_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(a__ ) snake_case_ = pipe_a( prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , ) snake_case_ = output.images[0] assert image.shape == (256, 256, 3) snake_case_ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 snake_case_ = load_numpy( "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(a__ , a__ ) def UpperCamelCase_( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
92
0
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return the writer batch size implied by the features, capping the Parquet
    row group size for media-heavy datasets so row groups stay cheap to read."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle.

        Caller is responsible for opening and closing the handle.
        """
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
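# Hedged usage sketch, not part of the original module: these classes back the
# public `Dataset.to_parquet` / `Dataset.from_parquet` entry points, which is
# how they are normally exercised. The file name below is hypothetical, and
# the block is guarded so it never runs on package import.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
    ds.to_parquet("example.parquet")  # returns the number of bytes written
    reloaded = Dataset.from_parquet("example.parquet")
    assert reloaded.features == ds.features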
def solution(pence: int = 200) -> int:
    """Count the number of ways to make `pence` pence from standard British
    coin denominations, via bottom-up dynamic programming over the coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
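    # Not in the original snippet: two small worked cases as a sanity check.
    # With coins [1, 2, 5], the four ways to make 5 pence are 1+1+1+1+1,
    # 1+1+1+2, 1+2+2 and 5; making 0 pence has exactly one way (no coins).
    assert solution(5) == 4
    assert solution(0) == 1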
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # Gated activation: GELU branch times a linear branch.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style. No bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 layer norm only scales; variance is accumulated in float32 for stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    """Tanh approximation of GELU, as used in Google BERT and OpenAI GPT.
    See the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415"""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """FiLM layer: predicts a per-feature scale and shift from the conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
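# Hedged smoke test, not part of the original module: build a tiny decoder and
# run one denoising step to exercise the forward() contract. All sizes below
# are made up for speed; `encodings_and_masks` is a list of (encoding, mask)
# pairs, mirroring how the spectrogram diffusion pipeline calls this model.
if __name__ == "__main__":
    decoder = T5FilmDecoder(
        input_dims=8, targets_length=16, d_model=32, num_layers=1, num_heads=2, d_kv=16, d_ff=64
    )
    batch, enc_len = 2, 10
    encodings = torch.randn(batch, enc_len, 32)
    enc_mask = torch.ones(batch, enc_len)
    noisy_targets = torch.randn(batch, 16, 8)  # (batch, targets_length, input_dims)
    noise_time = torch.rand(batch)  # in [0, 1); rescaled internally by max_decoder_noise_time
    spec_out = decoder(
        encodings_and_masks=[(encodings, enc_mask)],
        decoder_input_tokens=noisy_targets,
        decoder_noise_time=noise_time,
    )
    print(spec_out.shape)  # expected: torch.Size([2, 16, 8])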
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[int]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell can be visited if it is inside the grid, is land, and is unvisited.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Depth-first search over all 8 neighbours of cell (i, j).
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
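# Example usage, not in the original: with 8-directional connectivity the grid
# below contains five islands (one spanning the top-left corner, one in the
# top-right, and three isolated cells along the bottom row).
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    g = Graph(5, 5, grid)
    print(g.count_islands())  # 5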